From 85450da2f118ef18cc70abb3cf81a7f85ac51781 Mon Sep 17 00:00:00 2001
From: XixinYang <121591093+XixinYang@users.noreply.github.com>
Date: Thu, 13 Jul 2023 11:08:23 +0800
Subject: [PATCH] refactor: uniform all model names (#701)
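
Model names in the registry, the config paths, and the benchmark table now follow one naming convention (e.g. GoogleNet -> googlenet, ConvNeXt_tiny -> convnext_tiny, MnasNet_0.5 -> mnasnet_050), and module files such as mobilenet_v1.py are renamed to match (mobilenetv1.py). A minimal usage sketch, assuming the existing `mindcv.create_model` factory (which is not part of this diff), shows how the unified names are referenced:

```python
# Sketch only: assumes mindcv's create_model registry lookup; the model names are the
# unified lowercase entries from benchmark_results.md (e.g. "googlenet", "convnext_tiny").
import mindcv
from mindcv.models import mobilenetv1  # module renamed from mobilenet_v1 in this patch

net = mindcv.create_model("googlenet", num_classes=1000, pretrained=False)
print(type(net).__name__)  # class backing the "googlenet" registry entry
```
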
---
README.md | 2 +-
README_CN.md | 2 +-
RELEASE.md | 2 +-
benchmark_results.md | 196 +++++++++---------
configs/bit/bit_resnet101_ascend.yaml | 2 +-
configs/bit/bit_resnet50_ascend.yaml | 2 +-
configs/bit/bit_resnet50x3_ascend.yaml | 2 +-
configs/convnext/README.md | 6 +-
configs/convnextv2/README.md | 6 +-
configs/crossvit/README.md | 4 +-
configs/crossvit/crossvit_15_ascend.yaml | 2 +-
configs/crossvit/crossvit_18_ascend.yaml | 2 +-
configs/crossvit/crossvit_9_ascend.yaml | 2 +-
configs/densenet/README.md | 12 +-
configs/dpn/README.md | 12 +-
configs/ghostnet/README.md | 6 +-
configs/googlenet/README.md | 4 +-
configs/inceptionv3/README.md | 4 +-
configs/inceptionv4/README.md | 2 +-
configs/mixnet/README.md | 6 +-
configs/mnasnet/README.md | 14 +-
configs/mnasnet/mnasnet_0.5_ascend.yaml | 2 +-
configs/mnasnet/mnasnet_0.75_ascend.yaml | 2 +-
configs/mnasnet/mnasnet_0.75_gpu.yaml | 2 +-
configs/mnasnet/mnasnet_1.0_ascend.yaml | 2 +-
configs/mnasnet/mnasnet_1.0_gpu.yaml | 2 +-
configs/mnasnet/mnasnet_1.3_ascend.yaml | 2 +-
configs/mnasnet/mnasnet_1.4_ascend.yaml | 2 +-
configs/mnasnet/mnasnet_1.4_gpu.yaml | 2 +-
configs/mobilenetv1/README.md | 12 +-
.../mobilenetv1/mobilenet_v1_0.25_ascend.yaml | 2 +-
.../mobilenetv1/mobilenet_v1_0.25_gpu.yaml | 2 +-
.../mobilenetv1/mobilenet_v1_0.5_ascend.yaml | 2 +-
configs/mobilenetv1/mobilenet_v1_0.5_gpu.yaml | 2 +-
.../mobilenetv1/mobilenet_v1_0.75_ascend.yaml | 2 +-
.../mobilenetv1/mobilenet_v1_0.75_gpu.yaml | 2 +-
.../mobilenetv1/mobilenet_v1_1.0_ascend.yaml | 2 +-
configs/mobilenetv1/mobilenet_v1_1.0_gpu.yaml | 2 +-
configs/mobilenetv2/README.md | 10 +-
.../mobilenetv2/mobilenet_v2_0.75_ascend.yaml | 2 +-
.../mobilenetv2/mobilenet_v2_1.0_ascend.yaml | 2 +-
.../mobilenetv2/mobilenet_v2_1.4_ascend.yaml | 2 +-
configs/mobilenetv3/README.md | 8 +-
configs/pit/README.md | 8 +-
configs/pvt/README.md | 8 +-
configs/pvtv2/README.md | 14 +-
configs/repmlp/README.md | 6 +-
configs/repmlp/repmlp_t224_ascend.yaml | 2 +-
configs/res2net/README.md | 12 +-
configs/resnest/README.md | 4 +-
configs/resnet/README.md | 12 +-
configs/resnetv2/README.md | 8 +-
configs/resnext/README.md | 8 +-
configs/rexnet/README.md | 14 +-
configs/rexnet/rexnet_x09_ascend.yaml | 2 +-
configs/rexnet/rexnet_x10_ascend.yaml | 2 +-
configs/rexnet/rexnet_x13_ascend.yaml | 2 +-
configs/rexnet/rexnet_x15_ascend.yaml | 2 +-
configs/rexnet/rexnet_x20_ascend.yaml | 2 +-
configs/senet/README.md | 10 +-
configs/shufflenetv1/README.md | 8 +-
.../shufflenet_v1_0.5_ascend.yaml | 2 +-
.../shufflenet_v1_1.0_ascend.yaml | 2 +-
configs/shufflenetv2/README.md | 4 +-
configs/sknet/README.md | 12 +-
configs/squeezenet/README.md | 14 +-
configs/swintransformerv2/README.md | 4 +-
configs/xception/README.md | 2 +-
configs/xcit/README.md | 4 +-
configs/xcit/xcit_tiny_12_p16_ascend.yaml | 2 +-
docs/en/tutorials/deployment.md | 22 +-
docs/en/tutorials/inference.md | 14 +-
docs/zh/tutorials/deployment.md | 22 +-
docs/zh/tutorials/inference.md | 14 +-
mindcv/models/__init__.py | 38 ++--
mindcv/models/bit.py | 24 +--
mindcv/models/crossvit.py | 12 +-
.../{inception_v3.py => inceptionv3.py} | 0
.../{inception_v4.py => inceptionv4.py} | 0
mindcv/models/mnasnet.py | 40 ++--
.../{mobilenet_v1.py => mobilenetv1.py} | 32 +--
.../{mobilenet_v2.py => mobilenetv2.py} | 100 ++++-----
.../{mobilenet_v3.py => mobilenetv3.py} | 16 +-
mindcv/models/repmlp.py | 81 ++++----
mindcv/models/rexnet.py | 40 ++--
mindcv/models/shufflenetv1.py | 64 +++---
mindcv/models/shufflenetv2.py | 16 +-
...swin_transformer.py => swintransformer.py} | 0
...transformer_v2.py => swintransformerv2.py} | 0
mindcv/models/xcit.py | 10 +-
tests/modules/test_feature_extraction.py | 2 +-
tests/modules/test_models.py | 14 +-
92 files changed, 548 insertions(+), 541 deletions(-)
rename mindcv/models/{inception_v3.py => inceptionv3.py} (100%)
rename mindcv/models/{inception_v4.py => inceptionv4.py} (100%)
rename mindcv/models/{mobilenet_v1.py => mobilenetv1.py} (85%)
rename mindcv/models/{mobilenet_v2.py => mobilenetv2.py} (88%)
rename mindcv/models/{mobilenet_v3.py => mobilenetv3.py} (96%)
rename mindcv/models/{swin_transformer.py => swintransformer.py} (100%)
rename mindcv/models/{swin_transformer_v2.py => swintransformerv2.py} (100%)
diff --git a/README.md b/README.md
index 20fcda491..728f3eddb 100644
--- a/README.md
+++ b/README.md
@@ -216,7 +216,7 @@ Currently, MindCV supports the model families listed below. More models with pre
* EfficientNet (MBConvNet Family) https://arxiv.org/abs/1905.11946
* EfficientNet V2 - https://arxiv.org/abs/2104.00298
* GhostNet - https://arxiv.org/abs/1911.11907
-* GoogleNet - https://arxiv.org/abs/1409.4842
+* GoogLeNet - https://arxiv.org/abs/1409.4842
* Inception-V3 - https://arxiv.org/abs/1512.00567
* Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261
* MNASNet - https://arxiv.org/abs/1807.11626
diff --git a/README_CN.md b/README_CN.md
index dda2fcad5..7b72b5b2c 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -217,7 +217,7 @@ python train.py --model=resnet50 --dataset=cifar10 \
* EfficientNet (MBConvNet Family) https://arxiv.org/abs/1905.11946
* EfficientNet V2 - https://arxiv.org/abs/2104.00298
* GhostNet - https://arxiv.org/abs/1911.11907
-* GoogleNet - https://arxiv.org/abs/1409.4842
+* GoogLeNet - https://arxiv.org/abs/1409.4842
* Inception-V3 - https://arxiv.org/abs/1512.00567
* Inception-ResNet-V2 and Inception-V4 - https://arxiv.org/abs/1602.07261
* MNASNet - https://arxiv.org/abs/1807.11626
diff --git a/RELEASE.md b/RELEASE.md
index 6423c4b85..b14f07a30 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -123,7 +123,7 @@
`mindcv.models` now expose `num_classes` and `in_channels` as constructor arguments:
- Add DenseNet models and pre-trained weights
-- Add GoogleNet models and pre-trained weights
+- Add GoogLeNet models and pre-trained weights
- Add Inception V3 models and pre-trained weights
- Add Inception V4 models and pre-trained weights
- Add MnasNet models and pre-trained weights
diff --git a/benchmark_results.md b/benchmark_results.md
index 919774447..d8be98502 100644
--- a/benchmark_results.md
+++ b/benchmark_results.md
@@ -1,81 +1,82 @@
| Model | Context | Top-1 (%) | Top-5 (%) | Params(M) | Recipe | Download |
| -------------- | -------- | --------- | --------- | --------- | ------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- |
-| bit_resnet50 | D910x8-G | 76.81 | 93.17 | 25.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/bit/bit_resnet50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50-1e4795a4.ckpt) |
-| bit_resnet50x3 | D910x8-G | 80.63 | 95.12 | 217.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/bit/bit_resnet50x3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50x3-a960f91f.ckpt) |
-| bit_resnet101 | D910x8-G | 77.93 | 93.75 | 44.54 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/bit/bit_resnet101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet101-2efa9106.ckpt) |
+| BiT_resnet50 | D910x8-G | 76.81 | 93.17 | 25.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/bit/bit_resnet50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50-1e4795a4.ckpt) |
+| BiT_resnet50x3 | D910x8-G | 80.63 | 95.12 | 217.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/bit/bit_resnet50x3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50x3-a960f91f.ckpt) |
+| BiT_resnet101 | D910x8-G | 77.93 | 93.75 | 44.54 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/bit/bit_resnet101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet101-2efa9106.ckpt) |
| coat_lite_tiny | D910x8-G | 77.35 | 93.43 | 5.72 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/coat/coat_lite_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/coat/coat_lite_tiny-fa7bf894.ckpt) |
| coat_lite_mini | D910x8-G | 78.51 | 93.84 | 11.01 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/coat/coat_lite_mini_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/coat/coat_lite_mini-55a52f05.ckpt) |
| coat_tiny | D910x8-G | 79.67 | 94.88 | 5.50 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/coat/coat_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/coat/coat_tiny-071cb792.ckpt) |
+| coat_mini | D910x8-G | 81.08 | 95.34 | 10.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/coat/coat_mini_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/coat/coat_mini-57c5bce7.ckpt) |
| convit_tiny | D910x8-G | 73.66 | 91.72 | 5.71 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convit/convit_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convit/convit_tiny-e31023f2.ckpt) |
| convit_tiny_plus | D910x8-G | 77.00 | 93.60 | 9.97 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convit/convit_tiny_plus_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convit/convit_tiny_plus-e9d7fb92.ckpt) |
| convit_small | D910x8-G | 81.63 | 95.59 | 27.78 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convit/convit_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convit/convit_small-ba858604.ckpt) |
| convit_small_plus | D910x8-G | 81.80 | 95.42 | 48.98 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convit/convit_small_plus_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convit/convit_small_plus-2352b9f7.ckpt) |
| convit_base | D910x8-G | 82.10 | 95.52 | 86.54 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convit/convit_base_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convit/convit_base-c61b808c.ckpt) |
| convit_base_plus | D910x8-G | 81.96 | 95.04 | 153.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convit/convit_base_plus_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convit/convit_base_plus-5c61c9ce.ckpt) |
-| ConvNeXt_tiny | D910x64-G | 81.91 | 95.79 | 28.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_tiny-ae5ff8d7.ckpt) |
-| ConvNeXt_small | D910x64-G | 83.40 | 96.36 | 50.22 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_small-e23008f3.ckpt) |
-| ConvNeXt_base | D910x64-G | 83.32 | 96.24 | 88.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_base_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_base-ee3544b8.ckpt) |
-| ConvNeXtV2_tiny | D910x8-G | 82.43 | 95.98 | 28.64 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnextv2/convnextv2_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnextv2/convnextv2_tiny-d441ba2c.ckpt) |
+| convnext_tiny | D910x64-G | 81.91 | 95.79 | 28.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_tiny-ae5ff8d7.ckpt) |
+| convnext_small | D910x64-G | 83.40 | 96.36 | 50.22 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_small-e23008f3.ckpt) |
+| convnext_base | D910x64-G | 83.32 | 96.24 | 88.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_base_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_base-ee3544b8.ckpt) |
+| convnextv2_tiny | D910x8-G | 82.43 | 95.98 | 28.64 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnextv2/convnextv2_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnextv2/convnextv2_tiny-d441ba2c.ckpt) |
| crossvit_9 | D910x8-G | 73.56 | 91.79 | 8.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/crossvit/crossvit_9_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/crossvit/crossvit_9-e74c8e18.ckpt) |
| crossvit_15 | D910x8-G | 81.08 | 95.33 | 27.27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/crossvit/crossvit_15_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/crossvit/crossvit_15-eaa43c02.ckpt) |
| crossvit_18 | D910x8-G | 81.93 | 95.75 | 43.27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/crossvit/crossvit_18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/crossvit/crossvit_18-ca0a2e43.ckpt) |
-| densenet_121 | D910x8-G | 75.64 | 92.84 | 8.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_121_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet121-120_5004_Ascend.ckpt) |
-| densenet_161 | D910x8-G | 79.09 | 94.66 | 28.90 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_161_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet161-120_5004_Ascend.ckpt) |
-| densenet_169 | D910x8-G | 77.26 | 93.71 | 14.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_169_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet169-120_5004_Ascend.ckpt) |
-| densenet_201 | D910x8-G | 78.14 | 94.08 | 20.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_201_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet201-120_5004_Ascend.ckpt) |
-| dpn92 | D910x8-G | 79.46 | 94.49 | 37.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn92_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn92-e3e0fca.ckpt) |
-| dpn98 | D910x8-G | 79.94 | 94.57 | 61.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn98_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn98-119a8207.ckpt) |
-| dpn107 | D910x8-G | 80.05 | 94.74 | 87.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn107_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn107-7d7df07b.ckpt) |
-| dpn131 | D910x8-G | 80.07 | 94.72 | 79.48 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn131_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn131-47f084b3.ckpt) |
+| densenet121 | D910x8-G | 75.64 | 92.84 | 8.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet121_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet121-120_5004_Ascend.ckpt) |
+| densenet161 | D910x8-G | 79.09 | 94.66 | 28.90 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet161_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet161-120_5004_Ascend.ckpt) |
+| densenet169 | D910x8-G | 77.26 | 93.71 | 14.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet169_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet169-120_5004_Ascend.ckpt) |
+| densenet201 | D910x8-G | 78.14 | 94.08 | 20.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet201_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet201-120_5004_Ascend.ckpt) |
+| dpn92 | D910x8-G | 79.46 | 94.49 | 37.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn92_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn92-e3e0fca.ckpt) |
+| dpn98 | D910x8-G | 79.94 | 94.57 | 61.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn98_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn98-119a8207.ckpt) |
+| dpn107 | D910x8-G | 80.05 | 94.74 | 87.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn107_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn107-7d7df07b.ckpt) |
+| dpn131 | D910x8-G | 80.07 | 94.72 | 79.48 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn131_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn131-47f084b3.ckpt) |
| edgenext_xx_small | D910x8-G | 71.02 | 89.99 | 1.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/edgenext/edgenext_xx_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/edgenext/edgenext_xx_small-afc971fb.ckpt) |
| edgenext_x_small | D910x8-G | 75.14 | 92.50 | 2.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/edgenext/edgenext_x_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/edgenext/edgenext_x_small-a200c6fc.ckpt) |
| edgenext_small | D910x8-G | 79.15 | 94.39 | 5.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/edgenext/edgenext_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/edgenext/edgenext_small-f530c372.ckpt) |
| edgenext_base | D910x8-G | 82.24 | 95.94 | 18.51 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/edgenext/edgenext_base_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/edgenext/edgenext_base-4335e9dc.ckpt) |
| efficientnet_b0 | D910x64-G | 76.89 | 93.16 | 5.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/efficientnet/efficientnet_b0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/efficientnet/efficientnet_b0-103ec70c.ckpt) |
| efficientnet_b1 | D910x64-G | 78.95 | 94.34 | 7.86 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/efficientnet/efficientnet_b1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/efficientnet/efficientnet_b1-f8c6b13f.ckpt) |
-| GhostNet_050 | D910x8-G | 66.03 | 86.64 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_050_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_050-85b91860.ckpt) |
-| GhostNet_100 | D910x8-G | 73.78 | 91.66 | 5.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_100-bef8025a.ckpt) |
-| GhostNet_130 | D910x8-G | 75.50 | 92.56 | 7.39 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_130_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_130-cf4c235c.ckpt) |
-| GoogLeNet | D910x8-G | 72.68 | 90.89 | 6.99 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/googlenet/googlenet_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/googlenet/googlenet-5552fcd3.ckpt) |
+| ghostnet_050 | D910x8-G | 66.03 | 86.64 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_050_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_050-85b91860.ckpt) |
+| ghostnet_100 | D910x8-G | 73.78 | 91.66 | 5.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_100-bef8025a.ckpt) |
+| ghostnet_130 | D910x8-G | 75.50 | 92.56 | 7.39 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_130_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_130-cf4c235c.ckpt) |
+| googlenet | D910x8-G | 72.68 | 90.89 | 6.99 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/googlenet/googlenet_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/googlenet/googlenet-5552fcd3.ckpt) |
| hrnet_w32 | D910x8-G | 80.64 | 95.44 | 41.30 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/hrnet/hrnet_w32_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/hrnet/hrnet_w32-cc4fbd91.ckpt) |
| hrnet_w48 | D910x8-G | 81.19 | 95.69 | 77.57 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/hrnet/hrnet_w48_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/hrnet/hrnet_w48-2e3399cd.ckpt) |
-| Inception_v3 | D910x8-G | 79.11 | 94.40 | 27.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv3/inception_v3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v3/inception_v3-38f67890.ckpt) |
-| Inception_v4 | D910x8-G | 80.88 | 95.34 | 42.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv4/inception_v4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v4/inception_v4-db9c45b3.ckpt) |
-| MixNet_s | D910x8-G | 75.52 | 92.52 | 4.17 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_s-2a5ef3a3.ckpt) |
-| MixNet_m | D910x8-G | 76.64 | 93.05 | 5.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_m_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_m-74cc4cb1.ckpt) |
-| MixNet_l | D910x8-G | 78.73 | 94.31 | 7.38 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_l_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_l-978edf2b.ckpt) |
-| MnasNet_0.5 | D910x8-G | 68.07 | 88.09 | 2.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_050-7d8bf4db.ckpt) |
-| MnasNet_0.75 | D910x8-G | 71.81 | 90.53 | 3.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_075-465d366d.ckpt) |
-| MnasNet_1_0 | D910x8-G | 74.28 | 91.70 | 4.42 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_100-1bcf43f8.ckpt) |
-| MnasNet_1_3 | D910x8-G | 75.65 | 92.64 | 6.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_130-a43a150a.ckpt) |
-| MnasNet_1.4 | D910x8-G | 76.01 | 92.83 | 7.16 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_140-7e20bb30.ckpt) |
-| MobileNet_v1_025 | D910x8-G | 53.87 | 77.66 | 0.47 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.25_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_025-d3377fba.ckpt) |
-| MobileNet_v1_050 | D910x8-G | 65.94 | 86.51 | 1.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_050-23e9ddbe.ckpt) |
-| MobileNet_v1_075 | D910x8-G | 70.44 | 89.49 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_075-5bed0c73.ckpt) |
-| MobileNet_v1_100 | D910x8-G | 72.95 | 91.01 | 4.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_100-91c7b206.ckpt) |
-| MobileNet_v2_075 | D910x8-G | 69.98 | 89.32 | 2.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_075-bd7bd4c4.ckpt) |
-| MobileNet_v2_100 | D910x8-G | 72.27 | 90.72 | 3.54 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_100-d5532038.ckpt) |
-| MobileNet_v2_140 | D910x8-G | 75.56 | 92.56 | 6.15 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_1.4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_140-98776171.ckpt) |
-| MobileNetV3_small_100 | D910x8-G | 68.10 | 87.86 | 2.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_small_100-509c6047.ckpt) |
-| MobileNetV3_large_100 | D910x8-G | 75.23 | 92.31 | 5.51 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_large_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_large_100-1279ad5f.ckpt) |
+| inception_v3 | D910x8-G | 79.11 | 94.40 | 27.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv3/inception_v3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v3/inception_v3-38f67890.ckpt) |
+| inception_v4 | D910x8-G | 80.88 | 95.34 | 42.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv4/inception_v4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v4/inception_v4-db9c45b3.ckpt) |
+| mixnet_s | D910x8-G | 75.52 | 92.52 | 4.17 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_s-2a5ef3a3.ckpt) |
+| mixnet_m | D910x8-G | 76.64 | 93.05 | 5.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_m_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_m-74cc4cb1.ckpt) |
+| mixnet_l | D910x8-G | 78.73 | 94.31 | 7.38 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_l_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_l-978edf2b.ckpt) |
+| mnasnet_050 | D910x8-G | 68.07 | 88.09 | 2.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_050_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_050-7d8bf4db.ckpt) |
+| mnasnet_075 | D910x8-G | 71.81 | 90.53 | 3.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_075_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_075-465d366d.ckpt) |
+| mnasnet_100 | D910x8-G | 74.28 | 91.70 | 4.42 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_100-1bcf43f8.ckpt) |
+| mnasnet_130 | D910x8-G | 75.65 | 92.64 | 6.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_130_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_130-a43a150a.ckpt) |
+| mnasnet_140 | D910x8-G | 76.01 | 92.83 | 7.16 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_140_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_140-7e20bb30.ckpt) |
+| mobilenet_v1_025 | D910x8-G | 53.87 | 77.66 | 0.47 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_025_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_025-d3377fba.ckpt) |
+| mobilenet_v1_050 | D910x8-G | 65.94 | 86.51 | 1.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_050_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_050-23e9ddbe.ckpt) |
+| mobilenet_v1_075 | D910x8-G | 70.44 | 89.49 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_075_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_075-5bed0c73.ckpt) |
+| mobilenet_v1_100 | D910x8-G | 72.95 | 91.01 | 4.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_100-91c7b206.ckpt) |
+| mobilenet_v2_075 | D910x8-G | 69.98 | 89.32 | 2.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_075_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_075-bd7bd4c4.ckpt) |
+| mobilenet_v2_100 | D910x8-G | 72.27 | 90.72 | 3.54 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_100-d5532038.ckpt) |
+| mobilenet_v2_140 | D910x8-G | 75.56 | 92.56 | 6.15 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_140_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_140-98776171.ckpt) |
+| mobilenet_v3_small_100 | D910x8-G | 68.10 | 87.86 | 2.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_small_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_small_100-509c6047.ckpt) |
+| mobilenet_v3_large_100 | D910x8-G | 75.23 | 92.31 | 5.51 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_large_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_large_100-1279ad5f.ckpt) |
| mobilevit_xx_small | D910x8-G | 68.91 | 88.91 | 1.27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilevit/mobilevit_xx_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilevit/mobilevit_xx_small-af9da8a0.ckpt) |
| mobilevit_x_small | D910x8-G | 74.99 | 92.32 | 2.32 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilevit/mobilevit_x_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilevit/mobilevit_x_small-673fc6f2.ckpt) |
| mobilevit_small | D910x8-G | 78.47 | 94.18 | 5.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilevit/mobilevit_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilevit/mobilevit_small-caf79638.ckpt) |
| nasnet_a_4x1056 | D910x8-G | 73.65 | 91.25 | 5.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/nasnet/nasnet_a_4x1056_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/nasnet/nasnet_a_4x1056-0fbb5cdd.ckpt) |
-| PiT_ti | D910x8-G | 72.96 | 91.33 | 4.85 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_ti_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_ti-e647a593.ckpt) |
-| PiT_xs | D910x8-G | 78.41 | 94.06 | 10.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_xs_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_xs-fea0d37e.ckpt) |
-| PiT_s | D910x8-G | 80.56 | 94.80 | 23.46 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_s-3c1ba36f.ckpt) |
-| PiT_b | D910x8-G | 81.87 | 95.04 | 73.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_b-2411c9b6.ckpt) |
+| pit_ti | D910x8-G | 72.96 | 91.33 | 4.85 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_ti_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_ti-e647a593.ckpt) |
+| pit_xs | D910x8-G | 78.41 | 94.06 | 10.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_xs_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_xs-fea0d37e.ckpt) |
+| pit_s | D910x8-G | 80.56 | 94.80 | 23.46 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_s-3c1ba36f.ckpt) |
+| pit_b | D910x8-G | 81.87 | 95.04 | 73.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_b-2411c9b6.ckpt) |
| poolformer_s12 | D910x8-G | 77.33 | 93.34 | 11.92 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/poolformer/poolformer_s12_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/poolformer/poolformer_s12-5be5c4e4.ckpt) |
-| PVT_tiny | D910x8-G | 74.81 | 92.18 | 13.23 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_tiny-6abb953d.ckpt) |
-| PVT_small | D910x8-G | 79.66 | 94.71 | 24.49 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_small-213c2ed1.ckpt) |
-| PVT_medium | D910x8-G | 81.82 | 95.81 | 44.21 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_medium_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_medium-469e6802.ckpt) |
-| PVT_large | D910x8-G | 81.75 | 95.70 | 61.36 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_large_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_large-bb6895d7.ckpt) |
-| PVTV2_b0 | D910x8-G | 71.50 | 90.60 | 3.67 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b0-1c4f6683.ckpt) |
-| PVTV2_b1 | D910x8-G | 78.91 | 94.49 | 14.01 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b1-3ceb171a.ckpt) |
-| PVTV2_b2 | D910x8-G | 81.99 | 95.74 | 25.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b2_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b2-0565d18e.ckpt) |
-| PVTV2_b3 | D910x8-G | 82.84 | 96.24 | 45.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b3-feaae3fc.ckpt) |
-| PVTV2_b4 | D910x8-G | 83.14 | 96.27 | 62.56 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b4-1cf4bc03.ckpt) |
+| pvt_tiny | D910x8-G | 74.81 | 92.18 | 13.23 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_tiny-6abb953d.ckpt) |
+| pvt_small | D910x8-G | 79.66 | 94.71 | 24.49 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_small-213c2ed1.ckpt) |
+| pvt_medium | D910x8-G | 81.82 | 95.81 | 44.21 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_medium_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_medium-469e6802.ckpt) |
+| pvt_large | D910x8-G | 81.75 | 95.70 | 61.36 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_large_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_large-bb6895d7.ckpt) |
+| pvt_v2_b0 | D910x8-G | 71.50 | 90.60 | 3.67 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b0-1c4f6683.ckpt) |
+| pvt_v2_b1 | D910x8-G | 78.91 | 94.49 | 14.01 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b1-3ceb171a.ckpt) |
+| pvt_v2_b2 | D910x8-G | 81.99 | 95.74 | 25.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b2_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b2-0565d18e.ckpt) |
+| pvt_v2_b3 | D910x8-G | 82.84 | 96.24 | 45.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b3-feaae3fc.ckpt) |
+| pvt_v2_b4 | D910x8-G | 83.14 | 96.27 | 62.56 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b4-1cf4bc03.ckpt) |
| regnet_x_200mf | D910x8-G | 68.74 | 88.38 | 2.68 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/regnet/regnet_x_200mf_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/regnet/regnet_x_200mf-0c2b1eb5.ckpt) |
| regnet_x_400mf | D910x8-G | 73.16 | 91.35 | 5.16 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/regnet/regnet_x_400mf_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/regnet/regnet_x_400mf-4848837d.ckpt) |
| regnet_x_600mf | D910x8-G | 74.34 | 92.00 | 6.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/regnet/regnet_x_600mf_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/regnet/regnet_x_600mf-ccd76c94.ckpt) |
@@ -84,6 +85,7 @@
| regnet_y_400mf | D910x8-G | 73.91 | 91.84 | 4.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/regnet/regnet_y_400mf_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/regnet/regnet_y_400mf-d496799d.ckpt) |
| regnet_y_600mf | D910x8-G | 75.69 | 92.50 | 6.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/regnet/regnet_y_600mf_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/regnet/regnet_y_600mf-a84e19b2.ckpt) |
| regnet_y_800mf | D910x8-G | 76.52 | 93.10 | 6.26 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/regnet/regnet_y_800mf_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/regnet/regnet_y_800mf-9b5211bd.ckpt) |
+| regnet_y_16gf | D910x8-G | 82.92 | 96.29 | 83.71 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/regnet/regnet_y_16gf_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/regnet/regnet_y_16gf-c30a856f.ckpt) |
| repmlp_t224 | D910x8-G | 76.71 | 93.30 | 38.30 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/repmlp/repmlp_t224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/repmlp/repmlp_t224-8dbedd00.ckpt) |
| repvgg_a0 | D910x8-G | 72.19 | 90.75 | 9.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/repvgg/repvgg_a0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/repvgg/repvgg_a0-6e71139d.ckpt) |
| repvgg_a1 | D910x8-G | 74.19 | 91.89 | 14.12 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/repvgg/repvgg_a1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/repvgg/repvgg_a1-539513ac.ckpt) |
@@ -95,47 +97,48 @@
| repvgg_b1g2 | D910x8-G | 78.03 | 94.09 | 45.85 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/repvgg/repvgg_b1g2_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/repvgg/repvgg_b1g2-f0dc714f.ckpt) |
| repvgg_b1g4 | D910x8-G | 77.64 | 94.03 | 40.03 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/repvgg/repvgg_b1g4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/repvgg/repvgg_b1g4-bd93230e.ckpt) |
| repvgg_b2g4 | D910x8-G | 78.8 | 94.36 | 61.84 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/repvgg/repvgg_b2g4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/repvgg/repvgg_b2g4-e79eeadd.ckpt) |
-| Res2Net50 | D910x8-G | 79.35 | 94.64 | 25.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50-f42cf71b.ckpt) |
-| Res2Net101 | D910x8-G | 79.56 | 94.70 | 45.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101-8ae60132.ckpt) |
-| Res2Net50-v1b | D910x8-G | 80.32 | 95.09 | 25.77 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_50_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50_v1b-99304e92.ckpt) |
-| Res2Net101-v1b | D910x8-G | 81.14 | 95.41 | 45.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_101_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101_v1b-7e6db001.ckpt) |
-| ResNeSt50 | D910x8-G | 80.81 | 95.16 | 27.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest50-f2e7fc9c.ckpt) |
-| ResNeSt101 | D910x8-G | 82.90 | 96.12 | 48.41 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest101-7cc5c258.ckpt) |
-| ResNet18 | D910x8-G | 70.21 | 89.62 | 11.70 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet18-1e65cd21.ckpt) |
-| ResNet34 | D910x8-G | 74.15 | 91.98 | 21.81 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet34-f297d27e.ckpt) |
-| ResNet50 | D910x8-G | 76.69 | 93.50 | 25.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet50-e0733ab8.ckpt) |
-| ResNet101 | D910x8-G | 78.24 | 94.09 |44.65 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet101-689c5e77.ckpt) |
-| ResNet152 | D910x8-G | 78.72 | 94.45 | 60.34| [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_152_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet152-beb689d8.ckpt) |
-| ResNetv2_50 | D910x8-G | 76.90 | 93.37 | 25.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_50-3c2f143b.ckpt) |
-| ResNetv2_101 | D910x8-G | 78.48 | 94.23 | 44.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_101-5d4c49a1.ckpt) |
-| ResNeXt50_32x4d | D910x8-G | 78.53 | 94.10 | 25.10 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext50_32x4d-af8aba16.ckpt) |
-| ResNeXt101_32x4d | D910x8-G | 79.83 | 94.80 | 44.32 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_32x4d-3c1e9c51.ckpt) |
-| ResNeXt101_64x4d | D910x8-G | 80.30 | 94.82 | 83.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_64x4d-8929255b.ckpt) |
-| ResNeXt152_64x4d | D910x8-G | 80.52 | 95.00 | 115.27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext152_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext152_64x4d-3aba275c.ckpt) |
-| rexnet_x09 | D910x8-G | 77.06 | 93.41 | 4.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x09_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_09-da498331.ckpt) |
-| rexnet_x10 | D910x8-G | 77.38 | 93.60 | 4.84 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x10_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_10-c5fb2dc7.ckpt) |
-| rexnet_x13 | D910x8-G | 79.06 | 94.28 | 7.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x13_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_13-a49c41e5.ckpt) |
-| rexnet_x15 | D910x8-G | 79.95 | 94.74 | 9.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x15_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_15-37a931d3.ckpt) |
-| rexnet_x20 | D910x8-G | 80.64 | 94.99 | 16.45 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x20_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_20-c5810914.ckpt) |
-| SEResNet18 | D910x8-G | 71.81 | 90.49 | 11.80 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet18-7880643b.ckpt) |
-| SEResNet34 | D910x8-G | 75.38 | 92.50 | 21.98 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet34-8179d3c9.ckpt) |
-| SEResNet50 | D910x8-G | 78.32 | 94.07 | 28.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet50-ff9cd214.ckpt) |
-| SEResNeXt26_32x4d | D910x8-G | 77.17 | 93.42 | 16.83 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext26_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext26_32x4d-5361f5b6.ckpt) |
-| SEResNeXt50_32x4d | D910x8-G | 78.71 | 94.36 | 27.63 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext50_32x4d-fdc35aca.ckpt) |
-| shufflenet_v1_g3_x0_5 | D910x8-G | 57.05 | 79.73 | 0.73 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_05-42cfe109.ckpt) |
-| shufflenet_v1_g3_x1_0 | D910x8-G | 67.77 | 87.73 | 1.89 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_10-245f0ccf.ckpt) |
-| shufflenet_v2_x0_5 | D910x8-G | 60.68 | 82.44 | 1.37 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_05-a53c62b9.ckpt) |
-| shufflenet_v2_x1_0 | D910x8-G | 69.51 | 88.67 | 2.29 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_10-e6b8c4fe.ckpt) |
-| shufflenet_v2_x1_5 | D910x8-G | 72.59 | 90.79 | 3.53 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_1.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_15-e717dd88.ckpt) |
-| shufflenet_v2_x2_0 | D910x8-G | 75.14 | 92.13 | 7.44 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_2.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_20-ada6a359.ckpt) |
-| skresnet18 | D910x8-G | 73.09 | 91.20 | 11.97 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet18-868228e5.ckpt) |
-| skresnet34 | D910x8-G | 76.71 | 93.10 | 22.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet34-d668b629.ckpt) |
-| skresnet50_32x4d | D910x8-G | 79.08 | 94.60 | 37.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnext50_32x4d-395413a2.ckpt) |
-| squeezenet_1.0 | D910x8-G | 59.01 | 81.01 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0-e2d78c4a.ckpt) |
-| squeezenet_1.0 | GPUx8-G | 58.83 | 81.08 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.0_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0_gpu-685f5941.ckpt) |
-| squeezenet_1.1 | D910x8-G | 58.44 | 80.84 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1-da256d3a.ckpt) |
-| squeezenet_1.1 | GPUx8-G | 59.18 | 81.41 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.1_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1_gpu-0e33234a.ckpt) |
+| res2net50 | D910x8-G | 79.35 | 94.64 | 25.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50-f42cf71b.ckpt) |
+| res2net101 | D910x8-G | 79.56 | 94.70 | 45.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101-8ae60132.ckpt) |
+| res2net50_v1b | D910x8-G | 80.32 | 95.09 | 25.77 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net50_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50_v1b-99304e92.ckpt) |
+| res2net101_v1b | D910x8-G | 81.14 | 95.41 | 45.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net101_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101_v1b-7e6db001.ckpt) |
+| resnest50 | D910x8-G | 80.81 | 95.16 | 27.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest50-f2e7fc9c.ckpt) |
+| resnest101 | D910x8-G | 82.90 | 96.12 | 48.41 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest101-7cc5c258.ckpt) |
+| resnet18 | D910x8-G | 70.21 | 89.62 | 11.70 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet18-1e65cd21.ckpt) |
+| resnet34 | D910x8-G | 74.15 | 91.98 | 21.81 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet34-f297d27e.ckpt) |
+| resnet50 | D910x8-G | 76.69 | 93.50 | 25.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet50-e0733ab8.ckpt) |
+| resnet101 | D910x8-G | 78.24 | 94.09 | 44.65 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet101-689c5e77.ckpt) |
+| resnet152 | D910x8-G | 78.72 | 94.45 | 60.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet152_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet152-beb689d8.ckpt) |
+| resnetv2_50 | D910x8-G | 76.90 | 93.37 | 25.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_50-3c2f143b.ckpt) |
+| resnetv2_101 | D910x8-G | 78.48 | 94.23 | 44.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_101-5d4c49a1.ckpt) |
+| resnext50_32x4d | D910x8-G | 78.53 | 94.10 | 25.10 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext50_32x4d-af8aba16.ckpt) |
+| resnext101_32x4d | D910x8-G | 79.83 | 94.80 | 44.32 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_32x4d-3c1e9c51.ckpt) |
+| resnext101_64x4d | D910x8-G | 80.30 | 94.82 | 83.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_64x4d-8929255b.ckpt) |
+| resnext152_64x4d | D910x8-G | 80.52 | 95.00 | 115.27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext152_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext152_64x4d-3aba275c.ckpt) |
+| rexnet_09 | D910x8-G | 77.06 | 93.41 | 4.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_09_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_09-da498331.ckpt) |
+| rexnet_10 | D910x8-G | 77.38 | 93.60 | 4.84 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_10_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_10-c5fb2dc7.ckpt) |
+| rexnet_13 | D910x8-G | 79.06 | 94.28 | 7.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_13_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_13-a49c41e5.ckpt) |
+| rexnet_15 | D910x8-G | 79.95 | 94.74 | 9.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_15_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_15-37a931d3.ckpt) |
+| rexnet_20 | D910x8-G | 80.64 | 94.99 | 16.45 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_20_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_20-c5810914.ckpt) |
+| seresnet18 | D910x8-G | 71.81 | 90.49 | 11.80 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet18-7880643b.ckpt) |
+| seresnet34 | D910x8-G | 75.38 | 92.50 | 21.98 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet34-8179d3c9.ckpt) |
+| seresnet50 | D910x8-G | 78.32 | 94.07 | 28.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet50-ff9cd214.ckpt) |
+| seresnext26_32x4d | D910x8-G | 77.17 | 93.42 | 16.83 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext26_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext26_32x4d-5361f5b6.ckpt) |
+| seresnext50_32x4d | D910x8-G | 78.71 | 94.36 | 27.63 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext50_32x4d-fdc35aca.ckpt) |
+| shufflenet_v1_g3_05 | D910x8-G | 57.05 | 79.73 | 0.73 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_g3_05_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_05-42cfe109.ckpt) |
+| shufflenet_v1_g3_10 | D910x8-G | 67.77 | 87.73 | 1.89 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_g3_10_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_10-245f0ccf.ckpt) |
+| shufflenet_v2_x0_5 | D910x8-G | 60.53 | 82.11 | 1.37 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_x0_5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x0_5-8c841061.ckpt) |
+| shufflenet_v2_x1_0 | D910x8-G | 69.47 | 88.88 | 2.29 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_x1_0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x1_0-0da4b7fa.ckpt) |
+| shufflenet_v2_x1_5 | D910x8-G | 72.79 | 90.93 | 3.53 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_x1_5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x1_5-00b56131.ckpt) |
+| shufflenet_v2_x2_0 | D910x8-G | 75.07 | 92.08 | 7.44 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_x2_0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x2_0-ed8e698d.ckpt) |
+| skresnet18 | D910x8-G | 73.09 | 91.20 | 11.97 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet18-868228e5.ckpt) |
+| skresnet34 | D910x8-G | 76.71 | 93.10 | 22.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet34-d668b629.ckpt) |
+| skresnext50_32x4d | D910x8-G | 79.08 | 94.60 | 37.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnext50_32x4d-395413a2.ckpt) |
+| squeezenet1_0 | D910x8-G | 59.01 | 81.01 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet1_0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0-e2d78c4a.ckpt) |
+| squeezenet1_0 | GPUx8-G | 58.83 | 81.08 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet1_0_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0_gpu-685f5941.ckpt) |
+| squeezenet1_1 | D910x8-G | 58.44 | 80.84 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet1_1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1-da256d3a.ckpt) |
+| squeezenet1_1 | GPUx8-G | 59.18 | 81.41 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet1_1_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1_gpu-0e33234a.ckpt) |
| swin_tiny | D910x8-G | 80.82 | 94.80 | 33.38 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/swintransformer/swin_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/swin/swin_tiny-0ff2f96d.ckpt) |
+| swinv2_tiny_window8 | D910x8-G | 81.42 | 95.43 | 28.78 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/swintransformerv2/swinv2_tiny_window8_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/swinv2/swinv2_tiny_window8-3ef8b787.ckpt) |
| vgg11 | D910x8-G | 71.86 | 90.50 | 132.86 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vgg/vgg11_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vgg/vgg11-ef31d161.ckpt) |
| vgg13 | D910x8-G | 72.87 | 91.02 | 133.04 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vgg/vgg13_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vgg/vgg13-da805e6e.ckpt) |
| vgg16 | D910x8-G | 74.61 | 91.87 | 138.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vgg/vgg16_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vgg/vgg16-95697531.ckpt) |
@@ -144,11 +147,12 @@
| visformer_tiny_v2 | D910x8-G | 78.82 | 94.41 | 9.38 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/visformer/visformer_tiny_v2_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/visformer/visformer_tiny_v2-6711a758.ckpt) |
| visformer_small | D910x8-G | 81.76 | 95.88 | 40.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/visformer/visformer_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/visformer/visformer_small-6c83b6db.ckpt) |
| visformer_small_v2 | D910x8-G | 82.17 | 95.90 | 23.52 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/visformer/visformer_small_v2_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/visformer/visformer_small_v2-63674ade.ckpt) |
-| vit_b_32_224 | D910x8-G | 75.86 | 92.08 | 87.46 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vit/vit_b32_224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vit/vit_b_32_224-7553218f.ckpt) |
-| vit_l_16_224 | D910x8-G | 76.34 | 92.79 | 303.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vit/vit_l16_224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vit/vit_l_16_224-f02b2487.ckpt) |
-| vit_l_32_224 | D910x8-G | 73.71 | 90.92 | 305.52 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vit/vit_b32_224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vit/vit_l_32_224-3a961018.ckpt) |
-| Xception | D910x8-G | 79.01 | 94.25 | 22.91 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xception/xception_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xception/xception-2c1e711df.ckpt) |
-| xcit_tiny_12_p16 | D910x8-G | 77.67 | 93.79 | 7.00 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xcit/xcit_tiny_12_p16_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xcit/xcit_tiny_12_p16_224-1b1c9301.ckpt) |
+| vit_b_32_224 | D910x8-G | 75.86 | 92.08 | 87.46 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vit/vit_b_32_224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vit/vit_b_32_224-7553218f.ckpt) |
+| vit_l_16_224 | D910x8-G | 76.34 | 92.79 | 303.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vit/vit_l_16_224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vit/vit_l_16_224-f02b2487.ckpt) |
+| vit_l_32_224 | D910x8-G | 73.71 | 90.92 | 305.52 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/vit/vit_b_32_224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/vit/vit_l_32_224-3a961018.ckpt) |
+| volo_d1 | D910x8-G | 82.59 | 95.99 | 27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/volo/volo_d1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/volo/volo_d1-c7efada9.ckpt) |
+| xception | D910x8-G | 79.01 | 94.25 | 22.91 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xception/xception_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xception/xception-2c1e711df.ckpt) |
+| xcit_tiny_12_p16_224 | D910x8-G | 77.67 | 93.79 | 7.00 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xcit/xcit_tiny_12_p16_224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xcit/xcit_tiny_12_p16_224-1b1c9301.ckpt) |
#### Notes
- Context: The training context is denoted as {device}x{pieces}-{MS mode}, where the MindSpore mode can be G (graph mode) or F (PyNative mode with ms_function). For example, D910x8-G means training on 8 Ascend 910 NPUs in graph mode.
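
The G/F suffix in these context tags corresponds to MindSpore's execution mode. As a minimal illustration (an assumption about how the surrounding training scripts might use the tag, not code taken from this repository), the suffix could be translated into a `mindspore.set_context` call:

```python
# Illustrative sketch: map the mode suffix of a context tag such as "D910x8-G"
# onto MindSpore's execution mode. Assumes an Ascend environment is available.
import mindspore as ms

def apply_context_tag(tag: str) -> None:
    """Configure the execution mode from a tag like 'D910x8-G'."""
    mode_flag = tag.rsplit("-", 1)[-1]  # 'G' or 'F'
    mode = ms.GRAPH_MODE if mode_flag == "G" else ms.PYNATIVE_MODE
    ms.set_context(mode=mode, device_target="Ascend")

apply_context_tag("D910x8-G")  # graph mode on Ascend 910
```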
diff --git a/configs/bit/bit_resnet101_ascend.yaml b/configs/bit/bit_resnet101_ascend.yaml
index a05551144..314113366 100644
--- a/configs/bit/bit_resnet101_ascend.yaml
+++ b/configs/bit/bit_resnet101_ascend.yaml
@@ -18,7 +18,7 @@ hflip: 0.5
crop_pct: 0.875
# model
-model: 'BiTresnet101'
+model: 'BiT_resnet101'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/bit/bit_resnet50_ascend.yaml b/configs/bit/bit_resnet50_ascend.yaml
index 2fb689978..148c08e55 100644
--- a/configs/bit/bit_resnet50_ascend.yaml
+++ b/configs/bit/bit_resnet50_ascend.yaml
@@ -18,7 +18,7 @@ hflip: 0.5
crop_pct: 0.875
# model
-model: 'BiTresnet50'
+model: 'BiT_resnet50'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/bit/bit_resnet50x3_ascend.yaml b/configs/bit/bit_resnet50x3_ascend.yaml
index 47bb0aeba..baf959281 100644
--- a/configs/bit/bit_resnet50x3_ascend.yaml
+++ b/configs/bit/bit_resnet50x3_ascend.yaml
@@ -20,7 +20,7 @@ crop_pct: 0.875
auto_augment: "randaug-m7-mstd0.5"
# model
-model: 'BiTresnet50x3'
+model: 'BiT_resnet50x3'
num_classes: 1000
pretrained: False
ckpt_path: ''
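
The `model:` field in these recipes is the registry name resolved by MindCV's model factory, so the renamed identifiers are what user code should now reference. A minimal sketch, assuming `mindcv.create_model` as the factory entry point:

```python
# Minimal sketch: the yaml 'model' value is the registry name passed to the
# model factory; the old 'BiTresnet50' spelling no longer resolves after this rename.
import mindcv

net = mindcv.create_model("BiT_resnet50", num_classes=1000, pretrained=False)
print(type(net).__name__)
```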
diff --git a/configs/convnext/README.md b/configs/convnext/README.md
index a9827e56d..f37fb5971 100644
--- a/configs/convnext/README.md
+++ b/configs/convnext/README.md
@@ -25,9 +25,9 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|----------------|-----------|-----------|-----------|------------|-------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|
-| ConvNeXt_tiny | D910x64-G | 81.91 | 95.79 | 28.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_tiny-ae5ff8d7.ckpt) |
-| ConvNeXt_small | D910x64-G | 83.40 | 96.36 | 50.22 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_small-e23008f3.ckpt) |
-| ConvNeXt_base | D910x64-G | 83.32 | 96.24 | 88.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_base_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_base-ee3544b8.ckpt) |
+| convnext_tiny | D910x64-G | 81.91 | 95.79 | 28.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_tiny-ae5ff8d7.ckpt) |
+| convnext_small | D910x64-G | 83.40 | 96.36 | 50.22 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_small-e23008f3.ckpt) |
+| convnext_base | D910x64-G | 83.32 | 96.24 | 88.59 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnext/convnext_base_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnext/convnext_base-ee3544b8.ckpt) |
diff --git a/configs/convnextv2/README.md b/configs/convnextv2/README.md
index 443f6bb53..03e1e4a6a 100644
--- a/configs/convnextv2/README.md
+++ b/configs/convnextv2/README.md
@@ -22,9 +22,9 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|-----------------|----------|-----------|-----------|------------|----------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|
-| ConvNeXtV2_tiny | D910x8-G | 82.43 | 95.98 | 28.64 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnextv2/convnextv2_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnextv2/convnextv2_tiny-d441ba2c.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|------------------|----------|-----------|-----------|------------|----------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|
+| convnextv2_tiny | D910x8-G | 82.43 | 95.98 | 28.64 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/convnextv2/convnextv2_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/convnextv2/convnextv2_tiny-d441ba2c.ckpt) |
diff --git a/configs/crossvit/README.md b/configs/crossvit/README.md
index b602b1335..a3d843f5f 100644
--- a/configs/crossvit/README.md
+++ b/configs/crossvit/README.md
@@ -1,4 +1,4 @@
-# Crossvit
+# CrossViT
> [CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification](https://arxiv.org/abs/2103.14899)
## Introduction
@@ -77,7 +77,7 @@ python train.py --config configs/crossvit/crossvit_15_ascend.yaml --data_dir /pa
To validate the accuracy of the trained model, you can use `validate.py` and pass the checkpoint path with `--ckpt_path`.
```
-python validate.py -c configs/crossvit/crossvit15_ascend.yaml --data_dir /path/to/imagenet --ckpt_path /path/to/ckpt
+python validate.py -c configs/crossvit/crossvit_15_ascend.yaml --data_dir /path/to/imagenet --ckpt_path /path/to/ckpt
```
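
The same renamed identifier can also be used from Python instead of the CLI above. This is a sketch under the assumption that `mindcv.create_model` accepts a `checkpoint_path` keyword for loading trained weights; the `/path/to/ckpt` placeholder mirrors the command above:

```python
# Sketch: programmatic counterpart of the validate.py call above.
import mindcv

model = mindcv.create_model(
    "crossvit_15",                    # uniform model name used in the yaml recipe
    num_classes=1000,
    checkpoint_path="/path/to/ckpt",  # placeholder, as in the command above
)
model.set_train(False)                # switch to evaluation behaviour
```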
### Deployment
diff --git a/configs/crossvit/crossvit_15_ascend.yaml b/configs/crossvit/crossvit_15_ascend.yaml
index 90adbe6d9..5dc38c918 100644
--- a/configs/crossvit/crossvit_15_ascend.yaml
+++ b/configs/crossvit/crossvit_15_ascend.yaml
@@ -28,7 +28,7 @@ crop_pct: 0.935
ema: True
# model
-model: 'crossvit15'
+model: 'crossvit_15'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/crossvit/crossvit_18_ascend.yaml b/configs/crossvit/crossvit_18_ascend.yaml
index 7f696e5d1..44ee26838 100644
--- a/configs/crossvit/crossvit_18_ascend.yaml
+++ b/configs/crossvit/crossvit_18_ascend.yaml
@@ -28,7 +28,7 @@ crop_pct: 0.935
ema: True
# model
-model: 'crossvit18'
+model: 'crossvit_18'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/crossvit/crossvit_9_ascend.yaml b/configs/crossvit/crossvit_9_ascend.yaml
index fad9401d8..74a8e4b29 100644
--- a/configs/crossvit/crossvit_9_ascend.yaml
+++ b/configs/crossvit/crossvit_9_ascend.yaml
@@ -27,7 +27,7 @@ color_jitter: 0.4
crop_pct: 0.935
# model
-model: 'crossvit9'
+model: 'crossvit_9'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/densenet/README.md b/configs/densenet/README.md
index ae61d98f4..1cbe576e7 100644
--- a/configs/densenet/README.md
+++ b/configs/densenet/README.md
@@ -37,12 +37,12 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|--------------|----------|-----------|-----------|------------|-----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|
-| densenet_121 | D910x8-G | 75.64 | 92.84 | 8.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_121_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet121-120_5004_Ascend.ckpt) |
-| densenet_161 | D910x8-G | 79.09 | 94.66 | 28.90 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_161_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet161-120_5004_Ascend.ckpt) |
-| densenet_169 | D910x8-G | 77.26 | 93.71 | 14.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_169_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet169-120_5004_Ascend.ckpt) |
-| densenet_201 | D910x8-G | 78.14 | 94.08 | 20.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_201_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet201-120_5004_Ascend.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|-------------|----------|-----------|-----------|------------|-----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------|
+| densenet121 | D910x8-G | 75.64 | 92.84 | 8.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_121_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet121-120_5004_Ascend.ckpt) |
+| densenet161 | D910x8-G | 79.09 | 94.66 | 28.90 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_161_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet161-120_5004_Ascend.ckpt) |
+| densenet169 | D910x8-G | 77.26 | 93.71 | 14.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_169_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet169-120_5004_Ascend.ckpt) |
+| densenet201 | D910x8-G | 78.14 | 94.08 | 20.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/densenet/densenet_201_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/densenet/densenet201-120_5004_Ascend.ckpt) |
diff --git a/configs/dpn/README.md b/configs/dpn/README.md
index 99a9cd3df..5adc2e2db 100644
--- a/configs/dpn/README.md
+++ b/configs/dpn/README.md
@@ -32,12 +32,12 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|-------|----------|-----------|-----------|------------|------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------|
-| dpn92 | D910x8-G | 79.46 | 94.49 | 37.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn92_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn92-e3e0fca.ckpt) |
-| dpn98 | D910x8-G | 79.94 | 94.57 | 61.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn98_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn98-119a8207.ckpt) |
-| dpn107 | D910x8-G | 80.05 | 94.74 | 87.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn107_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn107-7d7df07b.ckpt) |
-| dpn131 | D910x8-G | 80.07 | 94.72 | 79.48 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn131_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn131-47f084b3.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|---------|----------|-----------|-----------|------------|------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------|
+| dpn92 | D910x8-G | 79.46 | 94.49 | 37.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn92_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn92-e3e0fca.ckpt) |
+| dpn98 | D910x8-G | 79.94 | 94.57 | 61.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn98_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn98-119a8207.ckpt) |
+| dpn107 | D910x8-G | 80.05 | 94.74 | 87.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn107_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn107-7d7df07b.ckpt) |
+| dpn131 | D910x8-G | 80.07 | 94.72 | 79.48 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/dpn/dpn131_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/dpn/dpn131-47f084b3.ckpt) |
diff --git a/configs/ghostnet/README.md b/configs/ghostnet/README.md
index 2f9267af3..034133a76 100644
--- a/configs/ghostnet/README.md
+++ b/configs/ghostnet/README.md
@@ -29,9 +29,9 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|--------------|----------|-----------|-----------|------------|-----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------|
-| GhostNet_050 | D910x8-G | 66.03 | 86.64 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_050_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_050-85b91860.ckpt) |
-| GhostNet_100 | D910x8-G | 73.78 | 91.66 | 5.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_100-bef8025a.ckpt) |
-| GhostNet_130 | D910x8-G | 75.50 | 92.56 | 7.39 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_130_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_130-cf4c235c.ckpt) |
+| ghostnet_050 | D910x8-G | 66.03 | 86.64 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_050_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_050-85b91860.ckpt) |
+| ghostnet_100 | D910x8-G | 73.78 | 91.66 | 5.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_100_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_100-bef8025a.ckpt) |
+| ghostnet_130 | D910x8-G | 75.50 | 92.56 | 7.39 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/ghostnet/ghostnet_130_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/ghostnet/ghostnet_130-cf4c235c.ckpt) |
diff --git a/configs/googlenet/README.md b/configs/googlenet/README.md
index ee429ff96..bc55bc597 100644
--- a/configs/googlenet/README.md
+++ b/configs/googlenet/README.md
@@ -14,7 +14,7 @@ training results.[[1](#references)]
- Figure 1. Architecture of GoogLENet [1]
+ Figure 1. Architecture of GoogLeNet [1]
## Results
@@ -25,7 +25,7 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|-----------|----------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
-| GoogLeNet | D910x8-G | 72.68 | 90.89 | 6.99 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/googlenet/googlenet_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/googlenet/googlenet-5552fcd3.ckpt) |
+| googlenet | D910x8-G | 72.68 | 90.89 | 6.99 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/googlenet/googlenet_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/googlenet/googlenet-5552fcd3.ckpt) |
diff --git a/configs/inceptionv3/README.md b/configs/inceptionv3/README.md
index 0cd5da3cc..f4d7cd36c 100644
--- a/configs/inceptionv3/README.md
+++ b/configs/inceptionv3/README.md
@@ -3,7 +3,7 @@
## Introduction
-InceptionV3 is an upgraded version of GoogleNet. One of the most important improvements of V3 is Factorization, which
+InceptionV3 is an upgraded version of GoogLeNet. One of the most important improvements of V3 is Factorization, which
decomposes a 7x7 convolution into two one-dimensional convolutions (1x7, 7x1) and does the same for 3x3 (1x3, 3x1). This both accelerates
computation (the spare compute can be used to deepen the network) and splits one convolution into two, which further
increases the network depth and the nonlinearity of the network. It is also worth noting
@@ -26,7 +26,7 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|--------------|----------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
-| Inception_v3 | D910x8-G | 79.11 | 94.40 | 27.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv3/inception_v3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v3/inception_v3-38f67890.ckpt) |
+| inception_v3 | D910x8-G | 79.11 | 94.40 | 27.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv3/inception_v3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v3/inception_v3-38f67890.ckpt) |
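
The factorization described in this README's introduction (replacing a 7x7 convolution with a 1x7 followed by a 7x1) can be illustrated with two MindSpore layers. This is a standalone sketch of the idea only, not mindcv's actual InceptionV3 implementation:

```python
# Sketch of the factorization idea; not the implementation in mindcv/models.
import mindspore.nn as nn

class FactorizedConv7x7(nn.Cell):
    """Covers a 7x7 receptive field with two 1-D convolutions."""

    def __init__(self, in_channels: int, out_channels: int) -> None:
        super().__init__()
        self.conv_1x7 = nn.Conv2d(in_channels, out_channels, kernel_size=(1, 7), pad_mode="same")
        self.conv_7x1 = nn.Conv2d(out_channels, out_channels, kernel_size=(7, 1), pad_mode="same")

    def construct(self, x):
        # With C input and output channels this uses roughly 14*C^2 weights per
        # position instead of 49*C^2, and a nonlinearity can be inserted in between.
        return self.conv_7x1(self.conv_1x7(x))
```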
diff --git a/configs/inceptionv4/README.md b/configs/inceptionv4/README.md
index 34521e5a8..f9df0ba21 100644
--- a/configs/inceptionv4/README.md
+++ b/configs/inceptionv4/README.md
@@ -23,7 +23,7 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|--------------|----------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
-| Inception_v4 | D910x8-G | 80.88 | 95.34 | 42.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv4/inception_v4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v4/inception_v4-db9c45b3.ckpt) |
+| inception_v4 | D910x8-G | 80.88 | 95.34 | 42.74 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/inceptionv4/inception_v4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/inception_v4/inception_v4-db9c45b3.ckpt) |
diff --git a/configs/mixnet/README.md b/configs/mixnet/README.md
index 146dcdfac..2a5fbac86 100644
--- a/configs/mixnet/README.md
+++ b/configs/mixnet/README.md
@@ -25,9 +25,9 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|----------|----------|-----------|-----------|------------|-----------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------|
-| MixNet_s | D910x8-G | 75.52 | 92.52 | 4.17 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_s-2a5ef3a3.ckpt) |
-| MixNet_m | D910x8-G | 76.64 | 93.05 | 5.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_m_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_m-74cc4cb1.ckpt) |
-| MixNet_l | D910x8-G | 78.73 | 94.31 | 7.38 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_l_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_l-978edf2b.ckpt) |
+| mixnet_s | D910x8-G | 75.52 | 92.52 | 4.17 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_s-2a5ef3a3.ckpt) |
+| mixnet_m | D910x8-G | 76.64 | 93.05 | 5.06 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_m_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_m-74cc4cb1.ckpt) |
+| mixnet_l | D910x8-G | 78.73 | 94.31 | 7.38 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mixnet/mixnet_l_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mixnet/mixnet_l-978edf2b.ckpt) |
diff --git a/configs/mnasnet/README.md b/configs/mnasnet/README.md
index ec3161bfa..7c74b0640 100644
--- a/configs/mnasnet/README.md
+++ b/configs/mnasnet/README.md
@@ -18,13 +18,13 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|--------------|----------|-----------|-----------|------------|----------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------|
-| MnasNet_0.5 | D910x8-G | 68.07 | 88.09 | 2.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_050-7d8bf4db.ckpt) |
-| MnasNet_0.75 | D910x8-G | 71.81 | 90.53 | 3.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_075-465d366d.ckpt) |
-| MnasNet_1_0 | D910x8-G | 74.28 | 91.70 | 4.42 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_100-1bcf43f8.ckpt) |
-| MnasNet_1_3 | D910x8-G | 75.65 | 92.64 | 6.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_130-a43a150a.ckpt) |
-| MnasNet_1.4 | D910x8-G | 76.01 | 92.83 | 7.16 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_140-7e20bb30.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|-------------|----------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------|
+| mnasnet_050 | D910x8-G | 68.07 | 88.09 | 2.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_050-7d8bf4db.ckpt) |
+| mnasnet_075 | D910x8-G | 71.81 | 90.53 | 3.20 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_075-465d366d.ckpt) |
+| mnasnet_100 | D910x8-G | 74.28 | 91.70 | 4.42 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_100-1bcf43f8.ckpt) |
+| mnasnet_130 | D910x8-G | 75.65 | 92.64 | 6.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_130-a43a150a.ckpt) |
+| mnasnet_140 | D910x8-G | 76.01 | 92.83 | 7.16 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mnasnet/mnasnet_1.4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_140-7e20bb30.ckpt) |
diff --git a/configs/mnasnet/mnasnet_0.5_ascend.yaml b/configs/mnasnet/mnasnet_0.5_ascend.yaml
index 4c525d66f..bbce1a8bf 100644
--- a/configs/mnasnet/mnasnet_0.5_ascend.yaml
+++ b/configs/mnasnet/mnasnet_0.5_ascend.yaml
@@ -21,7 +21,7 @@ interpolation: 'bicubic'
crop_pct: 0.875
# model
-model: 'mnasnet0_5'
+model: 'mnasnet_050'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mnasnet/mnasnet_0.75_ascend.yaml b/configs/mnasnet/mnasnet_0.75_ascend.yaml
index 04e819de3..814c35807 100644
--- a/configs/mnasnet/mnasnet_0.75_ascend.yaml
+++ b/configs/mnasnet/mnasnet_0.75_ascend.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mnasnet0_75'
+model: 'mnasnet_075'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mnasnet/mnasnet_0.75_gpu.yaml b/configs/mnasnet/mnasnet_0.75_gpu.yaml
index 7c8d3fe60..7bbbadd9a 100644
--- a/configs/mnasnet/mnasnet_0.75_gpu.yaml
+++ b/configs/mnasnet/mnasnet_0.75_gpu.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mnasnet0_75'
+model: 'mnasnet_075'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mnasnet/mnasnet_1.0_ascend.yaml b/configs/mnasnet/mnasnet_1.0_ascend.yaml
index 2deb8de0b..771fdda34 100644
--- a/configs/mnasnet/mnasnet_1.0_ascend.yaml
+++ b/configs/mnasnet/mnasnet_1.0_ascend.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mnasnet1_0'
+model: 'mnasnet_100'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mnasnet/mnasnet_1.0_gpu.yaml b/configs/mnasnet/mnasnet_1.0_gpu.yaml
index 01be4dcc8..4efd8240b 100644
--- a/configs/mnasnet/mnasnet_1.0_gpu.yaml
+++ b/configs/mnasnet/mnasnet_1.0_gpu.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mnasnet1_0'
+model: 'mnasnet_100'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mnasnet/mnasnet_1.3_ascend.yaml b/configs/mnasnet/mnasnet_1.3_ascend.yaml
index 80457b696..5dcc2624f 100644
--- a/configs/mnasnet/mnasnet_1.3_ascend.yaml
+++ b/configs/mnasnet/mnasnet_1.3_ascend.yaml
@@ -21,7 +21,7 @@ interpolation: 'bicubic'
crop_pct: 0.875
# model
-model: 'mnasnet1_3'
+model: 'mnasnet_130'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mnasnet/mnasnet_1.4_ascend.yaml b/configs/mnasnet/mnasnet_1.4_ascend.yaml
index 489031e87..dc84463ab 100644
--- a/configs/mnasnet/mnasnet_1.4_ascend.yaml
+++ b/configs/mnasnet/mnasnet_1.4_ascend.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mnasnet1_4'
+model: 'mnasnet_140'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mnasnet/mnasnet_1.4_gpu.yaml b/configs/mnasnet/mnasnet_1.4_gpu.yaml
index 54bbe7705..7d30e3568 100644
--- a/configs/mnasnet/mnasnet_1.4_gpu.yaml
+++ b/configs/mnasnet/mnasnet_1.4_gpu.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mnasnet1_4'
+model: 'mnasnet_140'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/README.md b/configs/mobilenetv1/README.md
index ceb34a6ab..9940ecbe0 100644
--- a/configs/mobilenetv1/README.md
+++ b/configs/mobilenetv1/README.md
@@ -18,12 +18,12 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|------------------|----------|-----------|-----------|------------|-------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
-| MobileNet_v1_025 | D910x8-G | 53.87 | 77.66 | 0.47 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.25_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_025-d3377fba.ckpt) |
-| MobileNet_v1_050 | D910x8-G | 65.94 | 86.51 | 1.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_050-23e9ddbe.ckpt) |
-| MobileNet_v1_075 | D910x8-G | 70.44 | 89.49 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_075-5bed0c73.ckpt) |
-| MobileNet_v1_100 | D910x8-G | 72.95 | 91.01 | 4.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_100-91c7b206.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|------------------|----------|-----------|-----------|------------|------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
+| mobilenet_v1_025 | D910x8-G | 53.87 | 77.66 | 0.47 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.25_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_025-d3377fba.ckpt) |
+| mobilenet_v1_050 | D910x8-G | 65.94 | 86.51 | 1.34 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_050-23e9ddbe.ckpt) |
+| mobilenet_v1_075 | D910x8-G | 70.44 | 89.49 | 2.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_075-5bed0c73.ckpt) |
+| mobilenet_v1_100 | D910x8-G | 72.95 | 91.01 | 4.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv1/mobilenet_v1_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_100-91c7b206.ckpt) |
diff --git a/configs/mobilenetv1/mobilenet_v1_0.25_ascend.yaml b/configs/mobilenetv1/mobilenet_v1_0.25_ascend.yaml
index 37cb56f0a..a089bb6b3 100644
--- a/configs/mobilenetv1/mobilenet_v1_0.25_ascend.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_0.25_ascend.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_025_224'
+model: 'mobilenet_v1_025'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/mobilenet_v1_0.25_gpu.yaml b/configs/mobilenetv1/mobilenet_v1_0.25_gpu.yaml
index 37cb56f0a..a089bb6b3 100644
--- a/configs/mobilenetv1/mobilenet_v1_0.25_gpu.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_0.25_gpu.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_025_224'
+model: 'mobilenet_v1_025'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/mobilenet_v1_0.5_ascend.yaml b/configs/mobilenetv1/mobilenet_v1_0.5_ascend.yaml
index eb39df9a2..e9c0445cb 100644
--- a/configs/mobilenetv1/mobilenet_v1_0.5_ascend.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_0.5_ascend.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_050_224'
+model: 'mobilenet_v1_050'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/mobilenet_v1_0.5_gpu.yaml b/configs/mobilenetv1/mobilenet_v1_0.5_gpu.yaml
index eb39df9a2..e9c0445cb 100644
--- a/configs/mobilenetv1/mobilenet_v1_0.5_gpu.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_0.5_gpu.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_050_224'
+model: 'mobilenet_v1_050'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/mobilenet_v1_0.75_ascend.yaml b/configs/mobilenetv1/mobilenet_v1_0.75_ascend.yaml
index 1934b5cab..8a37f2394 100644
--- a/configs/mobilenetv1/mobilenet_v1_0.75_ascend.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_0.75_ascend.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_075_224'
+model: 'mobilenet_v1_075'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/mobilenet_v1_0.75_gpu.yaml b/configs/mobilenetv1/mobilenet_v1_0.75_gpu.yaml
index 1934b5cab..8a37f2394 100644
--- a/configs/mobilenetv1/mobilenet_v1_0.75_gpu.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_0.75_gpu.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_075_224'
+model: 'mobilenet_v1_075'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/mobilenet_v1_1.0_ascend.yaml b/configs/mobilenetv1/mobilenet_v1_1.0_ascend.yaml
index c8e62f24d..543e65832 100644
--- a/configs/mobilenetv1/mobilenet_v1_1.0_ascend.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_1.0_ascend.yaml
@@ -20,7 +20,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_100_224'
+model: 'mobilenet_v1_100'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv1/mobilenet_v1_1.0_gpu.yaml b/configs/mobilenetv1/mobilenet_v1_1.0_gpu.yaml
index f3c06df7a..4ef48e61c 100644
--- a/configs/mobilenetv1/mobilenet_v1_1.0_gpu.yaml
+++ b/configs/mobilenetv1/mobilenet_v1_1.0_gpu.yaml
@@ -21,7 +21,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v1_100_224'
+model: 'mobilenet_v1_100'
num_classes: 1001
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv2/README.md b/configs/mobilenetv2/README.md
index 6a73779be..70d505991 100644
--- a/configs/mobilenetv2/README.md
+++ b/configs/mobilenetv2/README.md
@@ -20,11 +20,11 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|------------------|----------|-----------|-----------|------------|-------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|
-| MobileNet_v2_075 | D910x8-G | 69.98 | 89.32 | 2.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_075-bd7bd4c4.ckpt) |
-| MobileNet_v2_100 | D910x8-G | 72.27 | 90.72 | 3.54 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_100-d5532038.ckpt) |
-| MobileNet_v2_140 | D910x8-G | 75.56 | 92.56 | 6.15 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_1.4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_140-98776171.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|----------------------|----------|-----------|-----------|------------|------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|
+| mobilenet_v2_075 | D910x8-G | 69.98 | 89.32 | 2.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_0.75_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_075-bd7bd4c4.ckpt) |
+| mobilenet_v2_100 | D910x8-G | 72.27 | 90.72 | 3.54 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_100-d5532038.ckpt) |
+| mobilenet_v2_140 | D910x8-G | 75.56 | 92.56 | 6.15 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv2/mobilenet_v2_1.4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_140-98776171.ckpt) |
diff --git a/configs/mobilenetv2/mobilenet_v2_0.75_ascend.yaml b/configs/mobilenetv2/mobilenet_v2_0.75_ascend.yaml
index 5db7a9de1..82fb12395 100644
--- a/configs/mobilenetv2/mobilenet_v2_0.75_ascend.yaml
+++ b/configs/mobilenetv2/mobilenet_v2_0.75_ascend.yaml
@@ -22,7 +22,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v2_075_224'
+model: 'mobilenet_v2_075'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv2/mobilenet_v2_1.0_ascend.yaml b/configs/mobilenetv2/mobilenet_v2_1.0_ascend.yaml
index 73a87d9a7..5d271c1f7 100644
--- a/configs/mobilenetv2/mobilenet_v2_1.0_ascend.yaml
+++ b/configs/mobilenetv2/mobilenet_v2_1.0_ascend.yaml
@@ -22,7 +22,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v2_100_224'
+model: 'mobilenet_v2_100'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv2/mobilenet_v2_1.4_ascend.yaml b/configs/mobilenetv2/mobilenet_v2_1.4_ascend.yaml
index 5ed871b52..56397195a 100644
--- a/configs/mobilenetv2/mobilenet_v2_1.4_ascend.yaml
+++ b/configs/mobilenetv2/mobilenet_v2_1.4_ascend.yaml
@@ -22,7 +22,7 @@ interpolation: 'bilinear'
crop_pct: 0.875
# model
-model: 'mobilenet_v2_140_224'
+model: 'mobilenet_v2_140'
num_classes: 1000
pretrained: False
ckpt_path: ''
diff --git a/configs/mobilenetv3/README.md b/configs/mobilenetv3/README.md
index 4db0853ff..10c3330cc 100644
--- a/configs/mobilenetv3/README.md
+++ b/configs/mobilenetv3/README.md
@@ -20,10 +20,10 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|-----------------------|----------|-----------|-----------|------------|--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
-| MobileNetV3_small_100 | D910x8-G | 68.10 | 87.86 | 2.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_small_100-509c6047.ckpt) |
-| MobileNetV3_large_100 | D910x8-G | 75.23 | 92.31 | 5.51 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_large_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_large_100-1279ad5f.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|------------------------|----------|-----------|-----------|------------|--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
+| mobilenet_v3_small_100 | D910x8-G | 68.10 | 87.86 | 2.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_small_100-509c6047.ckpt) |
+| mobilenet_v3_large_100 | D910x8-G | 75.23 | 92.31 | 5.51 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/mobilenetv3/mobilenet_v3_large_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_large_100-1279ad5f.ckpt) |
diff --git a/configs/pit/README.md b/configs/pit/README.md
index e13ad181c..1242ba873 100644
--- a/configs/pit/README.md
+++ b/configs/pit/README.md
@@ -22,10 +22,10 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|--------|----------|-----------|-----------|------------|------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------|
-| PiT_ti | D910x8-G | 72.96 | 91.33 | 4.85 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_ti_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_ti-e647a593.ckpt) |
-| PiT_xs | D910x8-G | 78.41 | 94.06 | 10.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_xs_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_xs-fea0d37e.ckpt) |
-| PiT_s | D910x8-G | 80.56 | 94.80 | 23.46 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_s-3c1ba36f.ckpt) |
-| PiT_b | D910x8-G | 81.87 | 95.04 | 73.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_b-2411c9b6.ckpt) |
+| pit_ti | D910x8-G | 72.96 | 91.33 | 4.85 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_ti_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_ti-e647a593.ckpt) |
+| pit_xs | D910x8-G | 78.41 | 94.06 | 10.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_xs_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_xs-fea0d37e.ckpt) |
+| pit_s | D910x8-G | 80.56 | 94.80 | 23.46 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_s_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_s-3c1ba36f.ckpt) |
+| pit_b | D910x8-G | 81.87 | 95.04 | 73.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pit/pit_b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pit/pit_b-2411c9b6.ckpt) |
diff --git a/configs/pvt/README.md b/configs/pvt/README.md
index 2ab969374..ddcee7ee7 100644
--- a/configs/pvt/README.md
+++ b/configs/pvt/README.md
@@ -16,10 +16,10 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|:----------:|:--------:|:---------:|:---------:|:----------:|----------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------|
-| PVT_tiny | D910x8-G | 74.81 | 92.18 | 13.23 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_tiny-6abb953d.ckpt) |
-| PVT_small | D910x8-G | 79.66 | 94.71 | 24.49 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_small-213c2ed1.ckpt) |
-| PVT_medium | D910x8-G | 81.82 | 95.81 | 44.21 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_medium_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_medium-469e6802.ckpt) |
-| PVT_large | D910x8-G | 81.75 | 95.70 | 61.36 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_large_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_large-bb6895d7.ckpt) |
+| pvt_tiny | D910x8-G | 74.81 | 92.18 | 13.23 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_tiny_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_tiny-6abb953d.ckpt) |
+| pvt_small | D910x8-G | 79.66 | 94.71 | 24.49 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_small_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_small-213c2ed1.ckpt) |
+| pvt_medium | D910x8-G | 81.82 | 95.81 | 44.21 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_medium_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_medium-469e6802.ckpt) |
+| pvt_large | D910x8-G | 81.75 | 95.70 | 61.36 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvt/pvt_large_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt/pvt_large-bb6895d7.ckpt) |
diff --git a/configs/pvtv2/README.md b/configs/pvtv2/README.md
index e4a4abf48..7c691bd34 100644
--- a/configs/pvtv2/README.md
+++ b/configs/pvtv2/README.md
@@ -22,13 +22,13 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|----------|----------|-----------|-----------|------------|------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|
-| PVTV2_b0 | D910x8-G | 71.50 | 90.60 | 3.67 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b0-1c4f6683.ckpt) |
-| PVTV2_b1 | D910x8-G | 78.91 | 94.49 | 14.01 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b1-3ceb171a.ckpt) |
-| PVTV2_b2 | D910x8-G | 81.99 | 95.74 | 25.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b2_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b2-0565d18e.ckpt) |
-| PVTV2_b3 | D910x8-G | 82.84 | 96.24 | 45.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b3-feaae3fc.ckpt) |
-| PVTV2_b4 | D910x8-G | 83.14 | 96.27 | 62.56 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b4-1cf4bc03.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|-----------|----------|-----------|-----------|------------|------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|
+| pvt_v2_b0 | D910x8-G | 71.50 | 90.60 | 3.67 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b0-1c4f6683.ckpt) |
+| pvt_v2_b1 | D910x8-G | 78.91 | 94.49 | 14.01 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b1-3ceb171a.ckpt) |
+| pvt_v2_b2 | D910x8-G | 81.99 | 95.74 | 25.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b2_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b2-0565d18e.ckpt) |
+| pvt_v2_b3 | D910x8-G | 82.84 | 96.24 | 45.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b3_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b3-feaae3fc.ckpt) |
+| pvt_v2_b4 | D910x8-G | 83.14 | 96.27 | 62.56 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/pvtv2/pvt_v2_b4_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/pvt_v2/pvt_v2_b4-1cf4bc03.ckpt) |
diff --git a/configs/repmlp/README.md b/configs/repmlp/README.md
index 614f369c8..18fcca20a 100644
--- a/configs/repmlp/README.md
+++ b/configs/repmlp/README.md
@@ -24,8 +24,8 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|:-----------:|:--------:|:---------:|:---------:|:----------:|--------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------|
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|:--------------:|:--------:|:---------:|:---------:|:----------:|-----------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------|
| repmlp_t224 | D910x8-G | 76.71 | 93.30 | 38.30 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/repmlp/repmlp_t224_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/repmlp/repmlp_t224-8dbedd00.ckpt) |
@@ -79,7 +79,7 @@ python train.py --config configs/repmlp/repmlp_t224_ascend.yaml --data_dir /path
To validate the accuracy of the trained model, you can use `validate.py` and pass the checkpoint path with `--ckpt_path`.
```shell
-python validate.py --model=RepMLPNet_T224 --data_dir /path/to/imagenet --ckpt_path /path/to/ckpt
+python validate.py --model=repmlp_t224 --data_dir /path/to/imagenet --ckpt_path /path/to/ckpt
```
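The same check can also be run from Python rather than the CLI. Below is a minimal sketch, assuming the `create_model` interface used in the deployment tutorial later in this patch; the random input is only there to confirm that the renamed entry `repmlp_t224` resolves and returns ImageNet-1K logits.

```python
import numpy as np
import mindspore as ms
from mindcv.models import create_model

# Build the network under its new registered name. pretrained=True pulls the
# released checkpoint; a local file could instead be loaded via ms.load_checkpoint.
model = create_model(model_name="repmlp_t224", num_classes=1000, pretrained=True)
model.set_train(False)

# Dummy forward pass: the renamed entry should resolve and return (1, 1000) logits.
x = ms.Tensor(np.random.uniform(0.0, 1.0, size=[1, 3, 224, 224]).astype(np.float32))
logits = model(x)
print(logits.shape)
```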
### Deployment
diff --git a/configs/repmlp/repmlp_t224_ascend.yaml b/configs/repmlp/repmlp_t224_ascend.yaml
index 883d4c1f2..c47e2777f 100644
--- a/configs/repmlp/repmlp_t224_ascend.yaml
+++ b/configs/repmlp/repmlp_t224_ascend.yaml
@@ -30,7 +30,7 @@ auto_augment: 'randaug-m9-mstd0.5-inc1'
# model
-model: 'RepMLPNet_T224'
+model: 'repmlp_t224'
num_classes: 1000
in_channels: 3
pretrained: False
diff --git a/configs/res2net/README.md b/configs/res2net/README.md
index 825e1d1f0..0576c3527 100644
--- a/configs/res2net/README.md
+++ b/configs/res2net/README.md
@@ -19,12 +19,12 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|-----------------|-----------|-----------|-------|------------|-------------------------------------------------------------------------------------------------------|---|
-| Res2Net50 | D910x8-G | 79.35 | 94.64 | 25.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50-f42cf71b.ckpt) |
-| Res2Net101 | D910x8-G | 79.56 | 94.70 | 45.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101-8ae60132.ckpt) |
-| Res2Net50-v1b | D910x8-G | 80.32 | 95.09 | 25.77 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_50_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50_v1b-99304e92.ckpt) |
-| Res2Net101-v1b | D910x8-G | 81.14 | 95.41 | 45.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_101_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101_v1b-7e6db001.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|----------------|-----------|-----------|-------|------------|-------------------------------------------------------------------------------------------------------|---|
+| res2net50 | D910x8-G | 79.35 | 94.64 | 25.76 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50-f42cf71b.ckpt) |
+| res2net101 | D910x8-G | 79.56 | 94.70 | 45.33 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101-8ae60132.ckpt) |
+| res2net50_v1b | D910x8-G | 80.32 | 95.09 | 25.77 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_50_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net50_v1b-99304e92.ckpt) |
+| res2net101_v1b | D910x8-G | 81.14 | 95.41 | 45.35 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/res2net/res2net_101_v1b_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/res2net/res2net101_v1b-7e6db001.ckpt) |
diff --git a/configs/resnest/README.md b/configs/resnest/README.md
index 81953c928..4e79305d5 100644
--- a/configs/resnest/README.md
+++ b/configs/resnest/README.md
@@ -24,8 +24,8 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|------------|----------|-----------|-----------|------------|--------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------|
-| ResNeSt50 | D910x8-G | 80.81 | 95.16 | 27.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest50-f2e7fc9c.ckpt) |
-| ResNeSt101 | D910x8-G | 82.90 | 96.12 | 48.41 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest101-7cc5c258.ckpt) |
+| resnest50 | D910x8-G | 80.81 | 95.16 | 27.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest50-f2e7fc9c.ckpt) |
+| resnest101 | D910x8-G | 82.90 | 96.12 | 48.41 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnest/resnest101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnest/resnest101-7cc5c258.ckpt) |
diff --git a/configs/resnet/README.md b/configs/resnet/README.md
index 79b54e871..3eaaf7c89 100644
--- a/configs/resnet/README.md
+++ b/configs/resnet/README.md
@@ -19,13 +19,13 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|-----------------|-----------|-----------|-----------|-------|-------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------|
-| ResNet18 | D910x8-G | 70.21 | 89.62 | 11.70 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet18-1e65cd21.ckpt) |
-| ResNet34 | D910x8-G | 74.15 | 91.98 | 21.81 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet34-f297d27e.ckpt) |
-| ResNet50 | D910x8-G | 76.69 | 93.50 | 25.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet50-e0733ab8.ckpt) |
-| ResNet101 | D910x8-G | 78.24 | 94.09 |44.65 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet101-689c5e77.ckpt) |
-| ResNet152 | D910x8-G | 78.72 | 94.45 | 60.34| [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_152_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet152-beb689d8.ckpt) |
+| resnet18 | D910x8-G | 70.21 | 89.62 | 11.70 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet18-1e65cd21.ckpt) |
+| resnet34 | D910x8-G | 74.15 | 91.98 | 21.81 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet34-f297d27e.ckpt) |
+| resnet50 | D910x8-G | 76.69 | 93.50 | 25.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet50-e0733ab8.ckpt) |
+| resnet101 | D910x8-G | 78.24 | 94.09 |44.65 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet101-689c5e77.ckpt) |
+| resnet152 | D910x8-G | 78.72 | 94.45 | 60.34| [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnet/resnet_152_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnet/resnet152-beb689d8.ckpt) |
diff --git a/configs/resnetv2/README.md b/configs/resnetv2/README.md
index 978c50df5..ceb4b48ee 100644
--- a/configs/resnetv2/README.md
+++ b/configs/resnetv2/README.md
@@ -20,10 +20,10 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|-----------------|-----------|-----------|-----------|-------|-------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------|
-| ResNetv2_50 | D910x8-G | 76.90 | 93.37 | 25.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_50-3c2f143b.ckpt) |
-| ResNetv2_101 | D910x8-G | 78.48 | 94.23 | 44.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_101-5d4c49a1.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|--------------|-----------|-----------|-----------|-------|-------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------|
+| resnetv2_50 | D910x8-G | 76.90 | 93.37 | 25.60 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_50-3c2f143b.ckpt) |
+| resnetv2_101 | D910x8-G | 78.48 | 94.23 | 44.55 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnetv2/resnetv2_101_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnetv2/resnetv2_101-5d4c49a1.ckpt) |
diff --git a/configs/resnext/README.md b/configs/resnext/README.md
index 4b0235afa..022a7301f 100644
--- a/configs/resnext/README.md
+++ b/configs/resnext/README.md
@@ -26,10 +26,10 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|------------------|----------|-----------|-----------|------------|--------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------|
-| ResNeXt50_32x4d | D910x8-G | 78.53 | 94.10 | 25.10 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext50_32x4d-af8aba16.ckpt) |
-| ResNeXt101_32x4d | D910x8-G | 79.83 | 94.80 | 44.32 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_32x4d-3c1e9c51.ckpt) |
-| ResNeXt101_64x4d | D910x8-G | 80.30 | 94.82 | 83.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_64x4d-8929255b.ckpt) |
-| ResNeXt152_64x4d | D910x8-G | 80.52 | 95.00 | 115.27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext152_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext152_64x4d-3aba275c.ckpt) |
+| resnext50_32x4d | D910x8-G | 78.53 | 94.10 | 25.10 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext50_32x4d-af8aba16.ckpt) |
+| resnext101_32x4d | D910x8-G | 79.83 | 94.80 | 44.32 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_32x4d-3c1e9c51.ckpt) |
+| resnext101_64x4d | D910x8-G | 80.30 | 94.82 | 83.66 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext101_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext101_64x4d-8929255b.ckpt) |
+| resnext152_64x4d | D910x8-G | 80.52 | 95.00 | 115.27 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/resnext/resnext152_64x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/resnext/resnext152_64x4d-3aba275c.ckpt) |
diff --git a/configs/rexnet/README.md b/configs/rexnet/README.md
index 32d71aad7..74edfb0c6 100644
--- a/configs/rexnet/README.md
+++ b/configs/rexnet/README.md
@@ -12,13 +12,13 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|-----------------|-----------|-------|-------|------------|------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------|
-| rexnet_x09 | D910x8-G | 77.06 | 93.41 | 4.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x09_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_09-da498331.ckpt) |
-| rexnet_x10 | D910x8-G | 77.38 | 93.60 | 4.84 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x10_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_10-c5fb2dc7.ckpt) |
-| rexnet_x13 | D910x8-G | 79.06 | 94.28 | 7.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x13_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_13-a49c41e5.ckpt) |
-| rexnet_x15 | D910x8-G | 79.95 | 94.74 | 9.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x15_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_15-37a931d3.ckpt) |
-| rexnet_x20 | D910x8-G | 80.64 | 94.99 | 16.45 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x20_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_20-c5810914.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|-----------------|-----------|-------|-------|------------|-------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------|
+| rexnet_09 | D910x8-G | 77.06 | 93.41 | 4.13 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x09_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_09-da498331.ckpt) |
+| rexnet_10 | D910x8-G | 77.38 | 93.60 | 4.84 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x10_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_10-c5fb2dc7.ckpt) |
+| rexnet_13 | D910x8-G | 79.06 | 94.28 | 7.61 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x13_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_13-a49c41e5.ckpt) |
+| rexnet_15 | D910x8-G | 79.95 | 94.74 | 9.79 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x15_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_15-37a931d3.ckpt) |
+| rexnet_20 | D910x8-G | 80.64 | 94.99 | 16.45 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/rexnet/rexnet_x20_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_20-c5810914.ckpt) |
diff --git a/configs/rexnet/rexnet_x09_ascend.yaml b/configs/rexnet/rexnet_x09_ascend.yaml
index 4fb833079..615b41a41 100644
--- a/configs/rexnet/rexnet_x09_ascend.yaml
+++ b/configs/rexnet/rexnet_x09_ascend.yaml
@@ -20,7 +20,7 @@ auto_augment: "randaug-m9-mstd0.5"
re_value: "random"
# model
-model: "rexnet_x09"
+model: "rexnet_09"
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/configs/rexnet/rexnet_x10_ascend.yaml b/configs/rexnet/rexnet_x10_ascend.yaml
index 2febdc729..1439df063 100644
--- a/configs/rexnet/rexnet_x10_ascend.yaml
+++ b/configs/rexnet/rexnet_x10_ascend.yaml
@@ -20,7 +20,7 @@ auto_augment: "randaug-m9-mstd0.5"
re_value: "random"
# model
-model: "rexnet_x10"
+model: "rexnet_10"
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/configs/rexnet/rexnet_x13_ascend.yaml b/configs/rexnet/rexnet_x13_ascend.yaml
index 4bcb3d7ef..38d07d352 100644
--- a/configs/rexnet/rexnet_x13_ascend.yaml
+++ b/configs/rexnet/rexnet_x13_ascend.yaml
@@ -20,7 +20,7 @@ auto_augment: "randaug-m9-mstd0.5"
re_value: "random"
# model
-model: "rexnet_x13"
+model: "rexnet_13"
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/configs/rexnet/rexnet_x15_ascend.yaml b/configs/rexnet/rexnet_x15_ascend.yaml
index ee1ceeeb3..2165d8c3f 100644
--- a/configs/rexnet/rexnet_x15_ascend.yaml
+++ b/configs/rexnet/rexnet_x15_ascend.yaml
@@ -20,7 +20,7 @@ auto_augment: "randaug-m9-mstd0.5"
re_value: "random"
# model
-model: "rexnet_x15"
+model: "rexnet_15"
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/configs/rexnet/rexnet_x20_ascend.yaml b/configs/rexnet/rexnet_x20_ascend.yaml
index 9f609a3ba..b1729e176 100644
--- a/configs/rexnet/rexnet_x20_ascend.yaml
+++ b/configs/rexnet/rexnet_x20_ascend.yaml
@@ -20,7 +20,7 @@ auto_augment: "randaug-m9-mstd0.5"
re_value: "random"
# model
-model: "rexnet_x20"
+model: "rexnet_20"
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/configs/senet/README.md b/configs/senet/README.md
index 75fdb89e3..fe7b5050f 100644
--- a/configs/senet/README.md
+++ b/configs/senet/README.md
@@ -25,11 +25,11 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|-------------------|----------|-----------|-----------|------------|-------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|
-| SEResNet18 | D910x8-G | 71.81 | 90.49 | 11.80 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet18-7880643b.ckpt) |
-| SEResNet34 | D910x8-G | 75.38 | 92.50 | 21.98 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet34-8179d3c9.ckpt) |
-| SEResNet50 | D910x8-G | 78.32 | 94.07 | 28.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet50-ff9cd214.ckpt) |
-| SEResNeXt26_32x4d | D910x8-G | 77.17 | 93.42 | 16.83 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext26_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext26_32x4d-5361f5b6.ckpt) |
-| SEResNeXt50_32x4d | D910x8-G | 78.71 | 94.36 | 27.63 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext50_32x4d-fdc35aca.ckpt) |
+| seresnet18 | D910x8-G | 71.81 | 90.49 | 11.80 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet18-7880643b.ckpt) |
+| seresnet34 | D910x8-G | 75.38 | 92.50 | 21.98 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet34-8179d3c9.ckpt) |
+| seresnet50 | D910x8-G | 78.32 | 94.07 | 28.14 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnet50_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnet50-ff9cd214.ckpt) |
+| seresnext26_32x4d | D910x8-G | 77.17 | 93.42 | 16.83 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext26_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext26_32x4d-5361f5b6.ckpt) |
+| seresnext50_32x4d | D910x8-G | 78.71 | 94.36 | 27.63 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/senet/seresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/senet/seresnext50_32x4d-fdc35aca.ckpt) |
diff --git a/configs/shufflenetv1/README.md b/configs/shufflenetv1/README.md
index a37b70fd9..794f7ebc4 100644
--- a/configs/shufflenetv1/README.md
+++ b/configs/shufflenetv1/README.md
@@ -20,10 +20,10 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|-----------------------|----------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|
-| shufflenet_v1_g3_x0_5 | D910x8-G | 57.05 | 79.73 | 0.73 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_05-42cfe109.ckpt) |
-| shufflenet_v1_g3_x1_0 | D910x8-G | 67.77 | 87.73 | 1.89 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_10-245f0ccf.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|---------------------|----------|-----------|-----------|------------|--------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------|
+| shufflenet_v1_g3_05 | D910x8-G | 57.05 | 79.73 | 0.73 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_05-42cfe109.ckpt) |
+| shufflenet_v1_g3_10 | D910x8-G | 67.77 | 87.73 | 1.89 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv1/shufflenet_v1_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_10-245f0ccf.ckpt) |
diff --git a/configs/shufflenetv1/shufflenet_v1_0.5_ascend.yaml b/configs/shufflenetv1/shufflenet_v1_0.5_ascend.yaml
index f078bb184..adccfdb9f 100644
--- a/configs/shufflenetv1/shufflenet_v1_0.5_ascend.yaml
+++ b/configs/shufflenetv1/shufflenet_v1_0.5_ascend.yaml
@@ -19,7 +19,7 @@ interpolation: "bilinear"
crop_pct: 0.875
# model
-model: "shufflenet_v1_g3_x0_5"
+model: "shufflenet_v1_g3_05"
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/configs/shufflenetv1/shufflenet_v1_1.0_ascend.yaml b/configs/shufflenetv1/shufflenet_v1_1.0_ascend.yaml
index b1866a6b2..a48a0d28b 100644
--- a/configs/shufflenetv1/shufflenet_v1_1.0_ascend.yaml
+++ b/configs/shufflenetv1/shufflenet_v1_1.0_ascend.yaml
@@ -19,7 +19,7 @@ interpolation: "bilinear"
crop_pct: 0.875
# model
-model: "shufflenet_v1_g3_x1_0"
+model: "shufflenet_v1_g3_10"
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/configs/shufflenetv2/README.md b/configs/shufflenetv2/README.md
index e4e56f280..0156b9d32 100644
--- a/configs/shufflenetv2/README.md
+++ b/configs/shufflenetv2/README.md
@@ -26,8 +26,8 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|--------------------|----------|-----------|-----------|------------|--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|------------------|----------|-----------|-----------|------------|--------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
| shufflenet_v2_x0_5 | D910x8-G | 60.53 | 82.11 | 1.37 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_0.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x0_5-8c841061.ckpt) |
| shufflenet_v2_x1_0 | D910x8-G | 69.47 | 88.88 | 2.29 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x1_0-0da4b7fa.ckpt) |
| shufflenet_v2_x1_5 | D910x8-G | 72.79 | 90.93 | 3.53 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/shufflenetv2/shufflenet_v2_1.5_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x1_5-00b56131.ckpt) |
diff --git a/configs/sknet/README.md b/configs/sknet/README.md
index ee0d4a6e7..673b93d7f 100644
--- a/configs/sknet/README.md
+++ b/configs/sknet/README.md
@@ -21,13 +21,15 @@ multi-scale information from, e.g., 3×3, 5×5, 7×7 convolutional kernels insid
## Results
+Our reproduced model performance on ImageNet-1K is reported as follows.
+
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|----------------|---------|-----------|-----------|------------|------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
-| skresnet18 | D910x8-G | 73.09 | 91.20 | 11.97 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet18-868228e5.ckpt) |
-| skresnet34 | D910x8-G | 76.71 | 93.10 | 22.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet34-d668b629.ckpt) |
-| skresnet50_32x4d | D910x8-G | 79.08 | 94.60 | 37.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnext50_32x4d-395413a2.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|-------------------|---------|-----------|-----------|------------|------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
+| skresnet18 | D910x8-G | 73.09 | 91.20 | 11.97 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet18_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet18-868228e5.ckpt) |
+| skresnet34 | D910x8-G | 76.71 | 93.10 | 22.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnet34_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnet34-d668b629.ckpt) |
+| skresnext50_32x4d | D910x8-G | 79.08 | 94.60 | 37.31 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/sknet/skresnext50_32x4d_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/sknet/skresnext50_32x4d-395413a2.ckpt) |
diff --git a/configs/squeezenet/README.md b/configs/squeezenet/README.md
index da440d380..e29bdd45c 100644
--- a/configs/squeezenet/README.md
+++ b/configs/squeezenet/README.md
@@ -21,14 +21,16 @@ Middle: SqueezeNet with simple bypass; Right: SqueezeNet with complex bypass.
## Results
+Our reproduced model performance on ImageNet-1K is reported as follows.
+
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|----------------|---------|-----------|-----------|------------|------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
-| squeezenet_1.0 | D910x8-G | 59.01 | 81.01 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0-e2d78c4a.ckpt) |
-| squeezenet_1.0 | GPUx8-G | 58.83 | 81.08 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.0_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0_gpu-685f5941.ckpt) |
-| squeezenet_1.1 | D910x8-G | 58.44 | 80.84 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1-da256d3a.ckpt) |
-| squeezenet_1.1 | GPUx8-G | 59.18 | 81.41 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.1_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1_gpu-0e33234a.ckpt) |
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|---------------|---------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|
+| squeezenet1_0 | D910x8-G | 59.01 | 81.01 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.0_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0-e2d78c4a.ckpt) |
+| squeezenet1_0 | GPUx8-G | 58.83 | 81.08 | 1.25 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.0_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_0_gpu-685f5941.ckpt) |
+| squeezenet1_1 | D910x8-G | 58.44 | 80.84 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.1_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1-da256d3a.ckpt) |
+| squeezenet1_1 | GPUx8-G | 59.18 | 81.41 | 1.24 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/squeezenet/squeezenet_1.1_gpu.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/squeezenet/squeezenet1_1_gpu-0e33234a.ckpt) |
diff --git a/configs/swintransformerv2/README.md b/configs/swintransformerv2/README.md
index d950859ab..18bf88cba 100644
--- a/configs/swintransformerv2/README.md
+++ b/configs/swintransformerv2/README.md
@@ -25,8 +25,8 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
-| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
-|---------------------|----------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|
+| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
+|----------------------|----------|-----------|-----------|------------|---------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|
| swinv2_tiny_window8 | D910x8-G | 81.42 | 95.43 | 28.78 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/swintransformerv2/swinv2_tiny_window8_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/swinv2/swinv2_tiny_window8-3ef8b787.ckpt) |
diff --git a/configs/xception/README.md b/configs/xception/README.md
index 7b67553a3..61ff965a5 100644
--- a/configs/xception/README.md
+++ b/configs/xception/README.md
@@ -26,7 +26,7 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|----------|----------|-----------|-----------|------------|-------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|
-| Xception | D910x8-G | 79.01 | 94.25 | 22.91 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xception/xception_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xception/xception-2c1e711df.ckpt) |
+| xception | D910x8-G | 79.01 | 94.25 | 22.91 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xception/xception_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xception/xception-2c1e711df.ckpt) |
diff --git a/configs/xcit/README.md b/configs/xcit/README.md
index dfaef4a72..c20fe5c77 100644
--- a/configs/xcit/README.md
+++ b/configs/xcit/README.md
@@ -20,7 +20,7 @@ Our reproduced model performance on ImageNet-1K is reported as follows.
| Model | Context | Top-1 (%) | Top-5 (%) | Params (M) | Recipe | Download |
|--------------|----------|-----------|-----------|------------|-----------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------|
-| xcit_tiny_12_p16 | D910x8-G | 77.67 | 93.79 | 7.00 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xcit/xcit_tiny_12_p16_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xcit/xcit_tiny_12_p16_224-1b1c9301.ckpt) |
+| xcit_tiny_12_p16_224 | D910x8-G | 77.67 | 93.79 | 7.00 | [yaml](https://github.com/mindspore-lab/mindcv/blob/main/configs/xcit/xcit_tiny_12_p16_ascend.yaml) | [weights](https://download.mindspore.cn/toolkits/mindcv/xcit/xcit_tiny_12_p16_224-1b1c9301.ckpt) |
@@ -73,7 +73,7 @@ python train.py --config configs/xcit/xcit_tiny_12_p16_ascend.yaml --data_dir /p
To validate the accuracy of the trained model, you can use `validate.py` and pass the checkpoint path with `--ckpt_path`.
```
-python validate.py -c configs/xcit/xcit_tiny_12_p16_ascend.yaml --data_dir /path/to/imagenet --ckpt_path /path/to/ckpt
+python validate.py -c configs/xcit/xcit_tiny_12_p16_ascend.yaml --data_dir /path/to/imagenet --ckpt_path /path/to/ckpt
```
### Deployment
diff --git a/configs/xcit/xcit_tiny_12_p16_ascend.yaml b/configs/xcit/xcit_tiny_12_p16_ascend.yaml
index c5e8f8e5a..4307ef3ea 100644
--- a/configs/xcit/xcit_tiny_12_p16_ascend.yaml
+++ b/configs/xcit/xcit_tiny_12_p16_ascend.yaml
@@ -26,7 +26,7 @@ ema: True
ema_decay: 0.99996
# model
-model: 'xcit_tiny_12_p16'
+model: 'xcit_tiny_12_p16_224'
num_classes: 1000
pretrained: False
ckpt_path: ""
diff --git a/docs/en/tutorials/deployment.md b/docs/en/tutorials/deployment.md
index 4ed7d62cd..d19241cc0 100644
--- a/docs/en/tutorials/deployment.md
+++ b/docs/en/tutorials/deployment.md
@@ -24,12 +24,12 @@ from mindcv.models import create_model
import numpy as np
import mindspore as ms
-model = create_model(model_name='mobilenet_v2_100_224', num_classes=1000, pretrained=True)
+model = create_model(model_name='mobilenet_v2_100', num_classes=1000, pretrained=True)
input_np = np.random.uniform(0.0, 1.0, size=[1, 3, 224, 224]).astype(np.float32)
-# Export mobilenet_v2_100_224.mindir to current folder.
-ms.export(model, ms.Tensor(input_np), file_name='mobilenet_v2_100_224', file_format='MINDIR')
+# Export mobilenet_v2_100.mindir to current folder.
+ms.export(model, ms.Tensor(input_np), file_name='mobilenet_v2_100', file_format='MINDIR')
```
## Deploying the Serving Inference Service
@@ -40,9 +40,9 @@ Start Serving with the following files:
```text
demo
-├── mobilenet_v2_100_224
+├── mobilenet_v2_100
│   ├── 1
-│   │   └── mobilenet_v2_100_224.mindir
+│   │   └── mobilenet_v2_100.mindir
│   └── servable_config.py
│── serving_server.py
├── serving_client.py
@@ -54,8 +54,8 @@ demo
└─ ……
```
-- `mobilenet_v2_100_224`: Model folder. The folder name is the model name.
-- `mobilenet_v2_100_224.mindir`: Model file generated by the network in the previous step, which is stored in folder 1 (the number indicates the version number). Different versions are stored in different folders. The version number must be a string of digits. By default, the latest model file is started.
+- `mobilenet_v2_100`: Model folder. The folder name is the model name.
+- `mobilenet_v2_100.mindir`: Model file generated by the network in the previous step, which is stored in folder 1 (the number indicates the version number). Different versions are stored in different folders. The version number must be a string of digits. By default, the latest model file is started.
- `servable_config.py`: Model configuration script. Declare the model and specify the input and output parameters of the model.
- `serving_server.py`: Script to start the Serving server.
- `serving_client.py`: Script to start the Python client.
@@ -68,7 +68,7 @@ Content of the configuration file `servable_config.py`:
from mindspore_serving.server import register
# Declare the model. The parameter model_file indicates the name of the model file, and model_format indicates the model type.
-model = register.declare_model(model_file="mobilenet_v2_100_224.mindir", model_format="MindIR")
+model = register.declare_model(model_file="mobilenet_v2_100.mindir", model_format="MindIR")
# The input parameters of the Servable method are specified by the input parameters of the Python method. The output parameters of the Servable method are specified by the output_names of register_method.
@register.register_method(output_names=["score"])
@@ -79,7 +79,7 @@ def predict(image):
### Starting the Service
-The `server` function of MindSpore can provide deployment service through either gRPC or RESTful. The following uses gRPC as an example. The service startup script `serving_server.py` deploys the `mobilenet_v2_100_224` in the local directory to device 0 and starts the gRPC server at 127.0.0.1:5500. Content of the script:
+The `server` function of MindSpore can provide deployment service through either gRPC or RESTful. The following uses gRPC as an example. The service startup script `serving_server.py` deploys the `mobilenet_v2_100` in the local directory to device 0 and starts the gRPC server at 127.0.0.1:5500. Content of the script:
```python
import os
@@ -89,7 +89,7 @@ from mindspore_serving import server
def start():
servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name="mobilenet_v2_100_224",
+ servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name="mobilenet_v2_100",
device_ids=0)
server.start_servables(servable_configs=servable_config)
server.start_grpc_server(address="127.0.0.1:5500")
@@ -137,7 +137,7 @@ def postprocess(score):
return idx2label[max_idx]
def predict():
- client = Client("127.0.0.1:5500", "mobilenet_v2_100_224", "predict")
+ client = Client("127.0.0.1:5500", "mobilenet_v2_100", "predict")
instances = []
images, _ = next(data_loader.create_tuple_iterator())
image_np = images.asnumpy().squeeze()
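Before wiring the exported file into Serving, it can be worth loading the renamed `mobilenet_v2_100.mindir` back in-process to confirm the export is intact. A minimal sketch, assuming MindSpore's `ms.load` / `nn.GraphCell` interface for MindIR graphs:

```python
import numpy as np
import mindspore as ms
from mindspore import nn

# Load the MindIR exported above and run one dummy batch through it.
graph = ms.load("mobilenet_v2_100.mindir")
net = nn.GraphCell(graph)

x = ms.Tensor(np.random.uniform(0.0, 1.0, size=[1, 3, 224, 224]).astype(np.float32))
out = net(x)
print(out.shape)  # expected: (1, 1000)
```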
diff --git a/docs/en/tutorials/inference.md b/docs/en/tutorials/inference.md
index 690ffe61a..a6b0f98af 100644
--- a/docs/en/tutorials/inference.md
+++ b/docs/en/tutorials/inference.md
@@ -17,13 +17,13 @@ from mindcv.models import registry
registry.list_models()
```
- ['BiTresnet50',
- 'RepMLPNet_B224',
- 'RepMLPNet_B256',
- 'RepMLPNet_D256',
- 'RepMLPNet_L256',
- 'RepMLPNet_T224',
- 'RepMLPNet_T256',
+ ['BiT_resnet50',
+ 'repmlp_b224',
+ 'repmlp_b256',
+ 'repmlp_d256',
+ 'repmlp_l256',
+ 'repmlp_t224',
+ 'repmlp_t256',
'convit_base',
'convit_base_plus',
'convit_small',
diff --git a/docs/zh/tutorials/deployment.md b/docs/zh/tutorials/deployment.md
index 60266fd6a..4b0bda576 100644
--- a/docs/zh/tutorials/deployment.md
+++ b/docs/zh/tutorials/deployment.md
@@ -24,12 +24,12 @@ from mindcv.models import create_model
import numpy as np
import mindspore as ms
-model = create_model(model_name='mobilenet_v2_100_224', num_classes=1000, pretrained=True)
+model = create_model(model_name='mobilenet_v2_100', num_classes=1000, pretrained=True)
input_np = np.random.uniform(0.0, 1.0, size=[1, 3, 224, 224]).astype(np.float32)
-# 导出文件mobilenet_v2_100_224.mindir到当前文件夹
-ms.export(model, ms.Tensor(input_np), file_name='mobilenet_v2_100_224', file_format='MINDIR')
+# 导出文件mobilenet_v2_100.mindir到当前文件夹
+ms.export(model, ms.Tensor(input_np), file_name='mobilenet_v2_100', file_format='MINDIR')
```
## 部署Serving推理服务
@@ -40,9 +40,9 @@ ms.export(model, ms.Tensor(input_np), file_name='mobilenet_v2_100_224', file_for
```text
demo
-├── mobilenet_v2_100_224
+├── mobilenet_v2_100
│   ├── 1
-│   │   └── mobilenet_v2_100_224.mindir
+│   │   └── mobilenet_v2_100.mindir
│   └── servable_config.py
│── serving_server.py
├── serving_client.py
@@ -54,8 +54,8 @@ demo
└─ ……
```
-- `mobilenet_v2_100_224`为模型文件夹,文件夹名即为模型名。
-- `mobilenet_v2_100_224.mindir`为上一步网络生成的模型文件,放置在文件夹1下,1为版本号,不同的版本放置在不同的文件夹下,版本号需以纯数字串命名,默认配置下启动最大数值的版本号的模型文件。
+- `mobilenet_v2_100`为模型文件夹,文件夹名即为模型名。
+- `mobilenet_v2_100.mindir`为上一步网络生成的模型文件,放置在文件夹1下,1为版本号,不同的版本放置在不同的文件夹下,版本号需以纯数字串命名,默认配置下启动最大数值的版本号的模型文件。
- `servable_config.py`为模型配置脚本,对模型进行声明、入参和出参定义。
- `serving_server.py`为启动服务脚本文件。
- `serving_client.py`为启动客户端脚本文件。
@@ -68,7 +68,7 @@ demo
from mindspore_serving.server import register
# 进行模型声明,其中declare_model入参model_file指示模型的文件名称,model_format指示模型的模型类别
-model = register.declare_model(model_file="mobilenet_v2_100_224.mindir", model_format="MindIR")
+model = register.declare_model(model_file="mobilenet_v2_100.mindir", model_format="MindIR")
# Servable方法的入参由Python方法的入参指定,Servable方法的出参由register_method的output_names指定
@register.register_method(output_names=["score"])
@@ -79,7 +79,7 @@ def predict(image):
### 启动服务
-MindSpore的`server`函数提供两种服务部署,一种是gRPC方式,一种是通过RESTful方式,本教程以gRPC方式为例。服务启动脚本`serving_server.py`把本地目录下的`mobilenet_v2_100_224`部署到设备0,并启动地址为127.0.0.1:5500的gRPC服务器。脚本文件内容如下:
+MindSpore的`server`函数提供两种服务部署,一种是gRPC方式,一种是通过RESTful方式,本教程以gRPC方式为例。服务启动脚本`serving_server.py`把本地目录下的`mobilenet_v2_100`部署到设备0,并启动地址为127.0.0.1:5500的gRPC服务器。脚本文件内容如下:
```python
import os
@@ -89,7 +89,7 @@ from mindspore_serving import server
def start():
servable_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
- servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name="mobilenet_v2_100_224",
+ servable_config = server.ServableStartConfig(servable_directory=servable_dir, servable_name="mobilenet_v2_100",
device_ids=0)
server.start_servables(servable_configs=servable_config)
server.start_grpc_server(address="127.0.0.1:5500")
@@ -137,7 +137,7 @@ def postprocess(score):
return idx2label[max_idx]
def predict():
- client = Client("127.0.0.1:5500", "mobilenet_v2_100_224", "predict")
+ client = Client("127.0.0.1:5500", "mobilenet_v2_100", "predict")
instances = []
images, _ = next(data_loader.create_tuple_iterator())
image_np = images.asnumpy().squeeze()
diff --git a/docs/zh/tutorials/inference.md b/docs/zh/tutorials/inference.md
index 1dee59bd6..c07de05c5 100644
--- a/docs/zh/tutorials/inference.md
+++ b/docs/zh/tutorials/inference.md
@@ -17,13 +17,13 @@ from mindcv.models import registry
registry.list_models()
```
- ['BiTresnet50',
- 'RepMLPNet_B224',
- 'RepMLPNet_B256',
- 'RepMLPNet_D256',
- 'RepMLPNet_L256',
- 'RepMLPNet_T224',
- 'RepMLPNet_T256',
+ ['BiT_resnet50',
+ 'repmlp_b224',
+ 'repmlp_b256',
+ 'repmlp_d256',
+ 'repmlp_l256',
+ 'repmlp_t224',
+ 'repmlp_t256',
'convit_base',
'convit_base_plus',
'convit_small',
diff --git a/mindcv/models/__init__.py b/mindcv/models/__init__.py
index 4245ffbbc..75fa2f6db 100644
--- a/mindcv/models/__init__.py
+++ b/mindcv/models/__init__.py
@@ -14,15 +14,15 @@
ghostnet,
googlenet,
hrnet,
- inception_v3,
- inception_v4,
+ inceptionv3,
+ inceptionv4,
layers,
mixnet,
mlpmixer,
mnasnet,
- mobilenet_v1,
- mobilenet_v2,
- mobilenet_v3,
+ mobilenetv1,
+ mobilenetv2,
+ mobilenetv3,
mobilevit,
model_factory,
nasnet,
@@ -45,8 +45,8 @@
shufflenetv2,
sknet,
squeezenet,
- swin_transformer,
- swin_transformer_v2,
+ swintransformer,
+ swintransformerv2,
vgg,
visformer,
vit,
@@ -69,15 +69,15 @@
from .googlenet import *
from .helpers import *
from .hrnet import *
-from .inception_v3 import *
-from .inception_v4 import *
+from .inceptionv3 import *
+from .inceptionv4 import *
from .layers import *
from .mixnet import *
from .mlpmixer import *
from .mnasnet import *
-from .mobilenet_v1 import *
-from .mobilenet_v2 import *
-from .mobilenet_v3 import *
+from .mobilenetv1 import *
+from .mobilenetv2 import *
+from .mobilenetv3 import *
from .mobilevit import *
from .model_factory import *
from .nasnet import *
@@ -100,8 +100,8 @@
from .shufflenetv2 import *
from .sknet import *
from .squeezenet import *
-from .swin_transformer import *
-from .swin_transformer_v2 import *
+from .swintransformer import *
+from .swintransformerv2 import *
from .vgg import *
from .visformer import *
from .vit import *
@@ -132,9 +132,9 @@
__all__.extend(mixnet.__all__)
__all__.extend(mlpmixer.__all__)
__all__.extend(mnasnet.__all__)
-__all__.extend(mobilenet_v1.__all__)
-__all__.extend(mobilenet_v2.__all__)
-__all__.extend(mobilenet_v3.__all__)
+__all__.extend(mobilenetv1.__all__)
+__all__.extend(mobilenetv2.__all__)
+__all__.extend(mobilenetv3.__all__)
__all__.extend(mobilevit.__all__)
__all__.extend(model_factory.__all__)
__all__.extend(nasnet.__all__)
@@ -157,8 +157,8 @@
__all__.extend(shufflenetv2.__all__)
__all__.extend(sknet.__all__)
__all__.extend(squeezenet.__all__)
-__all__.extend(swin_transformer.__all__)
-__all__.extend(swin_transformer_v2.__all__)
+__all__.extend(swintransformer.__all__)
+__all__.extend(swintransformerv2.__all__)
__all__.extend(vgg.__all__)
__all__.extend(visformer.__all__)
__all__.extend(vit.__all__)
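Since these registration modules are re-exported by name, downstream imports have to follow the renamed files. A short sketch of the new import surface; the old dotted paths such as `mindcv.models.mobilenet_v2` would now raise `ImportError`:

```python
# New module names after the rename (old: inception_v3, mobilenet_v2, swin_transformer_v2, ...).
from mindcv.models import inceptionv3, inceptionv4
from mindcv.models import mobilenetv1, mobilenetv2, mobilenetv3
from mindcv.models import swintransformer, swintransformerv2

# Each module still exposes __all__, which is what mindcv.models.__all__ aggregates.
print(mobilenetv2.__all__[:3])
```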
diff --git a/mindcv/models/bit.py b/mindcv/models/bit.py
index 512194c38..2a0fd3a76 100644
--- a/mindcv/models/bit.py
+++ b/mindcv/models/bit.py
@@ -14,9 +14,9 @@
__all__ = [
"BiT_ResNet",
- "BiTresnet50",
- "BiTresnet50x3",
- "BiTresnet101",
+ "BiT_resnet50",
+ "BiT_resnet50x3",
+ "BiT_resnet101",
]
@@ -31,9 +31,9 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "BiTresnet50": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50-1e4795a4.ckpt"),
- "BiTresnet50x3": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50x3-a960f91f.ckpt"),
- "BiTresnet101": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet101-2efa9106.ckpt"),
+ "BiT_resnet50": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50-1e4795a4.ckpt"),
+ "BiT_resnet50x3": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet50x3-a960f91f.ckpt"),
+ "BiT_resnet101": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/bit/BiT_resnet101-2efa9106.ckpt"),
}
@@ -268,11 +268,11 @@ def construct(self, x: Tensor) -> Tensor:
@register_model
-def BiTresnet50(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
+def BiT_resnet50(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
"""Get 50 layers ResNet model.
Refer to the base class `models.BiT_Resnet` for more details.
"""
- default_cfg = default_cfgs["BiTresnet50"]
+ default_cfg = default_cfgs["BiT_resnet50"]
model = BiT_ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -282,11 +282,11 @@ def BiTresnet50(pretrained: bool = False, num_classes: int = 1000, in_channels=3
@register_model
-def BiTresnet50x3(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
+def BiT_resnet50x3(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
"""Get 50 layers ResNet model.
Refer to the base class `models.BiT_Resnet` for more details.
"""
- default_cfg = default_cfgs["BiTresnet50x3"]
+ default_cfg = default_cfgs["BiT_resnet50x3"]
model = BiT_ResNet(Bottleneck, [3, 4, 6, 3], wf=3, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -296,11 +296,11 @@ def BiTresnet50x3(pretrained: bool = False, num_classes: int = 1000, in_channels
@register_model
-def BiTresnet101(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
+def BiT_resnet101(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
"""Get 101 layers ResNet model.
Refer to the base class `models.BiT_Resnet` for more details.
"""
- default_cfg = default_cfgs["BiTresnet101"]
+ default_cfg = default_cfgs["BiT_resnet101"]
model = BiT_ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
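Because `register_model` keys the registry on the function name, this rename is user-visible. A quick sanity check, assuming the `registry.list_models()` and `create_model` calls shown in the inference and deployment tutorials above:

```python
from mindcv.models import create_model, registry

# The new BiT names are registered; the old spellings are gone.
names = registry.list_models()
assert "BiT_resnet50" in names and "BiT_resnet101" in names
assert "BiTresnet50" not in names

# Instantiating through the factory exercises the renamed default_cfgs keys as well.
net = create_model(model_name="BiT_resnet50", num_classes=1000, pretrained=False)
```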
diff --git a/mindcv/models/crossvit.py b/mindcv/models/crossvit.py
index e1db068bd..76df2d46b 100644
--- a/mindcv/models/crossvit.py
+++ b/mindcv/models/crossvit.py
@@ -22,9 +22,9 @@
from .registry import register_model
__all__ = [
- "crossvit9",
- "crossvit15",
- "crossvit18",
+ "crossvit_9",
+ "crossvit_15",
+ "crossvit_18",
]
@@ -452,7 +452,7 @@ def construct(self, x: Tensor) -> Tensor:
@register_model
-def crossvit9(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
+def crossvit_9(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs):
model = VisionTransformer(img_size=[240, 224],
patch_size=[12, 16], embed_dim=[128, 256], depth=[[1, 3, 0], [1, 3, 0], [1, 3, 0]],
num_heads=[4, 4], mlp_ratio=[3, 3, 1], qkv_bias=True,
@@ -464,7 +464,7 @@ def crossvit9(pretrained: bool = False, num_classes: int = 1000, in_channels=3,
@register_model
-def crossvit15(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> VisionTransformer:
+def crossvit_15(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> VisionTransformer:
model = VisionTransformer(img_size=[240, 224],
patch_size=[12, 16], embed_dim=[192, 384], depth=[[1, 5, 0], [1, 5, 0], [1, 5, 0]],
num_heads=[6, 6], mlp_ratio=[3, 3, 1], qkv_bias=True,
@@ -476,7 +476,7 @@ def crossvit15(pretrained: bool = False, num_classes: int = 1000, in_channels=3,
@register_model
-def crossvit18(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> VisionTransformer:
+def crossvit_18(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> VisionTransformer:
model = VisionTransformer(img_size=[240, 224],
patch_size=[12, 16], embed_dim=[224, 448], depth=[[1, 6, 0], [1, 6, 0], [1, 6, 0]],
num_heads=[7, 7], mlp_ratio=[3, 3, 1], qkv_bias=True,
diff --git a/mindcv/models/inception_v3.py b/mindcv/models/inceptionv3.py
similarity index 100%
rename from mindcv/models/inception_v3.py
rename to mindcv/models/inceptionv3.py
diff --git a/mindcv/models/inception_v4.py b/mindcv/models/inceptionv4.py
similarity index 100%
rename from mindcv/models/inception_v4.py
rename to mindcv/models/inceptionv4.py
diff --git a/mindcv/models/mnasnet.py b/mindcv/models/mnasnet.py
index ce05cabc3..f77ce0ffd 100644
--- a/mindcv/models/mnasnet.py
+++ b/mindcv/models/mnasnet.py
@@ -15,11 +15,11 @@
__all__ = [
"Mnasnet",
- "mnasnet0_5",
- "mnasnet0_75",
- "mnasnet1_0",
- "mnasnet1_3",
- "mnasnet1_4",
+ "mnasnet_050",
+ "mnasnet_075",
+ "mnasnet_100",
+ "mnasnet_130",
+ "mnasnet_140",
]
@@ -34,11 +34,11 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "mnasnet0.5": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_050-7d8bf4db.ckpt"),
- "mnasnet0.75": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_075-465d366d.ckpt"),
- "mnasnet1.0": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_100-1bcf43f8.ckpt"),
- "mnasnet1.3": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_130-a43a150a.ckpt"),
- "mnasnet1.4": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_140-7e20bb30.ckpt"),
+ "mnasnet_050": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_050-7d8bf4db.ckpt"),
+ "mnasnet_075": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_075-465d366d.ckpt"),
+ "mnasnet_100": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_100-1bcf43f8.ckpt"),
+ "mnasnet_130": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_130-a43a150a.ckpt"),
+ "mnasnet_140": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/mnasnet/mnasnet_140-7e20bb30.ckpt"),
}
@@ -178,10 +178,10 @@ def construct(self, x: Tensor) -> Tensor:
@register_model
-def mnasnet0_5(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
+def mnasnet_050(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
"""Get MnasNet model with width scaled by 0.5.
Refer to the base class `models.Mnasnet` for more details."""
- default_cfg = default_cfgs["mnasnet0.5"]
+ default_cfg = default_cfgs["mnasnet_050"]
model = Mnasnet(alpha=0.5, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
@@ -191,10 +191,10 @@ def mnasnet0_5(pretrained: bool = False, num_classes: int = 1000, in_channels=3,
@register_model
-def mnasnet0_75(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
+def mnasnet_075(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
"""Get MnasNet model with width scaled by 0.75.
Refer to the base class `models.Mnasnet` for more details."""
- default_cfg = default_cfgs["mnasnet0.75"]
+ default_cfg = default_cfgs["mnasnet_075"]
model = Mnasnet(alpha=0.75, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
@@ -204,10 +204,10 @@ def mnasnet0_75(pretrained: bool = False, num_classes: int = 1000, in_channels=3
@register_model
-def mnasnet1_0(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
+def mnasnet_100(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
"""Get MnasNet model with width scaled by 1.0.
Refer to the base class `models.Mnasnet` for more details."""
- default_cfg = default_cfgs["mnasnet1.0"]
+ default_cfg = default_cfgs["mnasnet_100"]
model = Mnasnet(alpha=1.0, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
@@ -217,10 +217,10 @@ def mnasnet1_0(pretrained: bool = False, num_classes: int = 1000, in_channels=3,
@register_model
-def mnasnet1_3(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
+def mnasnet_130(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
"""Get MnasNet model with width scaled by 1.3.
Refer to the base class `models.Mnasnet` for more details."""
- default_cfg = default_cfgs["mnasnet1.3"]
+ default_cfg = default_cfgs["mnasnet_130"]
model = Mnasnet(alpha=1.3, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
@@ -230,10 +230,10 @@ def mnasnet1_3(pretrained: bool = False, num_classes: int = 1000, in_channels=3,
@register_model
-def mnasnet1_4(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
+def mnasnet_140(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> Mnasnet:
"""Get MnasNet model with width scaled by 1.4.
Refer to the base class `models.Mnasnet` for more details."""
- default_cfg = default_cfgs["mnasnet1.4"]
+ default_cfg = default_cfgs["mnasnet_140"]
model = Mnasnet(alpha=1.4, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
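
The point of the rename is that every default_cfgs key now equals its entrypoint name. A quick per-module consistency check, written as a sketch against the names shown in this hunk:

    from mindcv.models import mnasnet

    entrypoints = {name for name in mnasnet.__all__ if name != "Mnasnet"}
    # after this change the registry keys and the function names are identical
    assert entrypoints == set(mnasnet.default_cfgs)
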
diff --git a/mindcv/models/mobilenet_v1.py b/mindcv/models/mobilenetv1.py
similarity index 85%
rename from mindcv/models/mobilenet_v1.py
rename to mindcv/models/mobilenetv1.py
index 03fa16c76..197417ac6 100644
--- a/mindcv/models/mobilenet_v1.py
+++ b/mindcv/models/mobilenetv1.py
@@ -12,10 +12,10 @@
__all__ = [
"MobileNetV1",
- "mobilenet_v1_025_224",
- "mobilenet_v1_050_224",
- "mobilenet_v1_075_224",
- "mobilenet_v1_100_224",
+ "mobilenet_v1_025",
+ "mobilenet_v1_050",
+ "mobilenet_v1_075",
+ "mobilenet_v1_100",
]
@@ -30,16 +30,16 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "mobilenet_v1_0.25_224": _cfg(
+ "mobilenet_v1_025": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_025-d3377fba.ckpt"
),
- "mobilenet_v1_0.5_224": _cfg(
+ "mobilenet_v1_050": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_050-23e9ddbe.ckpt"
),
- "mobilenet_v1_0.75_224": _cfg(
+ "mobilenet_v1_075": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_075-5bed0c73.ckpt"
),
- "mobilenet_v1_1.0_224": _cfg(
+ "mobilenet_v1_100": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv1/mobilenet_v1_100-91c7b206.ckpt"
),
}
@@ -135,11 +135,11 @@ def construct(self, x: Tensor) -> Tensor:
@register_model
-def mobilenet_v1_025_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
+def mobilenet_v1_025(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
"""Get MobileNetV1 model with width scaled by 0.25.
Refer to the base class `models.MobileNetV1` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v1_0.25_224"]
+ default_cfg = default_cfgs["mobilenet_v1_025"]
model = MobileNetV1(alpha=0.25, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
@@ -149,11 +149,11 @@ def mobilenet_v1_025_224(pretrained: bool = False, num_classes: int = 1000, in_c
@register_model
-def mobilenet_v1_050_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
+def mobilenet_v1_050(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
"""Get MobileNetV1 model with width scaled by 0.5.
Refer to the base class `models.MobileNetV1` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v1_0.5_224"]
+ default_cfg = default_cfgs["mobilenet_v1_050"]
model = MobileNetV1(alpha=0.5, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
@@ -163,11 +163,11 @@ def mobilenet_v1_050_224(pretrained: bool = False, num_classes: int = 1000, in_c
@register_model
-def mobilenet_v1_075_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
+def mobilenet_v1_075(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
"""Get MobileNetV1 model with width scaled by 0.75.
Refer to the base class `models.MobileNetV1` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v1_0.75_224"]
+ default_cfg = default_cfgs["mobilenet_v1_075"]
model = MobileNetV1(alpha=0.75, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
@@ -177,11 +177,11 @@ def mobilenet_v1_075_224(pretrained: bool = False, num_classes: int = 1000, in_c
@register_model
-def mobilenet_v1_100_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
+def mobilenet_v1_100(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV1:
"""Get MobileNetV1 model without width scaling.
Refer to the base class `models.MobileNetV1` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v1_1.0_224"]
+ default_cfg = default_cfgs["mobilenet_v1_100"]
model = MobileNetV1(alpha=1.0, in_channels=in_channels, num_classes=num_classes, **kwargs)
if pretrained:
diff --git a/mindcv/models/mobilenet_v2.py b/mindcv/models/mobilenetv2.py
similarity index 88%
rename from mindcv/models/mobilenet_v2.py
rename to mindcv/models/mobilenetv2.py
index 7f7672a17..780841f6b 100644
--- a/mindcv/models/mobilenet_v2.py
+++ b/mindcv/models/mobilenetv2.py
@@ -15,14 +15,14 @@
__all__ = [
"MobileNetV2",
- "mobilenet_v2_140_224",
+ "mobilenet_v2_140",
"mobilenet_v2_130_224",
- "mobilenet_v2_100_224",
+ "mobilenet_v2_100",
"mobilenet_v2_100_192",
"mobilenet_v2_100_160",
"mobilenet_v2_100_128",
"mobilenet_v2_100_96",
- "mobilenet_v2_075_224",
+ "mobilenet_v2_075",
"mobilenet_v2_075_192",
"mobilenet_v2_075_160",
"mobilenet_v2_075_128",
@@ -51,70 +51,70 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "mobilenet_v2_1.4_224": _cfg(
+ "mobilenet_v2_140": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_140-98776171.ckpt"
),
- "mobilenet_v2_1.3_224": _cfg(
+ "mobilenet_v2_130_224": _cfg(
url=""
),
- "mobilenet_v2_1.0_224": _cfg(
+ "mobilenet_v2_100": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_100-d5532038.ckpt"
),
- "mobilenet_v2_1.0_192": _cfg(
+ "mobilenet_v2_100_192": _cfg(
url=""
),
- "mobilenet_v2_1.0_160": _cfg(
+ "mobilenet_v2_100_160": _cfg(
url=""
),
- "mobilenet_v2_1.0_128": _cfg(
+ "mobilenet_v2_100_128": _cfg(
url=""
),
- "mobilenet_v2_1.0_96": _cfg(
+ "mobilenet_v2_100_96": _cfg(
url=""
),
- "mobilenet_v2_0.75_224": _cfg(
+ "mobilenet_v2_075": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv2/mobilenet_v2_075-bd7bd4c4.ckpt"
),
- "mobilenet_v2_0.75_192": _cfg(
+ "mobilenet_v2_075_192": _cfg(
url=""
),
- "mobilenet_v2_0.75_160": _cfg(
+ "mobilenet_v2_075_160": _cfg(
url=""
),
- "mobilenet_v2_0.75_128": _cfg(
+ "mobilenet_v2_075_128": _cfg(
url=""
),
- "mobilenet_v2_0.75_96": _cfg(
+ "mobilenet_v2_075_96": _cfg(
url=""
),
- "mobilenet_v2_0.5_224": _cfg(
+ "mobilenet_v2_050_224": _cfg(
url=""
),
- "mobilenet_v2_0.5_192": _cfg(
+ "mobilenet_v2_050_192": _cfg(
url=""
),
- "mobilenet_v2_0.5_160": _cfg(
+ "mobilenet_v2_050_160": _cfg(
url=""
),
- "mobilenet_v2_0.5_128": _cfg(
+ "mobilenet_v2_050_128": _cfg(
url=""
),
- "mobilenet_v2_0.5_96": _cfg(
+ "mobilenet_v2_050_96": _cfg(
url=""
),
- "mobilenet_v2_0.35_224": _cfg(
+ "mobilenet_v2_035_224": _cfg(
url=""
),
- "mobilenet_v2_0.35_192": _cfg(
+ "mobilenet_v2_035_192": _cfg(
url=""
),
- "mobilenet_v2_0.35_160": _cfg(
+ "mobilenet_v2_035_160": _cfg(
url=""
),
- "mobilenet_v2_0.35_128": _cfg(
+ "mobilenet_v2_035_128": _cfg(
url=""
),
- "mobilenet_v2_0.35_96": _cfg(
+ "mobilenet_v2_035_96": _cfg(
url=""
),
}
@@ -260,11 +260,11 @@ def construct(self, x: Tensor) -> Tensor:
@register_model
-def mobilenet_v2_140_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV2:
+def mobilenet_v2_140(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV2:
"""Get MobileNetV2 model with width scaled by 1.4 and input image size of 224.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_1.4_224"]
+ default_cfg = default_cfgs["mobilenet_v2_140"]
model = MobileNetV2(alpha=1.4, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -278,7 +278,7 @@ def mobilenet_v2_130_224(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 1.3 and input image size of 224.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_1.3_224"]
+ default_cfg = default_cfgs["mobilenet_v2_130_224"]
model = MobileNetV2(alpha=1.3, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -288,11 +288,11 @@ def mobilenet_v2_130_224(pretrained: bool = False, num_classes: int = 1000, in_c
@register_model
-def mobilenet_v2_100_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV2:
+def mobilenet_v2_100(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV2:
"""Get MobileNetV2 model without width scaling and input image size of 224.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_1.0_224"]
+ default_cfg = default_cfgs["mobilenet_v2_100"]
model = MobileNetV2(alpha=1.0, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -306,7 +306,7 @@ def mobilenet_v2_100_192(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model without width scaling and input image size of 192.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_1.0_192"]
+ default_cfg = default_cfgs["mobilenet_v2_100_192"]
model = MobileNetV2(alpha=1.0, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -320,7 +320,7 @@ def mobilenet_v2_100_160(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model without width scaling and input image size of 160.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_1.0_160"]
+ default_cfg = default_cfgs["mobilenet_v2_100_160"]
model = MobileNetV2(alpha=1.0, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -334,7 +334,7 @@ def mobilenet_v2_100_128(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model without width scaling and input image size of 128.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_1.0_128"]
+ default_cfg = default_cfgs["mobilenet_v2_100_128"]
model = MobileNetV2(alpha=1.0, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -348,7 +348,7 @@ def mobilenet_v2_100_96(pretrained: bool = False, num_classes: int = 1000, in_ch
"""Get MobileNetV2 model without width scaling and input image size of 96.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_1.0_96"]
+ default_cfg = default_cfgs["mobilenet_v2_100_96"]
model = MobileNetV2(alpha=1.0, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -358,11 +358,11 @@ def mobilenet_v2_100_96(pretrained: bool = False, num_classes: int = 1000, in_ch
@register_model
-def mobilenet_v2_075_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV2:
+def mobilenet_v2_075(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> MobileNetV2:
"""Get MobileNetV2 model with width scaled by 0.75 and input image size of 224.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.75_224"]
+ default_cfg = default_cfgs["mobilenet_v2_075"]
model = MobileNetV2(alpha=0.75, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -376,7 +376,7 @@ def mobilenet_v2_075_192(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.75 and input image size of 192.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.75_192"]
+ default_cfg = default_cfgs["mobilenet_v2_075_192"]
model = MobileNetV2(alpha=0.75, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -390,7 +390,7 @@ def mobilenet_v2_075_160(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.75 and input image size of 160.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.75_160"]
+ default_cfg = default_cfgs["mobilenet_v2_075_160"]
model = MobileNetV2(alpha=0.75, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -404,7 +404,7 @@ def mobilenet_v2_075_128(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.75 and input image size of 128.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.75_128"]
+ default_cfg = default_cfgs["mobilenet_v2_075_128"]
model = MobileNetV2(alpha=0.75, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -418,7 +418,7 @@ def mobilenet_v2_075_96(pretrained: bool = False, num_classes: int = 1000, in_ch
"""Get MobileNetV2 model with width scaled by 0.75 and input image size of 96.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.75_96"]
+ default_cfg = default_cfgs["mobilenet_v2_075_96"]
model = MobileNetV2(alpha=0.75, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -432,7 +432,7 @@ def mobilenet_v2_050_224(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.5 and input image size of 224.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.5_224"]
+ default_cfg = default_cfgs["mobilenet_v2_050_224"]
model = MobileNetV2(alpha=0.5, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -446,7 +446,7 @@ def mobilenet_v2_050_192(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.5 and input image size of 192.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.5_192"]
+ default_cfg = default_cfgs["mobilenet_v2_050_192"]
model = MobileNetV2(alpha=0.5, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -460,7 +460,7 @@ def mobilenet_v2_050_160(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.5 and input image size of 160.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.5_160"]
+ default_cfg = default_cfgs["mobilenet_v2_050_160"]
model = MobileNetV2(alpha=0.5, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -474,7 +474,7 @@ def mobilenet_v2_050_128(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.5 and input image size of 128.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.5_128"]
+ default_cfg = default_cfgs["mobilenet_v2_050_128"]
model = MobileNetV2(alpha=0.5, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -488,7 +488,7 @@ def mobilenet_v2_050_96(pretrained: bool = False, num_classes: int = 1000, in_ch
"""Get MobileNetV2 model with width scaled by 0.5 and input image size of 96.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.5_96"]
+ default_cfg = default_cfgs["mobilenet_v2_050_96"]
model = MobileNetV2(alpha=0.5, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -502,7 +502,7 @@ def mobilenet_v2_035_224(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.35 and input image size of 224.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.35_224"]
+ default_cfg = default_cfgs["mobilenet_v2_035_224"]
model = MobileNetV2(alpha=0.35, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -516,7 +516,7 @@ def mobilenet_v2_035_192(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.35 and input image size of 192.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.35_192"]
+ default_cfg = default_cfgs["mobilenet_v2_035_192"]
model = MobileNetV2(alpha=0.35, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -530,7 +530,7 @@ def mobilenet_v2_035_160(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.35 and input image size of 160.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.35_160"]
+ default_cfg = default_cfgs["mobilenet_v2_035_160"]
model = MobileNetV2(alpha=0.35, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -544,7 +544,7 @@ def mobilenet_v2_035_128(pretrained: bool = False, num_classes: int = 1000, in_c
"""Get MobileNetV2 model with width scaled by 0.35 and input image size of 128.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.35_128"]
+ default_cfg = default_cfgs["mobilenet_v2_035_128"]
model = MobileNetV2(alpha=0.35, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -558,7 +558,7 @@ def mobilenet_v2_035_96(pretrained: bool = False, num_classes: int = 1000, in_ch
"""Get MobileNetV2 model with width scaled by 0.35 and input image size of 96.
Refer to the base class `models.MobileNetV2` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v2_0.35_96"]
+ default_cfg = default_cfgs["mobilenet_v2_035_96"]
model = MobileNetV2(alpha=0.35, num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
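
Note that only mobilenet_v2_140, mobilenet_v2_100 and mobilenet_v2_075 (the variants with checkpoint URLs in this hunk) drop the 224 resolution suffix; every other width/resolution combination keeps both in its key and entrypoint name. A sketch of the two naming forms side by side (defaults restate the signatures above):

    from mindcv.models import mobilenet_v2_100, mobilenet_v2_100_192

    # same width multiplier (alpha=1.0); the suffix-free name is the 224-input default
    net_224 = mobilenet_v2_100(pretrained=False)
    net_192 = mobilenet_v2_100_192(pretrained=False)
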
diff --git a/mindcv/models/mobilenet_v3.py b/mindcv/models/mobilenetv3.py
similarity index 96%
rename from mindcv/models/mobilenet_v3.py
rename to mindcv/models/mobilenetv3.py
index d489cb9c0..6d911d4e8 100644
--- a/mindcv/models/mobilenet_v3.py
+++ b/mindcv/models/mobilenetv3.py
@@ -34,14 +34,14 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "mobilenet_v3_small_1.0": _cfg(
+ "mobilenet_v3_small_100": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_small_100-509c6047.ckpt"
),
- "mobilenet_v3_large_1.0": _cfg(
+ "mobilenet_v3_large_100": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/mobilenet/mobilenetv3/mobilenet_v3_large_100-1279ad5f.ckpt"
),
- "mobilenet_v3_small_0.75": _cfg(url=""),
- "mobilenet_v3_large_0.75": _cfg(url=""),
+ "mobilenet_v3_small_075": _cfg(url=""),
+ "mobilenet_v3_large_075": _cfg(url=""),
}
@@ -251,7 +251,7 @@ def mobilenet_v3_small_100(pretrained: bool = False, num_classes: int = 1000, in
"""Get small MobileNetV3 model without width scaling.
Refer to the base class `models.MobileNetV3` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v3_small_1.0"]
+ default_cfg = default_cfgs["mobilenet_v3_small_100"]
model_args = dict(arch="small", alpha=1.0, in_channels=in_channels, num_classes=num_classes, **kwargs)
return _create_mobilenet_v3(pretrained, **dict(default_cfg=default_cfg, **model_args))
@@ -261,7 +261,7 @@ def mobilenet_v3_large_100(pretrained: bool = False, num_classes: int = 1000, in
"""Get large MobileNetV3 model without width scaling.
Refer to the base class `models.MobileNetV3` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v3_large_1.0"]
+ default_cfg = default_cfgs["mobilenet_v3_large_100"]
model_args = dict(arch="large", alpha=1.0, in_channels=in_channels, num_classes=num_classes, **kwargs)
return _create_mobilenet_v3(pretrained, **dict(default_cfg=default_cfg, **model_args))
@@ -271,7 +271,7 @@ def mobilenet_v3_small_075(pretrained: bool = False, num_classes: int = 1000, in
"""Get small MobileNetV3 model with width scaled by 0.75.
Refer to the base class `models.MobileNetV3` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v3_small_0.75"]
+ default_cfg = default_cfgs["mobilenet_v3_small_075"]
model_args = dict(arch="small", alpha=0.75, in_channels=in_channels, num_classes=num_classes, **kwargs)
return _create_mobilenet_v3(pretrained, **dict(default_cfg=default_cfg, **model_args))
@@ -281,6 +281,6 @@ def mobilenet_v3_large_075(pretrained: bool = False, num_classes: int = 1000, in
"""Get large MobileNetV3 model with width scaled by 0.75.
Refer to the base class `models.MobileNetV3` for more details.
"""
- default_cfg = default_cfgs["mobilenet_v3_large_0.75"]
+ default_cfg = default_cfgs["mobilenet_v3_large_075"]
model_args = dict(arch="large", alpha=0.75, in_channels=in_channels, num_classes=num_classes, **kwargs)
return _create_mobilenet_v3(pretrained, **dict(default_cfg=default_cfg, **model_args))
diff --git a/mindcv/models/repmlp.py b/mindcv/models/repmlp.py
index 035306001..0fa9bd6e1 100644
--- a/mindcv/models/repmlp.py
+++ b/mindcv/models/repmlp.py
@@ -1,5 +1,5 @@
"""
-MindSpore implementation of `RepMLP`.
+MindSpore implementation of `RepMLPNet`.
Refer to RepMLPNet: Hierarchical Vision MLP with Re-parameterized Locality.
"""
@@ -15,12 +15,12 @@
__all__ = [
"RepMLPNet",
- "RepMLPNet_T224",
- "RepMLPNet_T256",
- "RepMLPNet_B224",
- "RepMLPNet_B256",
- "RepMLPNet_D256",
- "RepMLPNet_L256",
+ "repmlp_t224",
+ "repmlp_t256",
+ "repmlp_b224",
+ "repmlp_b256",
+ "repmlp_d256",
+ "repmlp_l256",
]
@@ -35,12 +35,12 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "RepMLPNet_T224": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/repmlp/repmlp_t224-8dbedd00.ckpt"),
- "RepMLPNet_T256": _cfg(url="", input_size=(3, 256, 256)),
- "RepMLPNet_B224": _cfg(url=""),
- "RepMLPNet_B256": _cfg(url="", input_size=(3, 256, 256)),
- "RepMLPNet_D256": _cfg(url="", input_size=(3, 256, 256)),
- "RepMLPNet_L256": _cfg(url="", input_size=(3, 256, 256)),
+ "repmlp_t224": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/repmlp/repmlp_t224-8dbedd00.ckpt"),
+ "repmlp_t256": _cfg(url="", input_size=(3, 256, 256)),
+ "repmlp_b224": _cfg(url=""),
+ "repmlp_b256": _cfg(url="", input_size=(3, 256, 256)),
+ "repmlp_d256": _cfg(url="", input_size=(3, 256, 256)),
+ "repmlp_l256": _cfg(url="", input_size=(3, 256, 256)),
}
@@ -342,13 +342,13 @@ def _initialize_weights(self):
for name, cell in self.cells_and_names():
if isinstance(cell, nn.Conv2d):
k = cell.group / (cell.in_channels * cell.kernel_size[0] * cell.kernel_size[1])
- k = k**0.5
+ k = k ** 0.5
cell.weight.set_data(init.initializer(init.Uniform(k), cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(init.Uniform(k), cell.bias.shape, cell.bias.dtype))
elif isinstance(cell, nn.Dense):
k = 1 / cell.in_channels
- k = k**0.5
+ k = k ** 0.5
cell.weight.set_data(init.initializer(init.Uniform(k), cell.weight.shape, cell.weight.dtype))
if cell.bias is not None:
cell.bias.set_data(init.initializer(init.Uniform(k), cell.bias.shape, cell.bias.dtype))
@@ -384,11 +384,10 @@ def locality_injection(self):
@register_model
-def RepMLPNet_T224(pretrained: bool = False, image_size: int = 224, num_classes: int = 1000, in_channels=3,
- deploy=False, **kwargs):
- """Get RepMLPNet_T224 model.
- Refer to the base class `models.RepMLPNet` for more details."""
- default_cfg = default_cfgs["RepMLPNet_T224"]
+def repmlp_t224(pretrained: bool = False, image_size: int = 224, num_classes: int = 1000, in_channels=3,
+ deploy=False, **kwargs):
+ """Get repmlp_t224 model. Refer to the base class `models.RepMLPNet` for more details."""
+ default_cfg = default_cfgs["repmlp_t224"]
model = RepMLPNet(in_channels=in_channels, num_class=num_classes, channels=(64, 128, 256, 512), hs=(56, 28, 14, 7),
ws=(56, 28, 14, 7),
num_blocks=(2, 2, 6, 2), reparam_conv_k=(1, 3), sharesets_nums=(1, 4, 16, 128),
@@ -401,11 +400,11 @@ def RepMLPNet_T224(pretrained: bool = False, image_size: int = 224, num_classes:
@register_model
-def RepMLPNet_T256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
- deploy=False, **kwargs):
- """Get RepMLPNet_T256 model.
+def repmlp_t256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
+ deploy=False, **kwargs):
+ """Get repmlp_t256 model.
Refer to the base class `models.RepMLPNet` for more details."""
- default_cfg = default_cfgs["RepMLPNet_T256"]
+ default_cfg = default_cfgs["repmlp_t256"]
model = RepMLPNet(in_channels=in_channels, num_class=num_classes, channels=(64, 128, 256, 512), hs=(64, 32, 16, 8),
ws=(64, 32, 16, 8),
num_blocks=(2, 2, 6, 2), reparam_conv_k=(1, 3), sharesets_nums=(1, 4, 16, 128),
@@ -417,11 +416,11 @@ def RepMLPNet_T256(pretrained: bool = False, image_size: int = 256, num_classes:
@register_model
-def RepMLPNet_B224(pretrained: bool = False, image_size: int = 224, num_classes: int = 1000, in_channels=3,
- deploy=False, **kwargs):
- """Get RepMLPNet_B224 model.
+def repmlp_b224(pretrained: bool = False, image_size: int = 224, num_classes: int = 1000, in_channels=3,
+ deploy=False, **kwargs):
+ """Get repmlp_b224 model.
Refer to the base class `models.RepMLPNet` for more details."""
- default_cfg = default_cfgs["RepMLPNet_B224"]
+ default_cfg = default_cfgs["repmlp_b224"]
model = RepMLPNet(in_channels=in_channels, num_class=num_classes, channels=(96, 192, 384, 768), hs=(56, 28, 14, 7),
ws=(56, 28, 14, 7),
num_blocks=(2, 2, 12, 2), reparam_conv_k=(1, 3), sharesets_nums=(1, 4, 32, 128),
@@ -433,11 +432,11 @@ def RepMLPNet_B224(pretrained: bool = False, image_size: int = 224, num_classes:
@register_model
-def RepMLPNet_B256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
- deploy=False, **kwargs):
- """Get RepMLPNet_B256 model.
+def repmlp_b256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
+ deploy=False, **kwargs):
+ """Get repmlp_b256 model.
Refer to the base class `models.RepMLPNet` for more details."""
- default_cfg = default_cfgs["RepMLPNet_B256"]
+ default_cfg = default_cfgs["repmlp_b256"]
model = RepMLPNet(in_channels=in_channels, num_class=num_classes, channels=(96, 192, 384, 768), hs=(64, 32, 16, 8),
ws=(64, 32, 16, 8),
num_blocks=(2, 2, 12, 2), reparam_conv_k=(1, 3), sharesets_nums=(1, 4, 32, 128),
@@ -449,11 +448,11 @@ def RepMLPNet_B256(pretrained: bool = False, image_size: int = 256, num_classes:
@register_model
-def RepMLPNet_D256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
- deploy=False, **kwargs):
- """Get RepMLPNet_D256 model.
+def repmlp_d256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
+ deploy=False, **kwargs):
+ """Get repmlp_d256 model.
Refer to the base class `models.RepMLPNet` for more details."""
- default_cfg = default_cfgs["RepMLPNet_D256"]
+ default_cfg = default_cfgs["repmlp_d256"]
model = RepMLPNet(in_channels=in_channels, num_class=num_classes, channels=(80, 160, 320, 640), hs=(64, 32, 16, 8),
ws=(64, 32, 16, 8),
num_blocks=(2, 2, 18, 2), reparam_conv_k=(1, 3), sharesets_nums=(1, 4, 16, 128),
@@ -465,11 +464,11 @@ def RepMLPNet_D256(pretrained: bool = False, image_size: int = 256, num_classes:
@register_model
-def RepMLPNet_L256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
- deploy=False, **kwargs):
- """Get RepMLPNet_L256 model.
+def repmlp_l256(pretrained: bool = False, image_size: int = 256, num_classes: int = 1000, in_channels=3,
+ deploy=False, **kwargs):
+ """Get repmlp_l256 model.
Refer to the base class `models.RepMLPNet` for more details."""
- default_cfg = default_cfgs["RepMLPNet_L256"]
+ default_cfg = default_cfgs["repmlp_l256"]
model = RepMLPNet(in_channels=in_channels, num_class=num_classes, channels=(96, 192, 384, 768), hs=(64, 32, 16, 8),
ws=(64, 32, 16, 8),
num_blocks=(2, 2, 18, 2), reparam_conv_k=(1, 3), sharesets_nums=(1, 4, 32, 256),
@@ -484,7 +483,7 @@ def RepMLPNet_L256(pretrained: bool = False, image_size: int = 256, num_classes:
if __name__ == "__main__":
# x = Tensor(np.ones([1, 3, 3, 3]).astype(np.float32))
dummy_input = Tensor(np.ones([1, 3, 256, 256]).astype(np.float32))
- model = RepMLPNet_B256()
+ model = repmlp_b256()
origin_y = model(dummy_input)
model.locality_injection()
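
The __main__ block above already exercises the re-parameterization path under the new name; spelled out as a standalone sketch (the difference check at the end is an illustration, not part of this file):

    import numpy as np
    from mindspore import Tensor
    from mindcv.models import repmlp_b256

    dummy_input = Tensor(np.ones([1, 3, 256, 256]).astype(np.float32))
    model = repmlp_b256()
    origin_y = model(dummy_input)
    model.locality_injection()  # fold the locality (conv) branches into the FC weights
    deploy_y = model(dummy_input)
    # outputs should agree up to numerical noise after re-parameterization
    print(np.abs(origin_y.asnumpy() - deploy_y.asnumpy()).max())
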
diff --git a/mindcv/models/rexnet.py b/mindcv/models/rexnet.py
index 45fe40929..ba2850649 100644
--- a/mindcv/models/rexnet.py
+++ b/mindcv/models/rexnet.py
@@ -16,11 +16,11 @@
__all__ = [
"ReXNetV1",
- "rexnet_x09",
- "rexnet_x10",
- "rexnet_x13",
- "rexnet_x15",
- "rexnet_x20",
+ "rexnet_09",
+ "rexnet_10",
+ "rexnet_13",
+ "rexnet_15",
+ "rexnet_20",
]
@@ -36,11 +36,11 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "rexnet_x09": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_09-da498331.ckpt"),
- "rexnet_x10": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_10-c5fb2dc7.ckpt"),
- "rexnet_x13": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_13-a49c41e5.ckpt"),
- "rexnet_x15": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_15-37a931d3.ckpt"),
- "rexnet_x20": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_20-c5810914.ckpt"),
+ "rexnet_09": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_09-da498331.ckpt"),
+ "rexnet_10": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_10-c5fb2dc7.ckpt"),
+ "rexnet_13": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_13-a49c41e5.ckpt"),
+ "rexnet_15": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_15-37a931d3.ckpt"),
+ "rexnet_20": _cfg(url="https://download.mindspore.cn/toolkits/mindcv/rexnet/rexnet_20-c5810914.ckpt"),
}
@@ -267,40 +267,40 @@ def _rexnet(
@register_model
-def rexnet_x09(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
+def rexnet_09(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
"""Get ReXNet model with width multiplier of 0.9.
Refer to the base class `models.ReXNetV1` for more details.
"""
- return _rexnet("rexnet_x09", 0.9, in_channels, num_classes, pretrained, **kwargs)
+ return _rexnet("rexnet_09", 0.9, in_channels, num_classes, pretrained, **kwargs)
@register_model
-def rexnet_x10(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
+def rexnet_10(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
"""Get ReXNet model with width multiplier of 1.0.
Refer to the base class `models.ReXNetV1` for more details.
"""
- return _rexnet("rexnet_x10", 1.0, in_channels, num_classes, pretrained, **kwargs)
+ return _rexnet("rexnet_10", 1.0, in_channels, num_classes, pretrained, **kwargs)
@register_model
-def rexnet_x13(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
+def rexnet_13(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
"""Get ReXNet model with width multiplier of 1.3.
Refer to the base class `models.ReXNetV1` for more details.
"""
- return _rexnet("rexnet_x13", 1.3, in_channels, num_classes, pretrained, **kwargs)
+ return _rexnet("rexnet_13", 1.3, in_channels, num_classes, pretrained, **kwargs)
@register_model
-def rexnet_x15(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
+def rexnet_15(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
"""Get ReXNet model with width multiplier of 1.5.
Refer to the base class `models.ReXNetV1` for more details.
"""
- return _rexnet("rexnet_x15", 1.5, in_channels, num_classes, pretrained, **kwargs)
+ return _rexnet("rexnet_15", 1.5, in_channels, num_classes, pretrained, **kwargs)
@register_model
-def rexnet_x20(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
+def rexnet_20(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ReXNetV1:
"""Get ReXNet model with width multiplier of 2.0.
Refer to the base class `models.ReXNetV1` for more details.
"""
- return _rexnet("rexnet_x20", 2.0, in_channels, num_classes, pretrained, **kwargs)
+ return _rexnet("rexnet_20", 2.0, in_channels, num_classes, pretrained, **kwargs)
diff --git a/mindcv/models/shufflenetv1.py b/mindcv/models/shufflenetv1.py
index 3ce135fba..02a6f0ea5 100644
--- a/mindcv/models/shufflenetv1.py
+++ b/mindcv/models/shufflenetv1.py
@@ -12,14 +12,14 @@
__all__ = [
"ShuffleNetV1",
- "shufflenet_v1_g3_x0_5",
- "shufflenet_v1_g3_x1_0",
- "shufflenet_v1_g3_x1_5",
- "shufflenet_v1_g3_x2_0",
- "shufflenet_v1_g8_x0_5",
- "shufflenet_v1_g8_x1_0",
- "shufflenet_v1_g8_x1_5",
- "shufflenet_v1_g8_x2_0",
+ "shufflenet_v1_g3_05",
+ "shufflenet_v1_g3_10",
+ "shufflenet_v1_g3_15",
+ "shufflenet_v1_g3_20",
+ "shufflenet_v1_g8_05",
+ "shufflenet_v1_g8_10",
+ "shufflenet_v1_g8_15",
+ "shufflenet_v1_g8_20",
]
@@ -34,18 +34,18 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "shufflenet_v1_g3_0.5": _cfg(
+ "shufflenet_v1_g3_05": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_05-42cfe109.ckpt"
),
- "shufflenet_v1_g3_1.0": _cfg(
+ "shufflenet_v1_g3_10": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv1/shufflenet_v1_g3_10-245f0ccf.ckpt"
),
- "shufflenet_v1_g3_1.5": _cfg(url=""),
- "shufflenet_v1_g3_2.0": _cfg(url=""),
- "shufflenet_v1_g8_0.5": _cfg(url=""),
- "shufflenet_v1_g8_1.0": _cfg(url=""),
- "shufflenet_v1_g8_1.5": _cfg(url=""),
- "shufflenet_v1_g8_2.0": _cfg(url=""),
+ "shufflenet_v1_g3_15": _cfg(url=""),
+ "shufflenet_v1_g3_20": _cfg(url=""),
+ "shufflenet_v1_g8_05": _cfg(url=""),
+ "shufflenet_v1_g8_10": _cfg(url=""),
+ "shufflenet_v1_g8_15": _cfg(url=""),
+ "shufflenet_v1_g8_20": _cfg(url=""),
}
@@ -224,11 +224,11 @@ def construct(self, x: Tensor) -> Tensor:
@register_model
-def shufflenet_v1_g3_x0_5(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g3_05(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 0.5 and 3 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g3_0.5"]
+ default_cfg = default_cfgs["shufflenet_v1_g3_05"]
model = ShuffleNetV1(group=3, model_size="0.5x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -238,11 +238,11 @@ def shufflenet_v1_g3_x0_5(pretrained: bool = False, num_classes: int = 1000, in_
@register_model
-def shufflenet_v1_g3_x1_0(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g3_10(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 1.0 and 3 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g3_1.0"]
+ default_cfg = default_cfgs["shufflenet_v1_g3_10"]
model = ShuffleNetV1(group=3, model_size="1.0x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -252,11 +252,11 @@ def shufflenet_v1_g3_x1_0(pretrained: bool = False, num_classes: int = 1000, in_
@register_model
-def shufflenet_v1_g3_x1_5(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g3_15(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 1.5 and 3 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g3_1.5"]
+ default_cfg = default_cfgs["shufflenet_v1_g3_15"]
model = ShuffleNetV1(group=3, model_size="1.5x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -266,11 +266,11 @@ def shufflenet_v1_g3_x1_5(pretrained: bool = False, num_classes: int = 1000, in_
@register_model
-def shufflenet_v1_g3_x2_0(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g3_20(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 2.0 and 3 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g3_2.0"]
+ default_cfg = default_cfgs["shufflenet_v1_g3_20"]
model = ShuffleNetV1(group=3, model_size="2.0x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -280,11 +280,11 @@ def shufflenet_v1_g3_x2_0(pretrained: bool = False, num_classes: int = 1000, in_
@register_model
-def shufflenet_v1_g8_x0_5(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g8_05(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 0.5 and 8 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g8_0.5"]
+ default_cfg = default_cfgs["shufflenet_v1_g8_05"]
model = ShuffleNetV1(group=8, model_size="0.5x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -294,11 +294,11 @@ def shufflenet_v1_g8_x0_5(pretrained: bool = False, num_classes: int = 1000, in_
@register_model
-def shufflenet_v1_g8_x1_0(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g8_10(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 1.0 and 8 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g8_1.0"]
+ default_cfg = default_cfgs["shufflenet_v1_g8_10"]
model = ShuffleNetV1(group=8, model_size="1.0x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -308,11 +308,11 @@ def shufflenet_v1_g8_x1_0(pretrained: bool = False, num_classes: int = 1000, in_
@register_model
-def shufflenet_v1_g8_x1_5(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g8_15(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 1.5 and 8 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g8_1.5"]
+ default_cfg = default_cfgs["shufflenet_v1_g8_15"]
model = ShuffleNetV1(group=8, model_size="1.5x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -322,11 +322,11 @@ def shufflenet_v1_g8_x1_5(pretrained: bool = False, num_classes: int = 1000, in_
@register_model
-def shufflenet_v1_g8_x2_0(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
+def shufflenet_v1_g8_20(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> ShuffleNetV1:
"""Get ShuffleNetV1 model with width scaled by 2.0 and 8 groups of GPConv.
Refer to the base class `models.ShuffleNetV1` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v1_g8_2.0"]
+ default_cfg = default_cfgs["shufflenet_v1_g8_20"]
model = ShuffleNetV1(group=8, model_size="2.0x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
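
For anyone carrying the old shufflenet_v1 identifiers in YAML configs or scripts, the rename is mechanical. The mapping below is purely illustrative and not part of this patch:

    # old entrypoint / cfg key -> new uniform name
    SHUFFLENET_V1_RENAMES = {
        "shufflenet_v1_g3_x0_5": "shufflenet_v1_g3_05",
        "shufflenet_v1_g3_x1_0": "shufflenet_v1_g3_10",
        "shufflenet_v1_g3_x1_5": "shufflenet_v1_g3_15",
        "shufflenet_v1_g3_x2_0": "shufflenet_v1_g3_20",
        "shufflenet_v1_g8_x0_5": "shufflenet_v1_g8_05",
        "shufflenet_v1_g8_x1_0": "shufflenet_v1_g8_10",
        "shufflenet_v1_g8_x1_5": "shufflenet_v1_g8_15",
        "shufflenet_v1_g8_x2_0": "shufflenet_v1_g8_20",
    }

    def migrate(name: str) -> str:
        """Translate an old shufflenet_v1 name to the uniform one; new names pass through."""
        return SHUFFLENET_V1_RENAMES.get(name, name)
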
diff --git a/mindcv/models/shufflenetv2.py b/mindcv/models/shufflenetv2.py
index 15dc670ff..bc49fc2ff 100644
--- a/mindcv/models/shufflenetv2.py
+++ b/mindcv/models/shufflenetv2.py
@@ -32,16 +32,16 @@ def _cfg(url="", **kwargs):
default_cfgs = {
- "shufflenet_v2_0.5": _cfg(
+ "shufflenet_v2_x0_5": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x0_5-8c841061.ckpt"
),
- "shufflenet_v2_1.0": _cfg(
+ "shufflenet_v2_x1_0": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x1_0-0da4b7fa.ckpt"
),
- "shufflenet_v2_1.5": _cfg(
+ "shufflenet_v2_x1_5": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x1_5-00b56131.ckpt"
),
- "shufflenet_v2_2.0": _cfg(
+ "shufflenet_v2_x2_0": _cfg(
url="https://download.mindspore.cn/toolkits/mindcv/shufflenet/shufflenetv2/shufflenet_v2_x2_0-ed8e698d.ckpt"
),
}
@@ -222,7 +222,7 @@ def shufflenet_v2_x0_5(pretrained: bool = False, num_classes: int = 1000, in_cha
"""Get ShuffleNetV2 model with width scaled by 0.5.
Refer to the base class `models.ShuffleNetV2` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v2_0.5"]
+ default_cfg = default_cfgs["shufflenet_v2_x0_5"]
model = ShuffleNetV2(model_size="0.5x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -236,7 +236,7 @@ def shufflenet_v2_x1_0(pretrained: bool = False, num_classes: int = 1000, in_cha
"""Get ShuffleNetV2 model with width scaled by 1.0.
Refer to the base class `models.ShuffleNetV2` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v2_1.0"]
+ default_cfg = default_cfgs["shufflenet_v2_x1_0"]
model = ShuffleNetV2(model_size="1.0x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -250,7 +250,7 @@ def shufflenet_v2_x1_5(pretrained: bool = False, num_classes: int = 1000, in_cha
"""Get ShuffleNetV2 model with width scaled by 1.5.
Refer to the base class `models.ShuffleNetV2` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v2_1.5"]
+ default_cfg = default_cfgs["shufflenet_v2_x1_5"]
model = ShuffleNetV2(model_size="1.5x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
@@ -264,7 +264,7 @@ def shufflenet_v2_x2_0(pretrained: bool = False, num_classes: int = 1000, in_cha
"""Get ShuffleNetV2 model with width scaled by 2.0.
Refer to the base class `models.ShuffleNetV2` for more details.
"""
- default_cfg = default_cfgs["shufflenet_v2_2.0"]
+ default_cfg = default_cfgs["shufflenet_v2_x2_0"]
model = ShuffleNetV2(model_size="2.0x", num_classes=num_classes, in_channels=in_channels, **kwargs)
if pretrained:
diff --git a/mindcv/models/swin_transformer.py b/mindcv/models/swintransformer.py
similarity index 100%
rename from mindcv/models/swin_transformer.py
rename to mindcv/models/swintransformer.py
diff --git a/mindcv/models/swin_transformer_v2.py b/mindcv/models/swintransformerv2.py
similarity index 100%
rename from mindcv/models/swin_transformer_v2.py
rename to mindcv/models/swintransformerv2.py
diff --git a/mindcv/models/xcit.py b/mindcv/models/xcit.py
index a02ca7a69..2c3b6966e 100644
--- a/mindcv/models/xcit.py
+++ b/mindcv/models/xcit.py
@@ -20,7 +20,7 @@
__all__ = [
'XCiT',
- 'xcit_tiny_12_p16',
+ 'xcit_tiny_12_p16_224',
]
@@ -35,7 +35,7 @@ def _cfg(url='', **kwargs):
default_cfgs = {
- 'xcit_tiny_12_p16': _cfg(
+ 'xcit_tiny_12_p16_224': _cfg(
url='https://download.mindspore.cn/toolkits/mindcv/xcit/xcit_tiny_12_p16_224-1b1c9301.ckpt'),
}
@@ -476,11 +476,11 @@ def construct(self, x):
@register_model
-def xcit_tiny_12_p16(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> XCiT:
- """Get xcit_tiny_12_p16 model.
+def xcit_tiny_12_p16_224(pretrained: bool = False, num_classes: int = 1000, in_channels=3, **kwargs) -> XCiT:
+ """Get xcit_tiny_12_p16_224 model.
Refer to the base class 'models.XCiT' for more details.
"""
- default_cfg = default_cfgs['xcit_tiny_12_p16']
+ default_cfg = default_cfgs['xcit_tiny_12_p16_224']
model = XCiT(
patch_size=16, num_classes=num_classes, embed_dim=192, depth=12, num_heads=4, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, epsilon=1e-6), eta=1.0, tokens_norm=True, **kwargs)
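
The registered name now carries the 224 input-size suffix, matching the checkpoint file name above. Loading it by name (a sketch, assuming the usual mindcv.create_model factory):

    from mindcv import create_model

    model = create_model("xcit_tiny_12_p16_224", pretrained=False, num_classes=1000)
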
diff --git a/tests/modules/test_feature_extraction.py b/tests/modules/test_feature_extraction.py
index 710985156..d7e36965e 100644
--- a/tests/modules/test_feature_extraction.py
+++ b/tests/modules/test_feature_extraction.py
@@ -193,7 +193,7 @@ def test_feature_extraction_indices_using_feature_wrapper(mode):
5,
),
(
- "rexnet_x10",
+ "rexnet_10",
5,
),
],
diff --git a/tests/modules/test_models.py b/tests/modules/test_models.py
index 8130ed7f6..49607d87d 100644
--- a/tests/modules/test_models.py
+++ b/tests/modules/test_models.py
@@ -20,11 +20,11 @@
# TODO: the global avg pooling op used in EfficientNet is not supported for CPU.
# TODO: memory resource is limited on free github action runner, ask the PM for self-hosted runners!
model_name_list = [
- "BiTresnet50",
- "RepMLPNet_T224",
+ "BiT_resnet50",
+ "repmlp_t224",
"convit_tiny",
"convnext_tiny",
- "crossvit9",
+ "crossvit_9",
"densenet121",
"dpn92",
"edgenext_small",
@@ -34,8 +34,8 @@
"inception_v3",
"inception_v4",
"mixnet_s",
- "mnasnet0_5",
- "mobilenet_v1_025_224",
+ "mnasnet_050",
+ "mobilenet_v1_025",
"mobilenet_v2_035_128",
"mobilenet_v3_small_075",
"nasnet_a_4x1056",
@@ -48,9 +48,9 @@
"res2net50",
"resnet18",
"resnext50_32x4d",
- "rexnet_x09",
+ "rexnet_09",
"seresnet18",
- "shufflenet_v1_g3_x0_5",
+ "shufflenet_v1_g3_05",
"shufflenet_v2_x0_5",
"skresnet18",
"squeezenet1_0",