diff --git a/torchvision/models/mnasnet.py b/torchvision/models/mnasnet.py
index 65462154ab6..5846111ab1c 100644
--- a/torchvision/models/mnasnet.py
+++ b/torchvision/models/mnasnet.py
@@ -95,7 +95,7 @@ def _get_depths(alpha: float) -> List[int]:
 
 
 class MNASNet(torch.nn.Module):
-    """MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This
+    """MNASNet, as described in https://arxiv.org/abs/1807.11626. This
     implements the B1 variant of the model.
     >>> model = MNASNet(1.0, num_classes=1000)
     >>> x = torch.rand(1, 3, 224, 224)
@@ -327,7 +327,7 @@ def _mnasnet(alpha: float, weights: Optional[WeightsEnum], progress: bool, **kwa
 def mnasnet0_5(*, weights: Optional[MNASNet0_5_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
     """MNASNet with depth multiplier of 0.5 from
     `MnasNet: Platform-Aware Neural Architecture Search for Mobile
-    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.
+    <https://arxiv.org/abs/1807.11626>`_ paper.
 
     Args:
         weights (:class:`~torchvision.models.MNASNet0_5_Weights`, optional): The
@@ -355,7 +355,7 @@ def mnasnet0_5(*, weights: Optional[MNASNet0_5_Weights] = None, progress: bool =
 def mnasnet0_75(*, weights: Optional[MNASNet0_75_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
     """MNASNet with depth multiplier of 0.75 from
     `MnasNet: Platform-Aware Neural Architecture Search for Mobile
-    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.
+    <https://arxiv.org/abs/1807.11626>`_ paper.
 
     Args:
         weights (:class:`~torchvision.models.MNASNet0_75_Weights`, optional): The
@@ -383,7 +383,7 @@ def mnasnet0_75(*, weights: Optional[MNASNet0_75_Weights] = None, progress: bool
 def mnasnet1_0(*, weights: Optional[MNASNet1_0_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
     """MNASNet with depth multiplier of 1.0 from
     `MnasNet: Platform-Aware Neural Architecture Search for Mobile
-    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.
+    <https://arxiv.org/abs/1807.11626>`_ paper.
 
     Args:
         weights (:class:`~torchvision.models.MNASNet1_0_Weights`, optional): The
@@ -411,7 +411,7 @@ def mnasnet1_0(*, weights: Optional[MNASNet1_0_Weights] = None, progress: bool =
 def mnasnet1_3(*, weights: Optional[MNASNet1_3_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
     """MNASNet with depth multiplier of 1.3 from
     `MnasNet: Platform-Aware Neural Architecture Search for Mobile
-    <https://arxiv.org/pdf/1807.11626.pdf>`_ paper.
+    <https://arxiv.org/abs/1807.11626>`_ paper.
 
     Args:
         weights (:class:`~torchvision.models.MNASNet1_3_Weights`, optional): The
diff --git a/torchvision/models/resnet.py b/torchvision/models/resnet.py
index 367fc62eba9..83c0340cef7 100644
--- a/torchvision/models/resnet.py
+++ b/torchvision/models/resnet.py
@@ -682,7 +682,7 @@ class Wide_ResNet101_2_Weights(WeightsEnum):
 @register_model()
 @handle_legacy_interface(weights=("pretrained", ResNet18_Weights.IMAGENET1K_V1))
 def resnet18(*, weights: Optional[ResNet18_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
-    """ResNet-18 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.
+    """ResNet-18 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
 
     Args:
         weights (:class:`~torchvision.models.ResNet18_Weights`, optional): The
@@ -708,7 +708,7 @@ def resnet18(*, weights: Optional[ResNet18_Weights] = None, progress: bool = Tru
 @register_model()
 @handle_legacy_interface(weights=("pretrained", ResNet34_Weights.IMAGENET1K_V1))
 def resnet34(*, weights: Optional[ResNet34_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
-    """ResNet-34 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.
+    """ResNet-34 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
 
     Args:
         weights (:class:`~torchvision.models.ResNet34_Weights`, optional): The
@@ -734,7 +734,7 @@ def resnet34(*, weights: Optional[ResNet34_Weights] = None, progress: bool = Tru
 @register_model()
 @handle_legacy_interface(weights=("pretrained", ResNet50_Weights.IMAGENET1K_V1))
 def resnet50(*, weights: Optional[ResNet50_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
-    """ResNet-50 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.
+    """ResNet-50 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
 
     .. note::
        The bottleneck of TorchVision places the stride for downsampling to the second 3x3
@@ -766,7 +766,7 @@ def resnet50(*, weights: Optional[ResNet50_Weights] = None, progress: bool = Tru
 @register_model()
 @handle_legacy_interface(weights=("pretrained", ResNet101_Weights.IMAGENET1K_V1))
 def resnet101(*, weights: Optional[ResNet101_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
-    """ResNet-101 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.
+    """ResNet-101 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
 
     .. note::
        The bottleneck of TorchVision places the stride for downsampling to the second 3x3
@@ -798,7 +798,7 @@ def resnet101(*, weights: Optional[ResNet101_Weights] = None, progress: bool = T
 @register_model()
 @handle_legacy_interface(weights=("pretrained", ResNet152_Weights.IMAGENET1K_V1))
 def resnet152(*, weights: Optional[ResNet152_Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
-    """ResNet-152 from `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`__.
+    """ResNet-152 from `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`__.
 
     .. note::
        The bottleneck of TorchVision places the stride for downsampling to the second 3x3
diff --git a/torchvision/models/swin_transformer.py b/torchvision/models/swin_transformer.py
index df897e2b991..2035f659bfc 100644
--- a/torchvision/models/swin_transformer.py
+++ b/torchvision/models/swin_transformer.py
@@ -508,7 +508,7 @@ def forward(self, x: Tensor):
 class SwinTransformer(nn.Module):
     """
     Implements Swin Transformer from the `"Swin Transformer: Hierarchical Vision Transformer using
-    Shifted Windows" <https://arxiv.org/pdf/2103.14030>`_ paper.
+    Shifted Windows" <https://arxiv.org/abs/2103.14030>`_ paper.
     Args:
         patch_size (List[int]): Patch size.
         embed_dim (int): Patch embedding dimension.
@@ -804,7 +804,7 @@ class Swin_V2_B_Weights(WeightsEnum):
 def swin_t(*, weights: Optional[Swin_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
     """
     Constructs a swin_tiny architecture from
-    `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/pdf/2103.14030>`_.
+    `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/abs/2103.14030>`_.
 
     Args:
         weights (:class:`~torchvision.models.Swin_T_Weights`, optional): The
@@ -842,7 +842,7 @@ def swin_t(*, weights: Optional[Swin_T_Weights] = None, progress: bool = True, *
 def swin_s(*, weights: Optional[Swin_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
     """
     Constructs a swin_small architecture from
-    `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/pdf/2103.14030>`_.
+    `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/abs/2103.14030>`_.
 
     Args:
         weights (:class:`~torchvision.models.Swin_S_Weights`, optional): The
@@ -880,7 +880,7 @@ def swin_s(*, weights: Optional[Swin_S_Weights] = None, progress: bool = True, *
 def swin_b(*, weights: Optional[Swin_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
     """
     Constructs a swin_base architecture from
-    `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/pdf/2103.14030>`_.
+    `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows <https://arxiv.org/abs/2103.14030>`_.
 
     Args:
         weights (:class:`~torchvision.models.Swin_B_Weights`, optional): The
@@ -918,7 +918,7 @@ def swin_b(*, weights: Optional[Swin_B_Weights] = None, progress: bool = True, *
 def swin_v2_t(*, weights: Optional[Swin_V2_T_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
     """
     Constructs a swin_v2_tiny architecture from
-    `Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/pdf/2111.09883>`_.
+    `Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/abs/2111.09883>`_.
 
     Args:
         weights (:class:`~torchvision.models.Swin_V2_T_Weights`, optional): The
@@ -958,7 +958,7 @@ def swin_v2_t(*, weights: Optional[Swin_V2_T_Weights] = None, progress: bool = T
 def swin_v2_s(*, weights: Optional[Swin_V2_S_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
     """
     Constructs a swin_v2_small architecture from
-    `Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/pdf/2111.09883>`_.
+    `Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/abs/2111.09883>`_.
 
     Args:
         weights (:class:`~torchvision.models.Swin_V2_S_Weights`, optional): The
@@ -998,7 +998,7 @@ def swin_v2_s(*, weights: Optional[Swin_V2_S_Weights] = None, progress: bool = T
 def swin_v2_b(*, weights: Optional[Swin_V2_B_Weights] = None, progress: bool = True, **kwargs: Any) -> SwinTransformer:
     """
     Constructs a swin_v2_base architecture from
-    `Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/pdf/2111.09883>`_.
+    `Swin Transformer V2: Scaling Up Capacity and Resolution <https://arxiv.org/abs/2111.09883>`_.
 
     Args:
         weights (:class:`~torchvision.models.Swin_V2_B_Weights`, optional): The