
Module sagemaker_defect_detection.models.ddn


View Source
import torch

import torch.nn as nn

import torch.nn.functional as F

import torchvision

from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

from torchvision.models.detection.transform import GeneralizedRCNNTransform

from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN

from torchvision.models.detection.roi_heads import RoIHeads

from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork

from torchvision.ops import MultiScaleRoIAlign

def get_backbone(name: str) -> nn.Module:

    """

    Get official pretrained ResNet34 and ResNet50 as backbones

    Parameters

    ----------

    name : str

        Either `resnet34` or `resnet50`

    Returns

    -------

    nn.Module

        resnet34 or resnet50 PyTorch module

    Raises

    ------

    ValueError

        If an unsupported name is used

    """

    if name == "resnet34":

        return torchvision.models.resnet34(pretrained=True)

    elif name == "resnet50":

        return torchvision.models.resnet50(pretrained=True)

    else:

        raise ValueError("Unsupported backbone")

def init_weights(m) -> None:

    """

    Weight initialization

    Parameters

    ----------

    m : nn.Module

        Module visited in the recursive `nn.Module.apply` call

    """

    if isinstance(m, nn.Conv2d):

        nn.init.xavier_normal_(m.weight)

    elif isinstance(m, nn.Linear):

        m.weight.data.normal_(0.0, 0.02)

        m.bias.data.fill_(0.0)

    return

class MFN(nn.Module):

    def __init__(self, backbone: str):

        """

        Implementation of MFN model as described in

        Yu He, Kechen Song, Qinggang Meng, Yunhui Yan,

        “An End-to-end Steel Surface Defect Detection Approach via Fusing Multiple Hierarchical Features,”

        IEEE Transactions on Instrumentation and Measurement, 2020, 69(4), 1493-1504.

        Parameters

        ----------

        backbone : str

            Either `resnet34` or `resnet50`

        """

        super().__init__()

        self.backbone = get_backbone(backbone)

        # input 224x224 -> conv1 output size 112x112

        self.start_layer = nn.Sequential(

            self.backbone.conv1,  # type: ignore

            self.backbone.bn1,  # type: ignore

            self.backbone.relu,  # type: ignore

            self.backbone.maxpool,  # type: ignore

        )

        self.r2 = self.backbone.layer1  # 64/256x56x56 <- (resnet34/resnet50)

        self.r3 = self.backbone.layer2  # 128/512x28x28

        self.r4 = self.backbone.layer3  # 256/1024x14x14

        self.r5 = self.backbone.layer4  # 512/2048x7x7

        in_channel = 64 if backbone == "resnet34" else 256

        self.b2 = nn.Sequential(

            nn.Conv2d(

                in_channel, in_channel, kernel_size=3, padding=1, stride=2

            ),  # 56 -> 28; the ReLU/BatchNorm used here are not explicitly specified in the paper

            nn.BatchNorm2d(in_channel),

            nn.ReLU(inplace=True),

            nn.Conv2d(in_channel, in_channel, kernel_size=3, padding=1, stride=2),  # 28 -> 14

            nn.BatchNorm2d(in_channel),

            nn.ReLU(inplace=True),

            nn.Conv2d(in_channel, in_channel * 2, kernel_size=1, padding=0),

            nn.BatchNorm2d(in_channel * 2),

            nn.ReLU(inplace=True),

        ).apply(

            init_weights

        )  # after r2: 128/512x14x14  <-

        self.b3 = nn.MaxPool2d(2)  # after r3: 128/512x14x14  <-

        in_channel *= 2  # 128/512

        self.b4 = nn.Sequential(

            nn.Conv2d(in_channel * 2, in_channel, kernel_size=1, padding=0),

            nn.BatchNorm2d(in_channel),

            nn.ReLU(inplace=True),

        ).apply(

            init_weights

        )  # after r4: 128/512x14x14

        in_channel *= 4  # 512 / 2048

        self.b5 = nn.Sequential(

            nn.ConvTranspose2d(

                in_channel, in_channel, kernel_size=3, stride=2, padding=1, output_padding=1

            ),  # <- after r5 which is 512x7x7 -> 512x14x14

            nn.BatchNorm2d(in_channel),

            nn.ReLU(inplace=True),

            nn.Conv2d(in_channel, in_channel // 4, kernel_size=1, padding=0),

            nn.BatchNorm2d(in_channel // 4),

            nn.ReLU(inplace=True),

        ).apply(init_weights)

        self.out_channels = 512 if backbone == "resnet34" else 2048  # required for FasterRCNN

    def forward(self, x):

        x = self.start_layer(x)

        x = self.r2(x)

        b2_out = self.b2(x)

        x = self.r3(x)

        b3_out = self.b3(x)

        x = self.r4(x)

        b4_out = self.b4(x)

        x = self.r5(x)

        b5_out = self.b5(x)

        # BatchNorm works better than L2 normalize

        # out = torch.cat([F.normalize(o, p=2, dim=1) for o in (b2_out, b3_out, b4_out, b5_out)], dim=1)

        out = torch.cat((b2_out, b3_out, b4_out, b5_out), dim=1)

        return out

class Classification(nn.Module):

    """

    Classification network

    Parameters

    ----------

    backbone : str

        Either `resnet34` or `resnet50`

    num_classes : int

        Number of classes

    """

    def __init__(self, backbone: str, num_classes: int) -> None:

        super().__init__()

        self.mfn = MFN(backbone)

        self.flatten = nn.Flatten()

        self.fc = nn.Linear(self.mfn.out_channels * 14 ** 2, num_classes)

    def forward(self, x):

        return self.fc(self.flatten(self.mfn(x)))

class RPN(nn.Module):

    """

    RPN Module as described in

    Yu He, Kechen Song, Qinggang Meng, Yunhui Yan,

    “An End-to-end Steel Surface Defect Detection Approach via Fusing Multiple Hierarchical Features,”

    IEEE Transactions on Instrumentation and Measurement, 2020, 69(4), 1493-1504.

    """

    def __init__(

        self,

        out_channels: int = 512,

        rpn_pre_nms_top_n_train: int = 1000,  # torchvision default 2000,

        rpn_pre_nms_top_n_test: int = 500,  # torchvision default 1000,

        rpn_post_nms_top_n_train: int = 1000,  # torchvision default 2000,

        rpn_post_nms_top_n_test: int = 500,  # torchvision default 1000,

        rpn_nms_thresh: float = 0.7,

        rpn_fg_iou_thresh: float = 0.7,

        rpn_bg_iou_thresh: float = 0.3,

        rpn_batch_size_per_image: int = 256,

        rpn_positive_fraction: float = 0.5,

    ) -> None:

        super().__init__()

        rpn_anchor_generator = AnchorGenerator(sizes=((64, 128, 256, 512),), aspect_ratios=((0.5, 1.0, 2.0),))

        rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0])

        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)

        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)

        self.rpn = RegionProposalNetwork(

            rpn_anchor_generator,

            rpn_head,

            rpn_fg_iou_thresh,

            rpn_bg_iou_thresh,

            rpn_batch_size_per_image,

            rpn_positive_fraction,

            rpn_pre_nms_top_n,

            rpn_post_nms_top_n,

            rpn_nms_thresh,

        )

    def forward(self, *args, **kwargs):

        return self.rpn(*args, **kwargs)

class CustomTwoMLPHead(nn.Module):

    def __init__(self, in_channels: int, representation_size: int):

        super().__init__()

        self.avgpool = nn.AdaptiveAvgPool2d(7)

        self.mlp = nn.Sequential(

            nn.Linear(in_channels, representation_size),

            nn.ReLU(inplace=True),

            nn.Linear(representation_size, representation_size),

            nn.ReLU(inplace=True),

        )

    def forward(self, x):

        x = self.avgpool(x)

        x = x.flatten(start_dim=1)

        x = self.mlp(x)

        return x

class RoI(nn.Module):

    """

    ROI Module as described in

    Yu He, Kechen Song, Qinggang Meng, Yunhui Yan,

    “An End-to-end Steel Surface Defect Detection Approach via Fusing Multiple Hierarchical Features,”

    IEEE Transactions on Instrumentation and Measurement, 2020, 69(4), 1493-1504.

    """

    def __init__(

        self,

        num_classes: int,

        box_fg_iou_thresh=0.5,

        box_bg_iou_thresh=0.5,

        box_batch_size_per_image=512,

        box_positive_fraction=0.25,

        bbox_reg_weights=None,

        box_score_thresh=0.05,

        box_nms_thresh=0.5,

        box_detections_per_img=100,

    ) -> None:

        super().__init__()

        roi_pooler = MultiScaleRoIAlign(featmap_names=["0"], output_size=7, sampling_ratio=2)

        box_head = CustomTwoMLPHead(512 * 7 ** 2, 1024)

        box_predictor = FastRCNNPredictor(1024, num_classes=num_classes)

        self.roi_head = RoIHeads(

            roi_pooler,

            box_head,

            box_predictor,

            box_fg_iou_thresh,

            box_bg_iou_thresh,

            box_batch_size_per_image,

            box_positive_fraction,

            bbox_reg_weights,

            box_score_thresh,

            box_nms_thresh,

            box_detections_per_img,

        )

    def forward(self, *args, **kwargs):

        return self.roi_head(*args, **kwargs)

class Detection(GeneralizedRCNN):

    """

    Detection network as described in

    Yu He, Kechen Song, Qinggang Meng, Yunhui Yan,

    “An End-to-end Steel Surface Defect Detection Approach via Fusing Multiple Hierarchical Features,”

    IEEE Transactions on Instrumentation and Measurement, 2020, 69(4), 1493-1504.

    """

    def __init__(self, mfn, rpn, roi):

        dummy_transform = GeneralizedRCNNTransform(800, 1333, [0.0, 0.0, 0.0], [1.0, 1.0, 1.0])

        super().__init__(mfn, rpn, roi, dummy_transform)
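
The classes above compose into a complete detector in torchvision's GeneralizedRCNN style. A minimal sketch of wiring them together, assuming a resnet34 backbone (so the fused MFN feature map has 512 channels, matching the defaults hard-coded in RPN and RoI) and an illustrative num_classes of 7 (six defect classes plus background); the RPN and RoI wrappers simply delegate their forward calls:

import torch
from sagemaker_defect_detection.models.ddn import MFN, RPN, RoI, Detection

mfn = MFN("resnet34")                      # fused multilevel features, 512 channels
rpn = RPN(out_channels=mfn.out_channels)   # proposals over the fused feature map
roi = RoI(num_classes=7)                   # hypothetical: 6 defect classes + background
model = Detection(mfn, rpn, roi)           # GeneralizedRCNN with an identity-like transform

model.eval()
with torch.no_grad():
    detections = model([torch.randn(3, 224, 224)])  # list of dicts with boxes, labels, scores

In training mode the same call, given targets, returns a dictionary of RPN and RoI losses instead of detections, following the GeneralizedRCNN convention.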

Functions

get_backbone

def get_backbone(
    name: str
) -> torch.nn.modules.module.Module

Get official pretrained ResNet34 and ResNet50 as backbones

Parameters:

Name Type Description Default
name str Either resnet34 or resnet50 None

Returns:

Type Description
nn.Module resnet34 or resnet50 PyTorch module

Raises:

Type Description
ValueError If an unsupported name is used
View Source
def get_backbone(name: str) -> nn.Module:

    """

    Get official pretrained ResNet34 and ResNet50 as backbones

    Parameters

    ----------

    name : str

        Either `resnet34` or `resnet50`

    Returns

    -------

    nn.Module

        resnet34 or resnet50 PyTorch module

    Raises

    ------

    ValueError

        If an unsupported name is used

    """

    if name == "resnet34":

        return torchvision.models.resnet34(pretrained=True)

    elif name == "resnet50":

        return torchvision.models.resnet50(pretrained=True)

    else:

        raise ValueError("Unsupported backbone")
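
A minimal usage sketch (the first call downloads the ImageNet-pretrained weights via torchvision):

from sagemaker_defect_detection.models.ddn import get_backbone

backbone = get_backbone("resnet34")   # torchvision ResNet-34 with pretrained weights
# get_backbone("resnet18")            # would raise ValueError("Unsupported backbone")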

init_weights

def init_weights(
    m
) -> None

Weight initialization

Parameters:

Name Type Description Default
m nn.Module Module visited in the recursive `nn.Module.apply` call None
View Source
def init_weights(m) -> None:

    """

    Weight initialization

    Parameters

    ----------

    m : nn.Module

        Module visited in the recursive `nn.Module.apply` call

    """

    if isinstance(m, nn.Conv2d):

        nn.init.xavier_normal_(m.weight)

    elif isinstance(m, nn.Linear):

        m.weight.data.normal_(0.0, 0.02)

        m.bias.data.fill_(0.0)

    return
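
init_weights is intended to be passed to nn.Module.apply, which invokes it on every submodule, as the MFN fusion branches do; a minimal sketch:

import torch.nn as nn
from sagemaker_defect_detection.models.ddn import init_weights

head = nn.Sequential(nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace=True))
head.apply(init_weights)   # Conv2d weights -> Xavier normal; BatchNorm/ReLU are left untouched

fc = nn.Linear(64, 10)
init_weights(fc)           # Linear weights ~ N(0, 0.02), bias -> 0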

Classes

Classification

class Classification(
    backbone: str,
    num_classes: int
)

Attributes

Name Type Description Default
backbone str Either resnet34 or resnet50 None
num_classes int Number of classes None
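
A minimal instantiation sketch; the class count of 6 is an illustrative assumption, and the linear head assumes 224x224 inputs so that the fused MFN feature map is 14x14:

import torch
from sagemaker_defect_detection.models.ddn import Classification

model = Classification("resnet34", num_classes=6)  # hypothetical number of defect classes
logits = model(torch.randn(2, 3, 224, 224))        # -> torch.Size([2, 6])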

Ancestors (in MRO)

  • torch.nn.modules.module.Module

Class variables

T_destination
dump_patches

Methods

add_module

def add_module(
    self,
    name: str,
    module: 'Module'
) -> None

Adds a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters:

Name Type Description Default
name string name of the child module. The child module can be
accessed from this module using the given name None
module Module child module to be added to the module. None
View Source
    def add_module(self, name: str, module: 'Module') -> None:

        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:

            name (string): name of the child module. The child module can be

                accessed from this module using the given name

            module (Module): child module to be added to the module.

        """

        if not isinstance(module, Module) and module is not None:

            raise TypeError("{} is not a Module subclass".format(

                torch.typename(module)))

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("module name should be a string. Got {}".format(

                torch.typename(name)))

        elif hasattr(self, name) and name not in self._modules:

            raise KeyError("attribute '{}' already exists".format(name))

        elif '.' in name:

            raise KeyError("module name can't contain \".\"")

        elif name == '':

            raise KeyError("module name can't be empty string \"\"")

        self._modules[name] = module

apply

def apply(
    self: ~T,
    fn: Callable[[ForwardRef('Module')], NoneType]
) -> ~T

Applies fn recursively to every submodule (as returned by .children()) as well as self. Typical use includes initializing the parameters of a model.

Parameters:

Name Type Description Default
fn Callable function (Module -> None) to be applied to each submodule None

Returns:

Type Description
Module self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
View Source
    def apply(self: T, fn: Callable[['Module'], None]) -> T:

        r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)

        as well as self. Typical use includes initializing the parameters of a model

        (see also :ref:`nn-init-doc`).

        Args:

            fn (:class:`Module` -> None): function to be applied to each submodule

        Returns:

            Module: self

        Example::

            >>> @torch.no_grad()

            >>> def init_weights(m):

            >>>     print(m)

            >>>     if type(m) == nn.Linear:

            >>>         m.weight.fill_(1.0)

            >>>         print(m.weight)

            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

            >>> net.apply(init_weights)

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

        """

        for module in self.children():

            module.apply(fn)

        fn(self)

        return self

bfloat16

def bfloat16(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to bfloat16 datatype.

Returns:

Type Description
Module self
View Source
    def bfloat16(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)

buffers

def buffers(
    self,
    recurse: bool = True
) -> Iterator[torch.Tensor]

Returns an iterator over module buffers.

Parameters:

Name Type Description Default
recurse bool if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module. None

Yields:

Type Description
torch.Tensor module buffer
Example::
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def buffers(self, recurse: bool = True) -> Iterator[Tensor]:

        r"""Returns an iterator over module buffers.

        Args:

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            torch.Tensor: module buffer

        Example::

            >>> for buf in model.buffers():

            >>>     print(type(buf), buf.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, buf in self.named_buffers(recurse=recurse):

            yield buf

children

def children(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over immediate children modules.

Yields:

Type Description
Module a child module
View Source
    def children(self) -> Iterator['Module']:

        r"""Returns an iterator over immediate children modules.

        Yields:

            Module: a child module

        """

        for name, module in self.named_children():

            yield module

cpu

def cpu(
    self: ~T
) -> ~T

Moves all model parameters and buffers to the CPU.

Returns:

Type Description
Module self
View Source
    def cpu(self: T) -> T:

        r"""Moves all model parameters and buffers to the CPU.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cpu())

cuda

def cuda(
    self: ~T,
    device: Union[int, torch.device, NoneType] = None
) -> ~T

Moves all model parameters and buffers to the GPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized.

Parameters:

Name Type Description Default
device int if specified, all parameters will be
copied to that device None

Returns:

Type Description
Module self
View Source
    def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:

        r"""Moves all model parameters and buffers to the GPU.

        This also makes associated parameters and buffers different objects. So

        it should be called before constructing optimizer if the module will

        live on GPU while being optimized.

        Arguments:

            device (int, optional): if specified, all parameters will be

                copied to that device

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cuda(device))

double

def double(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to double datatype.

Returns:

Type Description
Module self
View Source
    def double(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``double`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.double() if t.is_floating_point() else t)

eval

def eval(
    self: ~T
) -> ~T

Sets the module in evaluation mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

This is equivalent to self.train(False).

Returns:

Type Description
Module self
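
Evaluation mode matters for this model because the MFN fusion branches contain BatchNorm layers; a short sketch (the class count is an illustrative assumption):

import torch
from sagemaker_defect_detection.models.ddn import Classification

model = Classification("resnet34", num_classes=6)  # hypothetical class count
model.eval()                                       # use BatchNorm running statistics, no updates
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))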
View Source
    def eval(self: T) -> T:

        r"""Sets the module in evaluation mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.

        Returns:

            Module: self

        """

        return self.train(False)

extra_repr

def extra_repr(
    self
) -> str

Set the extra representation of the module

To print customized extra information, you should reimplement this method in your own modules. Both single-line and multi-line strings are acceptable.

View Source
    def extra_repr(self) -> str:

        r"""Set the extra representation of the module

        To print customized extra information, you should reimplement

        this method in your own modules. Both single-line and multi-line

        strings are acceptable.

        """

        return ''

float

def float(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to float datatype.

Returns:

Type Description
Module self
View Source
    def float(self: T) -> T:

        r"""Casts all floating point parameters and buffers to float datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.float() if t.is_floating_point() else t)

forward

def forward(
    self,
    x
)
View Source
    def forward(self, x):

        return self.fc(self.flatten(self.mfn(x)))

half

def half(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to half datatype.

Returns:

Type Description
Module self
View Source
    def half(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``half`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.half() if t.is_floating_point() else t)

load_state_dict

def load_state_dict(
    self,
    state_dict: Dict[str, torch.Tensor],
    strict: bool = True
)

Copies parameters and buffers from state_dict into this module and its descendants. If strict is True, then the keys of state_dict must exactly match the keys returned by this module's state_dict() function.

Parameters:

Name Type Description Default
state_dict dict a dict containing parameters and
persistent buffers. None
strict bool whether to strictly enforce that the keys
in state_dict match the keys returned by this module's
state_dict() function. Default: True None

Returns:

Type Description
NamedTuple named tuple with missing_keys and unexpected_keys fields:
* missing_keys is a list of str containing the missing keys
* unexpected_keys is a list of str containing the unexpected keys
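
A hedged sketch of round-tripping this model's weights (the checkpoint path and class count are illustrative):

import torch
from sagemaker_defect_detection.models.ddn import Classification

model = Classification("resnet34", num_classes=6)          # hypothetical class count
torch.save(model.state_dict(), "classification_ddn.pth")   # illustrative file name
model.load_state_dict(torch.load("classification_ddn.pth"), strict=True)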
View Source
    def load_state_dict(self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]],

                        strict: bool = True):

        r"""Copies parameters and buffers from :attr:`state_dict` into

        this module and its descendants. If :attr:`strict` is ``True``, then

        the keys of :attr:`state_dict` must exactly match the keys returned

        by this module's :meth:`~torch.nn.Module.state_dict` function.

        Arguments:

            state_dict (dict): a dict containing parameters and

                persistent buffers.

            strict (bool, optional): whether to strictly enforce that the keys

                in :attr:`state_dict` match the keys returned by this module's

                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

        Returns:

            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:

                * **missing_keys** is a list of str containing the missing keys

                * **unexpected_keys** is a list of str containing the unexpected keys

        """

        missing_keys = []

        unexpected_keys = []

        error_msgs = []

        # copy state_dict so _load_from_state_dict can modify it

        metadata = getattr(state_dict, '_metadata', None)

        state_dict = state_dict.copy()

        if metadata is not None:

            state_dict._metadata = metadata

        def load(module, prefix=''):

            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})

            module._load_from_state_dict(

                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)

            for name, child in module._modules.items():

                if child is not None:

                    load(child, prefix + name + '.')

        load(self)

        load = None  # break load->load reference cycle

        if strict:

            if len(unexpected_keys) > 0:

                error_msgs.insert(

                    0, 'Unexpected key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))

            if len(missing_keys) > 0:

                error_msgs.insert(

                    0, 'Missing key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in missing_keys)))

        if len(error_msgs) > 0:

            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

                               self.__class__.__name__, "\n\t".join(error_msgs)))

        return _IncompatibleKeys(missing_keys, unexpected_keys)

modules

def modules(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over all modules in the network.

Yields:

Type Description
Module a module in the network
Note:
Duplicate modules are returned only once. In the following
example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
        print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
View Source
    def modules(self) -> Iterator['Module']:

        r"""Returns an iterator over all modules in the network.

        Yields:

            Module: a module in the network

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.modules()):

                    print(idx, '->', m)

            0 -> Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            1 -> Linear(in_features=2, out_features=2, bias=True)

        """

        for name, module in self.named_modules():

            yield module

named_buffers

def named_buffers(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module buffers, yielding both the

name of the buffer as well as the buffer itself.

Parameters:

Name Type Description Default
prefix str prefix to prepend to all buffer names. None
recurse bool if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module. None

Yields:

Type Description
None (string, torch.Tensor): Tuple containing the name and buffer

Example::

>>> for name, buf in self.named_buffers():
>>>    if name in ['running_var']:
>>>        print(buf.size())
View Source
    def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module buffers, yielding both the

        name of the buffer as well as the buffer itself.

        Args:

            prefix (str): prefix to prepend to all buffer names.

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            (string, torch.Tensor): Tuple containing the name and buffer

        Example::

            >>> for name, buf in self.named_buffers():

            >>>    if name in ['running_var']:

            >>>        print(buf.size())

        """

        gen = self._named_members(

            lambda module: module._buffers.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

named_children

def named_children(
    self
) -> Iterator[Tuple[str, ForwardRef('Module')]]

Returns an iterator over immediate children modules, yielding both

the name of the module as well as the module itself.

Yields:

Type Description
None (string, Module): Tuple containing a name and child module

Example::

>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)
View Source
    def named_children(self) -> Iterator[Tuple[str, 'Module']]:

        r"""Returns an iterator over immediate children modules, yielding both

        the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple containing a name and child module

        Example::

            >>> for name, module in model.named_children():

            >>>     if name in ['conv4', 'conv5']:

            >>>         print(module)

        """

        memo = set()

        for name, module in self._modules.items():

            if module is not None and module not in memo:

                memo.add(module)

                yield name, module

named_modules

def named_modules(
    self,
    memo: Union[Set[ForwardRef('Module')], NoneType] = None,
    prefix: str = ''
)

Returns an iterator over all modules in the network, yielding

both the name of the module as well as the module itself.

Yields:

Type Description
None (string, Module): Tuple of name and module

Note: Duplicate modules are returned only once. In the following example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
        print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
View Source
    def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = ''):

        r"""Returns an iterator over all modules in the network, yielding

        both the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple of name and module

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.named_modules()):

                    print(idx, '->', m)

            0 -> ('', Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            ))

            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

        """

        if memo is None:

            memo = set()

        if self not in memo:

            memo.add(self)

            yield prefix, self

            for name, module in self._modules.items():

                if module is None:

                    continue

                submodule_prefix = prefix + ('.' if prefix else '') + name

                for m in module.named_modules(memo, submodule_prefix):

                    yield m

named_parameters

def named_parameters(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module parameters, yielding both the

name of the parameter as well as the parameter itself.

Parameters:

Name Type Description Default
prefix str prefix to prepend to all parameter names. None
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

Type Description
None (string, Parameter): Tuple containing the name and parameter

Example::

>>> for name, param in self.named_parameters():
>>>    if name in ['bias']:
>>>        print(param.size())
View Source
    def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module parameters, yielding both the

        name of the parameter as well as the parameter itself.

        Args:

            prefix (str): prefix to prepend to all parameter names.

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            (string, Parameter): Tuple containing the name and parameter

        Example::

            >>> for name, param in self.named_parameters():

            >>>    if name in ['bias']:

            >>>        print(param.size())

        """

        gen = self._named_members(

            lambda module: module._parameters.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

parameters

def parameters(
    self,
    recurse: bool = True
) -> Iterator[torch.nn.parameter.Parameter]

Returns an iterator over module parameters.

This is typically passed to an optimizer.

Parameters:

Name Type Description Default
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

Type Description
Parameter module parameter
Example::
>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:

        r"""Returns an iterator over module parameters.

        This is typically passed to an optimizer.

        Args:

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            Parameter: module parameter

        Example::

            >>> for param in model.parameters():

            >>>     print(type(param), param.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, param in self.named_parameters(recurse=recurse):

            yield param

register_backward_hook

def register_backward_hook(
    self,
    hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]
) -> torch.utils.hooks.RemovableHandle

Registers a backward hook on the module.

.. warning ::

The current implementation will not have the presented behavior
for complex :class:`Module` that perform many operations.
In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only
contain the gradients for a subset of the inputs and outputs.
For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`
directly on a specific input or output to get the required gradients.

The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature::

hook(module, grad_input, grad_output) -> Tensor or None

The grad_input and grad_output may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to the input that will be used in place of grad_input in subsequent computations. grad_input will only correspond to the inputs given as positional arguments.

Returns:

Type Description
torch.utils.hooks.RemovableHandle a handle that can be used to remove the added hook by calling
handle.remove()
View Source
    def register_backward_hook(

        self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]

    ) -> RemovableHandle:

        r"""Registers a backward hook on the module.

        .. warning ::

            The current implementation will not have the presented behavior

            for complex :class:`Module` that perform many operations.

            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only

            contain the gradients for a subset of the inputs and outputs.

            For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`

            directly on a specific input or output to get the required gradients.

        The hook will be called every time the gradients with respect to module

        inputs are computed. The hook should have the following signature::

            hook(module, grad_input, grad_output) -> Tensor or None

        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the

        module has multiple inputs or outputs. The hook should not modify its

        arguments, but it can optionally return a new gradient with respect to

        input that will be used in place of :attr:`grad_input` in subsequent

        computations. :attr:`grad_input` will only correspond to the inputs given

        as positional arguments.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._backward_hooks)

        self._backward_hooks[handle.id] = hook

        return handle

register_buffer

def register_buffer(
    self,
    name: str,
    tensor: torch.Tensor,
    persistent: bool = True
) -> None

Adds a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm's running_mean is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's state_dict.

Buffers can be accessed as attributes using given names.

Parameters:

Name Type Description Default
name string name of the buffer. The buffer can be accessed
from this module using the given name None
tensor Tensor buffer to be registered. None
persistent bool whether the buffer is part of this module's state_dict. None

Example::

>>> self.register_buffer('running_mean', torch.zeros(num_features))
View Source
    def register_buffer(self, name: str, tensor: Tensor, persistent: bool = True) -> None:

        r"""Adds a buffer to the module.

        This is typically used to register a buffer that should not to be

        considered a model parameter. For example, BatchNorm's ``running_mean``

        is not a parameter, but is part of the module's state. Buffers, by

        default, are persistent and will be saved alongside parameters. This

        behavior can be changed by setting :attr:`persistent` to ``False``. The

        only difference between a persistent buffer and a non-persistent buffer

        is that the latter will not be a part of this module's

        :attr:`state_dict`.

        Buffers can be accessed as attributes using given names.

        Args:

            name (string): name of the buffer. The buffer can be accessed

                from this module using the given name

            tensor (Tensor): buffer to be registered.

            persistent (bool): whether the buffer is part of this module's

                :attr:`state_dict`.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """

        if persistent is False and isinstance(self, torch.jit.ScriptModule):

            raise RuntimeError("ScriptModule does not support non-persistent buffers")

        if '_buffers' not in self.__dict__:

            raise AttributeError(

                "cannot assign buffer before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("buffer name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("buffer name can't contain \".\"")

        elif name == '':

            raise KeyError("buffer name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._buffers:

            raise KeyError("attribute '{}' already exists".format(name))

        elif tensor is not None and not isinstance(tensor, torch.Tensor):

            raise TypeError("cannot assign '{}' object to buffer '{}' "

                            "(torch Tensor or None required)"

                            .format(torch.typename(tensor), name))

        else:

            self._buffers[name] = tensor

            if persistent:

                self._non_persistent_buffers_set.discard(name)

            else:

                self._non_persistent_buffers_set.add(name)

register_forward_hook

def register_forward_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward hook on the module.

The hook will be called every time after forward has computed an output. It should have the following signature::

hook(module, input, output) -> None or modified output

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input in place, but that will not have an effect on forward since this is called after forward has been executed.

Returns:

Type Description
torch.utils.hooks.RemovableHandle a handle that can be used to remove the added hook by calling
handle.remove()
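
For example, a hook can expose the fused MFN feature map without changing forward; a minimal sketch (the class count is an illustrative assumption):

import torch
from sagemaker_defect_detection.models.ddn import Classification

def shape_hook(module, inputs, output):
    print("MFN output:", tuple(output.shape))      # (1, 512, 14, 14) for resnet34 and 224x224 input

model = Classification("resnet34", num_classes=6)  # hypothetical class count
handle = model.mfn.register_forward_hook(shape_hook)
model(torch.randn(1, 3, 224, 224))
handle.remove()                                    # detach the hook when done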

View Source
    def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward hook on the module.

        The hook will be called every time after :func:`forward` has computed an output.

        It should have the following signature::

            hook(module, input, output) -> None or modified output

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the output. It can modify the input inplace but

        it will not have effect on forward since this is called after

        :func:`forward` is called.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_hooks)

        self._forward_hooks[handle.id] = hook

        return handle

register_forward_pre_hook

def register_forward_pre_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward pre-hook on the module.

The hook will be called every time before forward is invoked. It should have the following signature::

hook(module, input) -> None or modified input

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned(unless that value is already a tuple).

Returns:

Type Description
torch.utils.hooks.RemovableHandle a handle that can be used to remove the added hook by calling
handle.remove()
View Source
    def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward pre-hook on the module.

        The hook will be called every time before :func:`forward` is invoked.

        It should have the following signature::

            hook(module, input) -> None or modified input

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the input. User can either return a tuple or a

        single modified value in the hook. We will wrap the value into a tuple

        if a single value is returned(unless that value is already a tuple).

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_pre_hooks)

        self._forward_pre_hooks[handle.id] = hook

        return handle

register_parameter

def register_parameter(
    self,
    name: str,
    param: torch.nn.parameter.Parameter
) -> None

Adds a parameter to the module.

The parameter can be accessed as an attribute using given name.

Parameters:

Name Type Description Default
name string name of the parameter. The parameter can be accessed
from this module using the given name None
param Parameter parameter to be added to the module. None
View Source
    def register_parameter(self, name: str, param: Parameter) -> None:

        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using given name.

        Args:

            name (string): name of the parameter. The parameter can be accessed

                from this module using the given name

            param (Parameter): parameter to be added to the module.

        """

        if '_parameters' not in self.__dict__:

            raise AttributeError(

                "cannot assign parameter before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("parameter name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("parameter name can't contain \".\"")

        elif name == '':

            raise KeyError("parameter name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._parameters:

            raise KeyError("attribute '{}' already exists".format(name))

        if param is None:

            self._parameters[name] = None

        elif not isinstance(param, Parameter):

            raise TypeError("cannot assign '{}' object to parameter '{}' "

                            "(torch.nn.Parameter or None required)"

                            .format(torch.typename(param), name))

        elif param.grad_fn:

            raise ValueError(

                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "

                "parameters must be created explicitly. To express '{0}' "

                "as a function of another Tensor, compute the value in "

                "the forward() method.".format(name))

        else:

            self._parameters[name] = param

requires_grad_

def requires_grad_(
    self: ~T,
    requires_grad: bool = True
) -> ~T

Change if autograd should record operations on parameters in this

module.

This method sets the parameters' requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

Parameters:

Name Type Description Default
requires_grad bool whether autograd should record operations on
parameters in this module. Default: True. None

Returns:

Type Description
Module self
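
In this model a typical use is freezing the pretrained ResNet weights while finetuning the fusion branches and classifier head; a sketch under that assumption:

import torch
from sagemaker_defect_detection.models.ddn import Classification

model = Classification("resnet34", num_classes=6)  # hypothetical class count
model.mfn.backbone.requires_grad_(False)           # start_layer and r2-r5 share these parameters
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(trainable, lr=1e-3)    # only b2-b5 and the fc head are updated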
View Source
    def requires_grad_(self: T, requires_grad: bool = True) -> T:

        r"""Change if autograd should record operations on parameters in this

        module.

        This method sets the parameters' :attr:`requires_grad` attributes

        in-place.

        This method is helpful for freezing part of the module for finetuning

        or training parts of a model individually (e.g., GAN training).

        Args:

            requires_grad (bool): whether autograd should record operations on

                                  parameters in this module. Default: ``True``.

        Returns:

            Module: self

        """

        for p in self.parameters():

            p.requires_grad_(requires_grad)

        return self

share_memory

def share_memory(
    self: ~T
) -> ~T
View Source
    def share_memory(self: T) -> T:

        return self._apply(lambda t: t.share_memory_())

state_dict

def state_dict(
    self,
    destination=None,
    prefix='',
    keep_vars=False
)

Returns a dictionary containing a whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names.

Returns:

Type Description
dict a dictionary containing a whole state of the module

Example::

>>> module.state_dict().keys()
['bias', 'weight']
View Source
    def state_dict(self, destination=None, prefix='', keep_vars=False):

        r"""Returns a dictionary containing a whole state of the module.

        Both parameters and persistent buffers (e.g. running averages) are

        included. Keys are corresponding parameter and buffer names.

        Returns:

            dict:

                a dictionary containing a whole state of the module

        Example::

            >>> module.state_dict().keys()

            ['bias', 'weight']

        """

        if destination is None:

            destination = OrderedDict()

            destination._metadata = OrderedDict()

        destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version)

        self._save_to_state_dict(destination, prefix, keep_vars)

        for name, module in self._modules.items():

            if module is not None:

                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)

        for hook in self._state_dict_hooks.values():

            hook_result = hook(self, destination, prefix, local_metadata)

            if hook_result is not None:

                destination = hook_result

        return destination

to

def to(
    self,
    *args,
    **kwargs
)

Moves and/or casts the parameters and buffers.

This can be called as

.. function:: to(device=None, dtype=None, non_blocking=False)

.. function:: to(dtype, non_blocking=False)

.. function:: to(tensor, non_blocking=False)

.. function:: to(memory_format=torch.channels_last)

Its signature is similar to torch.Tensor.to, but only accepts floating point dtypes. In addition, this method will only cast the floating point parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

.. note:: This method modifies the module in-place.

Parameters:

Name Type Description Default
device torch.device the desired device of the parameters
and buffers in this module None
dtype torch.dtype the desired floating point type of
the floating point parameters and buffers in this module None
tensor torch.Tensor Tensor whose dtype and device are the desired
dtype and device for all parameters and buffers in this module None
memory_format torch.memory_format the desired memory
format for 4D parameters and buffers in this module (keyword
only argument) None

Returns:

Type Description
Module self
Example::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)
View Source
    def to(self, *args, **kwargs):

        r"""Moves and/or casts the parameters and buffers.

        This can be called as

        .. function:: to(device=None, dtype=None, non_blocking=False)

        .. function:: to(dtype, non_blocking=False)

        .. function:: to(tensor, non_blocking=False)

        .. function:: to(memory_format=torch.channels_last)

        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts

        floating point desired :attr:`dtype` s. In addition, this method will

        only cast the floating point parameters and buffers to :attr:`dtype`

        (if given). The integral parameters and buffers will be moved

        :attr:`device`, if that is given, but with dtypes unchanged. When

        :attr:`non_blocking` is set, it tries to convert/move asynchronously

        with respect to the host if possible, e.g., moving CPU Tensors with

        pinned memory to CUDA devices.

        See below for examples.

        .. note::

            This method modifies the module in-place.

        Args:

            device (:class:`torch.device`): the desired device of the parameters

                and buffers in this module

            dtype (:class:`torch.dtype`): the desired floating point type of

                the floating point parameters and buffers in this module

            tensor (torch.Tensor): Tensor whose dtype and device are the desired

                dtype and device for all parameters and buffers in this module

            memory_format (:class:`torch.memory_format`): the desired memory

                format for 4D parameters and buffers in this module (keyword

                only argument)

        Returns:

            Module: self

        Example::

            >>> linear = nn.Linear(2, 2)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]])

            >>> linear.to(torch.double)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]], dtype=torch.float64)

            >>> gpu1 = torch.device("cuda:1")

            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')

            >>> cpu = torch.device("cpu")

            >>> linear.to(cpu)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16)

        """

        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if dtype is not None:

            if not dtype.is_floating_point:

                raise TypeError('nn.Module.to only accepts floating point '

                                'dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):

            if convert_to_format is not None and t.dim() == 4:

                return t.to(device, dtype if t.is_floating_point() else None, non_blocking, memory_format=convert_to_format)

            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)

        return self._apply(convert)

train

def train(
    self: ~T,
    mode: bool = True
) -> ~T

Sets the module in training mode.

This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:Dropout, :class:BatchNorm, etc.

Parameters:

mode (bool): whether to set training mode (True) or evaluation mode (False). Default: True.

Returns:

Type Description
Module self
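
A minimal usage sketch: switch into training mode for optimization and back to evaluation mode afterwards.

    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
    model.train()        # Dropout/BatchNorm use their training behavior
    # ... run optimization steps ...
    model.eval()         # switch back for validation or inference
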
View Source
    def train(self: T, mode: bool = True) -> T:

        r"""Sets the module in training mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        Args:

            mode (bool): whether to set training mode (``True``) or evaluation

                         mode (``False``). Default: ``True``.

        Returns:

            Module: self

        """

        self.training = mode

        for module in self.children():

            module.train(mode)

        return self

type

def type(
    self: ~T,
    dst_type: Union[torch.dtype, str]
) -> ~T

Casts all parameters and buffers to :attr:dst_type.

Parameters:

dst_type (type or string): the desired type

Returns:

Type Description
Module self
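
A short sketch: type accepts either a torch.dtype or the string name of a tensor type.

    import torch
    import torch.nn as nn

    model = nn.Linear(2, 2)
    model.type(torch.float64)          # equivalent to model.double()
    model.type("torch.FloatTensor")    # string form is also accepted
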
View Source
    def type(self: T, dst_type: Union[dtype, str]) -> T:

        r"""Casts all parameters and buffers to :attr:`dst_type`.

        Arguments:

            dst_type (type or string): the desired type

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.type(dst_type))

zero_grad

def zero_grad(
    self
) -> None

Sets gradients of all model parameters to zero.
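
A minimal sketch of the usual optimization step, where gradients are cleared before each backward pass:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    loss = model(torch.randn(8, 4)).sum()
    model.zero_grad()        # clear stale gradients (optimizer.zero_grad() works too)
    loss.backward()
    optimizer.step()
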

View Source
    def zero_grad(self) -> None:

        r"""Sets gradients of all model parameters to zero."""

        if getattr(self, '_is_replica', False):

            warnings.warn(

                "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "

                "The parameters are copied (in a differentiable manner) from the original module. "

                "This means they are not leaf nodes in autograd and so don't accumulate gradients. "

                "If you need gradients in your forward method, consider using autograd.grad instead.")

        for p in self.parameters():

            if p.grad is not None:

                p.grad.detach_()

                p.grad.zero_()

CustomTwoMLPHead

class CustomTwoMLPHead(
    in_channels: int,
    representation_size: int
)

Ancestors (in MRO)

  • torch.nn.modules.module.Module

Class variables

T_destination
dump_patches

Methods

add_module

def add_module(
    self,
    name: str,
    module: 'Module'
) -> None

Adds a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters:

name (string): name of the child module. The child module can be accessed from this module using the given name.
module (Module): child module to be added to the module.
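
A short sketch of attaching children by name to a container module:

    import torch.nn as nn

    block = nn.Module()
    block.add_module("conv", nn.Conv2d(3, 16, kernel_size=3, padding=1))
    block.add_module("act", nn.ReLU(inplace=True))
    print(block.conv)    # the child is now accessible as an attribute
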
View Source
    def add_module(self, name: str, module: 'Module') -> None:

        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:

            name (string): name of the child module. The child module can be

                accessed from this module using the given name

            module (Module): child module to be added to the module.

        """

        if not isinstance(module, Module) and module is not None:

            raise TypeError("{} is not a Module subclass".format(

                torch.typename(module)))

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("module name should be a string. Got {}".format(

                torch.typename(name)))

        elif hasattr(self, name) and name not in self._modules:

            raise KeyError("attribute '{}' already exists".format(name))

        elif '.' in name:

            raise KeyError("module name can't contain \".\"")

        elif name == '':

            raise KeyError("module name can't be empty string \"\"")

        self._modules[name] = module

apply

def apply(
    self: ~T,
    fn: Callable[[ForwardRef('Module')], NoneType]
) -> ~T

Applies fn recursively to every submodule (as returned by .children())

as well as self. Typical use includes initializing the parameters of a model (see also :ref:nn-init-doc).

Parameters:

fn (Module -> None): function to be applied to each submodule

Returns:

Type Description
Module self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
View Source
    def apply(self: T, fn: Callable[['Module'], None]) -> T:

        r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)

        as well as self. Typical use includes initializing the parameters of a model

        (see also :ref:`nn-init-doc`).

        Args:

            fn (:class:`Module` -> None): function to be applied to each submodule

        Returns:

            Module: self

        Example::

            >>> @torch.no_grad()

            >>> def init_weights(m):

            >>>     print(m)

            >>>     if type(m) == nn.Linear:

            >>>         m.weight.fill_(1.0)

            >>>         print(m.weight)

            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

            >>> net.apply(init_weights)

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

        """

        for module in self.children():

            module.apply(fn)

        fn(self)

        return self

bfloat16

def bfloat16(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to bfloat16 datatype.

Returns:

Type Description
Module self
View Source
    def bfloat16(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)

buffers

def buffers(
    self,
    recurse: bool = True
) -> Iterator[torch.Tensor]

Returns an iterator over module buffers.

Parameters:

recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields:

Type Description
torch.Tensor module buffer
Example::
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def buffers(self, recurse: bool = True) -> Iterator[Tensor]:

        r"""Returns an iterator over module buffers.

        Args:

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            torch.Tensor: module buffer

        Example::

            >>> for buf in model.buffers():

            >>>     print(type(buf), buf.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, buf in self.named_buffers(recurse=recurse):

            yield buf

children

def children(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over immediate children modules.

Yields:

Type Description
Module a child module
View Source
    def children(self) -> Iterator['Module']:

        r"""Returns an iterator over immediate children modules.

        Yields:

            Module: a child module

        """

        for name, module in self.named_children():

            yield module

cpu

def cpu(
    self: ~T
) -> ~T

Moves all model parameters and buffers to the CPU.

Returns:

Type Description
Module self
View Source
    def cpu(self: T) -> T:

        r"""Moves all model parameters and buffers to the CPU.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cpu())

cuda

def cuda(
    self: ~T,
    device: Union[int, torch.device, NoneType] = None
) -> ~T

Moves all model parameters and buffers to the GPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized.

Parameters:

device (int, optional): if specified, all parameters will be copied to that device.

Returns:

Type Description
Module self
View Source
    def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:

        r"""Moves all model parameters and buffers to the GPU.

        This also makes associated parameters and buffers different objects. So

        it should be called before constructing optimizer if the module will

        live on GPU while being optimized.

        Arguments:

            device (int, optional): if specified, all parameters will be

                copied to that device

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cuda(device))

double

def double(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to double datatype.

Returns:

Type Description
Module self
View Source
    def double(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``double`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.double() if t.is_floating_point() else t)

eval

def eval(
    self: ~T
) -> ~T

Sets the module in evaluation mode.

This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:Dropout, :class:BatchNorm, etc.

This is equivalent with :meth:self.train(False) <torch.nn.Module.train>.

Returns:

Type Description
Module self
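
A minimal inference sketch; eval is usually combined with torch.no_grad():

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 4), nn.Dropout(p=0.5))
    model.eval()                       # Dropout becomes a no-op, BatchNorm uses running stats
    with torch.no_grad():              # additionally disable autograd bookkeeping
        out = model(torch.randn(2, 4))
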
View Source
    def eval(self: T) -> T:

        r"""Sets the module in evaluation mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.

        Returns:

            Module: self

        """

        return self.train(False)

extra_repr

def extra_repr(
    self
) -> str

Set the extra representation of the module

To print customized extra information, you should reimplement this method in your own modules. Both single-line and multi-line strings are acceptable.
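
A small sketch of overriding extra_repr in a custom module:

    import torch.nn as nn

    class Scale(nn.Module):
        def __init__(self, factor: float):
            super().__init__()
            self.factor = factor

        def extra_repr(self) -> str:
            # this string appears inside print(module) / repr(module)
            return f"factor={self.factor}"

    print(Scale(2.0))    # Scale(factor=2.0)
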

View Source
    def extra_repr(self) -> str:

        r"""Set the extra representation of the module

        To print customized extra information, you should reimplement

        this method in your own modules. Both single-line and multi-line

        strings are acceptable.

        """

        return ''

float

def float(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to float datatype.

Returns:

Type Description
Module self
View Source
    def float(self: T) -> T:

        r"""Casts all floating point parameters and buffers to float datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.float() if t.is_floating_point() else t)

forward

def forward(
    self,
    x
)
View Source
    def forward(self, x):

        x = self.avgpool(x)

        x = x.flatten(start_dim=1)

        x = self.mlp(x)

        return x
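
An illustrative call based only on the forward shown above; the constructor arguments and tensor shapes below are assumptions, since the __init__ of CustomTwoMLPHead is not reproduced here.

    import torch

    head = CustomTwoMLPHead(in_channels=512, representation_size=1024)   # values assumed
    roi_features = torch.randn(8, 512, 7, 7)    # (num_rois, C, H, W) RoI-pooled features, shapes assumed
    box_features = head(roi_features)           # avgpool -> flatten -> mlp, per the source above
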

half

def half(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to half datatype.

Returns:

Type Description
Module self
View Source
    def half(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``half`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.half() if t.is_floating_point() else t)

load_state_dict

def load_state_dict(
    self,
    state_dict: Dict[str, torch.Tensor],
    strict: bool = True
)

Copies parameters and buffers from :attr:state_dict into

this module and its descendants. If :attr:strict is True, then the keys of :attr:state_dict must exactly match the keys returned by this module's :meth:~torch.nn.Module.state_dict function.

Parameters:

state_dict (dict): a dict containing parameters and persistent buffers.
strict (bool, optional): whether to strictly enforce that the keys in :attr:state_dict match the keys returned by this module's :meth:~torch.nn.Module.state_dict function. Default: True.

Returns:

NamedTuple with missing_keys and unexpected_keys fields:
* missing_keys is a list of str containing the missing keys
* unexpected_keys is a list of str containing the unexpected keys
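
A minimal round-trip sketch using state_dict together with load_state_dict:

    import torch
    import torch.nn as nn

    model = nn.Linear(2, 2)
    torch.save(model.state_dict(), "checkpoint.pt")

    restored = nn.Linear(2, 2)
    missing, unexpected = restored.load_state_dict(torch.load("checkpoint.pt"), strict=True)
    assert not missing and not unexpected    # all keys matched
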
View Source
    def load_state_dict(self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]],

                        strict: bool = True):

        r"""Copies parameters and buffers from :attr:`state_dict` into

        this module and its descendants. If :attr:`strict` is ``True``, then

        the keys of :attr:`state_dict` must exactly match the keys returned

        by this module's :meth:`~torch.nn.Module.state_dict` function.

        Arguments:

            state_dict (dict): a dict containing parameters and

                persistent buffers.

            strict (bool, optional): whether to strictly enforce that the keys

                in :attr:`state_dict` match the keys returned by this module's

                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

        Returns:

            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:

                * **missing_keys** is a list of str containing the missing keys

                * **unexpected_keys** is a list of str containing the unexpected keys

        """

        missing_keys = []

        unexpected_keys = []

        error_msgs = []

        # copy state_dict so _load_from_state_dict can modify it

        metadata = getattr(state_dict, '_metadata', None)

        state_dict = state_dict.copy()

        if metadata is not None:

            state_dict._metadata = metadata

        def load(module, prefix=''):

            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})

            module._load_from_state_dict(

                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)

            for name, child in module._modules.items():

                if child is not None:

                    load(child, prefix + name + '.')

        load(self)

        load = None  # break load->load reference cycle

        if strict:

            if len(unexpected_keys) > 0:

                error_msgs.insert(

                    0, 'Unexpected key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))

            if len(missing_keys) > 0:

                error_msgs.insert(

                    0, 'Missing key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in missing_keys)))

        if len(error_msgs) > 0:

            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

                               self.__class__.__name__, "\n\t".join(error_msgs)))

        return _IncompatibleKeys(missing_keys, unexpected_keys)

modules

def modules(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over all modules in the network.

Yields:

Type Description
Module a module in the network
Note:
Duplicate modules are returned only once. In the following
example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
        print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
View Source
    def modules(self) -> Iterator['Module']:

        r"""Returns an iterator over all modules in the network.

        Yields:

            Module: a module in the network

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.modules()):

                    print(idx, '->', m)

            0 -> Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            1 -> Linear(in_features=2, out_features=2, bias=True)

        """

        for name, module in self.named_modules():

            yield module

named_buffers

def named_buffers(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module buffers, yielding both the

name of the buffer as well as the buffer itself.

Parameters:

prefix (str): prefix to prepend to all buffer names.
recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields:

(string, torch.Tensor): Tuple containing the name and buffer

Example::

>>> for name, buf in self.named_buffers():
>>>    if name in ['running_var']:
>>>        print(buf.size())
View Source
    def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module buffers, yielding both the

        name of the buffer as well as the buffer itself.

        Args:

            prefix (str): prefix to prepend to all buffer names.

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            (string, torch.Tensor): Tuple containing the name and buffer

        Example::

            >>> for name, buf in self.named_buffers():

            >>>    if name in ['running_var']:

            >>>        print(buf.size())

        """

        gen = self._named_members(

            lambda module: module._buffers.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

named_children

def named_children(
    self
) -> Iterator[Tuple[str, ForwardRef('Module')]]

Returns an iterator over immediate children modules, yielding both

the name of the module as well as the module itself.

Yields:

(string, Module): Tuple containing a name and child module

Example::

>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)
View Source
    def named_children(self) -> Iterator[Tuple[str, 'Module']]:

        r"""Returns an iterator over immediate children modules, yielding both

        the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple containing a name and child module

        Example::

            >>> for name, module in model.named_children():

            >>>     if name in ['conv4', 'conv5']:

            >>>         print(module)

        """

        memo = set()

        for name, module in self._modules.items():

            if module is not None and module not in memo:

                memo.add(module)

                yield name, module

named_modules

def named_modules(
    self,
    memo: Union[Set[ForwardRef('Module')], NoneType] = None,
    prefix: str = ''
)

Returns an iterator over all modules in the network, yielding

both the name of the module as well as the module itself.

Yields:

(string, Module): Tuple of name and module

Note: Duplicate modules are returned only once. In the following example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
        print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
View Source
    def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = ''):

        r"""Returns an iterator over all modules in the network, yielding

        both the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple of name and module

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.named_modules()):

                    print(idx, '->', m)

            0 -> ('', Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            ))

            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

        """

        if memo is None:

            memo = set()

        if self not in memo:

            memo.add(self)

            yield prefix, self

            for name, module in self._modules.items():

                if module is None:

                    continue

                submodule_prefix = prefix + ('.' if prefix else '') + name

                for m in module.named_modules(memo, submodule_prefix):

                    yield m

named_parameters

def named_parameters(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module parameters, yielding both the

name of the parameter as well as the parameter itself.

Parameters:

prefix (str): prefix to prepend to all parameter names.
recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.

Yields:

(string, Parameter): Tuple containing the name and parameter

Example::

>>> for name, param in self.named_parameters():
>>>    if name in ['bias']:
>>>        print(param.size())
View Source
    def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module parameters, yielding both the

        name of the parameter as well as the parameter itself.

        Args:

            prefix (str): prefix to prepend to all parameter names.

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            (string, Parameter): Tuple containing the name and parameter

        Example::

            >>> for name, param in self.named_parameters():

            >>>    if name in ['bias']:

            >>>        print(param.size())

        """

        gen = self._named_members(

            lambda module: module._parameters.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

parameters

def parameters(
    self,
    recurse: bool = True
) -> Iterator[torch.nn.parameter.Parameter]

Returns an iterator over module parameters.

This is typically passed to an optimizer.

Parameters:

recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.

Yields:

Type Description
Parameter module parameter
Example::
>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:

        r"""Returns an iterator over module parameters.

        This is typically passed to an optimizer.

        Args:

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            Parameter: module parameter

        Example::

            >>> for param in model.parameters():

            >>>     print(type(param), param.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, param in self.named_parameters(recurse=recurse):

            yield param

register_backward_hook

def register_backward_hook(
    self,
    hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]
) -> torch.utils.hooks.RemovableHandle

Registers a backward hook on the module.

.. warning ::

The current implementation will not have the presented behavior
for complex :class:`Module` that perform many operations.
In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only
contain the gradients for a subset of the inputs and outputs.
For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`
directly on a specific input or output to get the required gradients.

The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature::

hook(module, grad_input, grad_output) -> Tensor or None

The :attr:grad_input and :attr:grad_output may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:grad_input in subsequent computations. :attr:grad_input will only correspond to the inputs given as positional arguments.

Returns:

:class:torch.utils.hooks.RemovableHandle: a handle that can be used to remove the added hook by calling handle.remove()
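
A small sketch using this (legacy) module-level backward hook to log gradient norms; see the warning above about its limitations:

    import torch
    import torch.nn as nn

    layer = nn.Linear(4, 4)

    def log_grad(module, grad_input, grad_output):
        print(grad_output[0].norm().item())   # gradient w.r.t. the layer output

    handle = layer.register_backward_hook(log_grad)
    layer(torch.randn(3, 4)).sum().backward()
    handle.remove()
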
View Source
    def register_backward_hook(

        self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]

    ) -> RemovableHandle:

        r"""Registers a backward hook on the module.

        .. warning ::

            The current implementation will not have the presented behavior

            for complex :class:`Module` that perform many operations.

            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only

            contain the gradients for a subset of the inputs and outputs.

            For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`

            directly on a specific input or output to get the required gradients.

        The hook will be called every time the gradients with respect to module

        inputs are computed. The hook should have the following signature::

            hook(module, grad_input, grad_output) -> Tensor or None

        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the

        module has multiple inputs or outputs. The hook should not modify its

        arguments, but it can optionally return a new gradient with respect to

        input that will be used in place of :attr:`grad_input` in subsequent

        computations. :attr:`grad_input` will only correspond to the inputs given

        as positional arguments.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._backward_hooks)

        self._backward_hooks[handle.id] = hook

        return handle

register_buffer

def register_buffer(
    self,
    name: str,
    tensor: torch.Tensor,
    persistent: bool = True
) -> None

Adds a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm's running_mean is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting :attr:persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's :attr:state_dict.

Buffers can be accessed as attributes using given names.

Args:

name (string): name of the buffer. The buffer can be accessed from this module using the given name.
tensor (Tensor): buffer to be registered.
persistent (bool): whether the buffer is part of this module's :attr:state_dict.

Example::

>>> self.register_buffer('running_mean', torch.zeros(num_features))
View Source
    def register_buffer(self, name: str, tensor: Tensor, persistent: bool = True) -> None:

        r"""Adds a buffer to the module.

        This is typically used to register a buffer that should not to be

        considered a model parameter. For example, BatchNorm's ``running_mean``

        is not a parameter, but is part of the module's state. Buffers, by

        default, are persistent and will be saved alongside parameters. This

        behavior can be changed by setting :attr:`persistent` to ``False``. The

        only difference between a persistent buffer and a non-persistent buffer

        is that the latter will not be a part of this module's

        :attr:`state_dict`.

        Buffers can be accessed as attributes using given names.

        Args:

            name (string): name of the buffer. The buffer can be accessed

                from this module using the given name

            tensor (Tensor): buffer to be registered.

            persistent (bool): whether the buffer is part of this module's

                :attr:`state_dict`.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """

        if persistent is False and isinstance(self, torch.jit.ScriptModule):

            raise RuntimeError("ScriptModule does not support non-persistent buffers")

        if '_buffers' not in self.__dict__:

            raise AttributeError(

                "cannot assign buffer before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("buffer name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("buffer name can't contain \".\"")

        elif name == '':

            raise KeyError("buffer name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._buffers:

            raise KeyError("attribute '{}' already exists".format(name))

        elif tensor is not None and not isinstance(tensor, torch.Tensor):

            raise TypeError("cannot assign '{}' object to buffer '{}' "

                            "(torch Tensor or None required)"

                            .format(torch.typename(tensor), name))

        else:

            self._buffers[name] = tensor

            if persistent:

                self._non_persistent_buffers_set.discard(name)

            else:

                self._non_persistent_buffers_set.add(name)

register_forward_hook

def register_forward_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward hook on the module.

The hook will be called every time after :func:forward has computed an output. It should have the following signature::

hook(module, input, output) -> None or modified output

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after :func:forward is called.
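
A short sketch that captures intermediate activations without changing the module:

    import torch
    import torch.nn as nn

    activations = {}

    def save_output(module, inputs, output):
        activations["feat"] = output.detach()

    layer = nn.Linear(4, 8)
    handle = layer.register_forward_hook(save_output)
    layer(torch.randn(2, 4))
    handle.remove()
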

View Source
    def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward hook on the module.

        The hook will be called every time after :func:`forward` has computed an output.

        It should have the following signature::

            hook(module, input, output) -> None or modified output

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the output. It can modify the input inplace but

        it will not have effect on forward since this is called after

        :func:`forward` is called.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_hooks)

        self._forward_hooks[handle.id] = hook

        return handle

register_forward_pre_hook

def register_forward_pre_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward pre-hook on the module.

The hook will be called every time before :func:forward is invoked. It should have the following signature::

hook(module, input) -> None or modified input

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the input. The user can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple).

Returns:

:class:torch.utils.hooks.RemovableHandle: a handle that can be used to remove the added hook by calling handle.remove()
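
A small sketch that rescales the input just before forward runs; the returned tuple replaces the original input:

    import torch
    import torch.nn as nn

    def scale_input(module, inputs):
        return (inputs[0] * 0.5,)

    layer = nn.Linear(4, 4)
    handle = layer.register_forward_pre_hook(scale_input)
    out = layer(torch.randn(2, 4))
    handle.remove()
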
View Source
    def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward pre-hook on the module.

        The hook will be called every time before :func:`forward` is invoked.

        It should have the following signature::

            hook(module, input) -> None or modified input

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the input. User can either return a tuple or a

        single modified value in the hook. We will wrap the value into a tuple

        if a single value is returned(unless that value is already a tuple).

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_pre_hooks)

        self._forward_pre_hooks[handle.id] = hook

        return handle

register_parameter

def register_parameter(
    self,
    name: str,
    param: torch.nn.parameter.Parameter
) -> None

Adds a parameter to the module.

The parameter can be accessed as an attribute using given name.

Parameters:

name (string): name of the parameter. The parameter can be accessed from this module using the given name.
param (Parameter): parameter to be added to the module.
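
A minimal sketch registering a learnable scalar so that it shows up in parameters() and state_dict():

    import torch
    import torch.nn as nn

    module = nn.Module()
    module.register_parameter("scale", nn.Parameter(torch.ones(1)))
    print([name for name, _ in module.named_parameters()])   # ['scale']
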
View Source
    def register_parameter(self, name: str, param: Parameter) -> None:

        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using given name.

        Args:

            name (string): name of the parameter. The parameter can be accessed

                from this module using the given name

            param (Parameter): parameter to be added to the module.

        """

        if '_parameters' not in self.__dict__:

            raise AttributeError(

                "cannot assign parameter before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("parameter name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("parameter name can't contain \".\"")

        elif name == '':

            raise KeyError("parameter name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._parameters:

            raise KeyError("attribute '{}' already exists".format(name))

        if param is None:

            self._parameters[name] = None

        elif not isinstance(param, Parameter):

            raise TypeError("cannot assign '{}' object to parameter '{}' "

                            "(torch.nn.Parameter or None required)"

                            .format(torch.typename(param), name))

        elif param.grad_fn:

            raise ValueError(

                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "

                "parameters must be created explicitly. To express '{0}' "

                "as a function of another Tensor, compute the value in "

                "the forward() method.".format(name))

        else:

            self._parameters[name] = param

requires_grad_

def requires_grad_(
    self: ~T,
    requires_grad: bool = True
) -> ~T

Change if autograd should record operations on parameters in this

module.

This method sets the parameters' :attr:requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

Parameters:

requires_grad (bool): whether autograd should record operations on parameters in this module. Default: True.

Returns:

Type Description
Module self
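
A short fine-tuning sketch that freezes one part of a model and optimizes only the rest:

    import torch
    import torch.nn as nn

    backbone = nn.Linear(8, 8)
    head = nn.Linear(8, 2)
    backbone.requires_grad_(False)    # backbone parameters stop recording gradients
    optimizer = torch.optim.SGD(head.parameters(), lr=0.01)
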
View Source
    def requires_grad_(self: T, requires_grad: bool = True) -> T:

        r"""Change if autograd should record operations on parameters in this

        module.

        This method sets the parameters' :attr:`requires_grad` attributes

        in-place.

        This method is helpful for freezing part of the module for finetuning

        or training parts of a model individually (e.g., GAN training).

        Args:

            requires_grad (bool): whether autograd should record operations on

                                  parameters in this module. Default: ``True``.

        Returns:

            Module: self

        """

        for p in self.parameters():

            p.requires_grad_(requires_grad)

        return self

share_memory

def share_memory(
    self: ~T
) -> ~T
View Source
    def share_memory(self: T) -> T:

        return self._apply(lambda t: t.share_memory_())

state_dict

def state_dict(
    self,
    destination=None,
    prefix='',
    keep_vars=False
)

Returns a dictionary containing a whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names.

Returns:

Type Description
dict a dictionary containing a whole state of the module

Example::

>>> module.state_dict().keys()
['bias', 'weight']
View Source
    def state_dict(self, destination=None, prefix='', keep_vars=False):

        r"""Returns a dictionary containing a whole state of the module.

        Both parameters and persistent buffers (e.g. running averages) are

        included. Keys are corresponding parameter and buffer names.

        Returns:

            dict:

                a dictionary containing a whole state of the module

        Example::

            >>> module.state_dict().keys()

            ['bias', 'weight']

        """

        if destination is None:

            destination = OrderedDict()

            destination._metadata = OrderedDict()

        destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version)

        self._save_to_state_dict(destination, prefix, keep_vars)

        for name, module in self._modules.items():

            if module is not None:

                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)

        for hook in self._state_dict_hooks.values():

            hook_result = hook(self, destination, prefix, local_metadata)

            if hook_result is not None:

                destination = hook_result

        return destination

to

def to(
    self,
    *args,
    **kwargs
)

Moves and/or casts the parameters and buffers.

This can be called as

.. function:: to(device=None, dtype=None, non_blocking=False)

.. function:: to(dtype, non_blocking=False)

.. function:: to(tensor, non_blocking=False)

.. function:: to(memory_format=torch.channels_last)

Its signature is similar to :meth:torch.Tensor.to, but only accepts floating point desired :attr:dtype s. In addition, this method will only cast the floating point parameters and buffers to :attr:dtype (if given). The integral parameters and buffers will be moved :attr:device, if that is given, but with dtypes unchanged. When :attr:non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

.. note:: This method modifies the module in-place.

Parameters:

device (:class:torch.device): the desired device of the parameters and buffers in this module
dtype (:class:torch.dtype): the desired floating point type of the floating point parameters and buffers in this module
tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module
memory_format (:class:torch.memory_format): the desired memory format for 4D parameters and buffers in this module (keyword only argument)

Returns:

Type Description
Module self
Example::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)
View Source
    def to(self, *args, **kwargs):

        r"""Moves and/or casts the parameters and buffers.

        This can be called as

        .. function:: to(device=None, dtype=None, non_blocking=False)

        .. function:: to(dtype, non_blocking=False)

        .. function:: to(tensor, non_blocking=False)

        .. function:: to(memory_format=torch.channels_last)

        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts

        floating point desired :attr:`dtype` s. In addition, this method will

        only cast the floating point parameters and buffers to :attr:`dtype`

        (if given). The integral parameters and buffers will be moved

        :attr:`device`, if that is given, but with dtypes unchanged. When

        :attr:`non_blocking` is set, it tries to convert/move asynchronously

        with respect to the host if possible, e.g., moving CPU Tensors with

        pinned memory to CUDA devices.

        See below for examples.

        .. note::

            This method modifies the module in-place.

        Args:

            device (:class:`torch.device`): the desired device of the parameters

                and buffers in this module

            dtype (:class:`torch.dtype`): the desired floating point type of

                the floating point parameters and buffers in this module

            tensor (torch.Tensor): Tensor whose dtype and device are the desired

                dtype and device for all parameters and buffers in this module

            memory_format (:class:`torch.memory_format`): the desired memory

                format for 4D parameters and buffers in this module (keyword

                only argument)

        Returns:

            Module: self

        Example::

            >>> linear = nn.Linear(2, 2)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]])

            >>> linear.to(torch.double)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]], dtype=torch.float64)

            >>> gpu1 = torch.device("cuda:1")

            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')

            >>> cpu = torch.device("cpu")

            >>> linear.to(cpu)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16)

        """

        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if dtype is not None:

            if not dtype.is_floating_point:

                raise TypeError('nn.Module.to only accepts floating point '

                                'dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):

            if convert_to_format is not None and t.dim() == 4:

                return t.to(device, dtype if t.is_floating_point() else None, non_blocking, memory_format=convert_to_format)

            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)

        return self._apply(convert)

train

def train(
    self: ~T,
    mode: bool = True
) -> ~T

Sets the module in training mode.

This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:Dropout, :class:BatchNorm, etc.

Parameters:

mode (bool): whether to set training mode (True) or evaluation mode (False). Default: True.

Returns:

Type Description
Module self
View Source
    def train(self: T, mode: bool = True) -> T:

        r"""Sets the module in training mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        Args:

            mode (bool): whether to set training mode (``True``) or evaluation

                         mode (``False``). Default: ``True``.

        Returns:

            Module: self

        """

        self.training = mode

        for module in self.children():

            module.train(mode)

        return self

type

def type(
    self: ~T,
    dst_type: Union[torch.dtype, str]
) -> ~T

Casts all parameters and buffers to :attr:dst_type.

Parameters:

dst_type (type or string): the desired type

Returns:

Type Description
Module self
View Source
    def type(self: T, dst_type: Union[dtype, str]) -> T:

        r"""Casts all parameters and buffers to :attr:`dst_type`.

        Arguments:

            dst_type (type or string): the desired type

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.type(dst_type))

zero_grad

def zero_grad(
    self
) -> None

Sets gradients of all model parameters to zero.

View Source
    def zero_grad(self) -> None:

        r"""Sets gradients of all model parameters to zero."""

        if getattr(self, '_is_replica', False):

            warnings.warn(

                "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "

                "The parameters are copied (in a differentiable manner) from the original module. "

                "This means they are not leaf nodes in autograd and so don't accumulate gradients. "

                "If you need gradients in your forward method, consider using autograd.grad instead.")

        for p in self.parameters():

            if p.grad is not None:

                p.grad.detach_()

                p.grad.zero_()

Detection

class Detection(
    mfn,
    rpn,
    roi
)

Ancestors (in MRO)

  • torchvision.models.detection.generalized_rcnn.GeneralizedRCNN
  • torch.nn.modules.module.Module

Class variables

T_destination
dump_patches

Methods

add_module

def add_module(
    self,
    name: str,
    module: 'Module'
) -> None

Adds a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters:

name (string): name of the child module. The child module can be accessed from this module using the given name.
module (Module): child module to be added to the module.
View Source
    def add_module(self, name: str, module: 'Module') -> None:

        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:

            name (string): name of the child module. The child module can be

                accessed from this module using the given name

            module (Module): child module to be added to the module.

        """

        if not isinstance(module, Module) and module is not None:

            raise TypeError("{} is not a Module subclass".format(

                torch.typename(module)))

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("module name should be a string. Got {}".format(

                torch.typename(name)))

        elif hasattr(self, name) and name not in self._modules:

            raise KeyError("attribute '{}' already exists".format(name))

        elif '.' in name:

            raise KeyError("module name can't contain \".\"")

        elif name == '':

            raise KeyError("module name can't be empty string \"\"")

        self._modules[name] = module

apply

def apply(
    self: ~T,
    fn: Callable[[ForwardRef('Module')], NoneType]
) -> ~T

Applies fn recursively to every submodule (as returned by .children())

as well as self. Typical use includes initializing the parameters of a model (see also :ref:nn-init-doc).

Parameters:

fn (Module -> None): function to be applied to each submodule

Returns:

Type Description
Module self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
View Source
    def apply(self: T, fn: Callable[['Module'], None]) -> T:

        r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)

        as well as self. Typical use includes initializing the parameters of a model

        (see also :ref:`nn-init-doc`).

        Args:

            fn (:class:`Module` -> None): function to be applied to each submodule

        Returns:

            Module: self

        Example::

            >>> @torch.no_grad()

            >>> def init_weights(m):

            >>>     print(m)

            >>>     if type(m) == nn.Linear:

            >>>         m.weight.fill_(1.0)

            >>>         print(m.weight)

            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

            >>> net.apply(init_weights)

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

        """

        for module in self.children():

            module.apply(fn)

        fn(self)

        return self

bfloat16

def bfloat16(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to bfloat16 datatype.

Returns:

Type Description
Module self
View Source
    def bfloat16(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)

buffers

def buffers(
    self,
    recurse: bool = True
) -> Iterator[torch.Tensor]

Returns an iterator over module buffers.

Parameters:

recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields:

Type Description
torch.Tensor module buffer
Example::
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def buffers(self, recurse: bool = True) -> Iterator[Tensor]:

        r"""Returns an iterator over module buffers.

        Args:

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            torch.Tensor: module buffer

        Example::

            >>> for buf in model.buffers():

            >>>     print(type(buf), buf.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, buf in self.named_buffers(recurse=recurse):

            yield buf

children

def children(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over immediate children modules.

Yields:

Type Description
Module a child module
View Source
    def children(self) -> Iterator['Module']:

        r"""Returns an iterator over immediate children modules.

        Yields:

            Module: a child module

        """

        for name, module in self.named_children():

            yield module

cpu

def cpu(
    self: ~T
) -> ~T

Moves all model parameters and buffers to the CPU.

Returns:

Type Description
Module self
View Source
    def cpu(self: T) -> T:

        r"""Moves all model parameters and buffers to the CPU.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cpu())

cuda

def cuda(
    self: ~T,
    device: Union[int, torch.device, NoneType] = None
) -> ~T

Moves all model parameters and buffers to the GPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on GPU while being optimized.

Parameters:

Name Type Description Default
device int if specified, all parameters will be
copied to that device None

Returns:

Type Description
Module self
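A common pattern (a minimal sketch, not specific to this repository) is therefore to move the module to the GPU first and only then construct the optimizer, so that the optimizer references the GPU copies of the parameters:

    import torch
    import torch.nn as nn

    model = nn.Linear(10, 2)
    if torch.cuda.is_available():
        model = model.cuda()  # parameters and buffers become CUDA tensors

    # build the optimizer after the move so it tracks the CUDA parameters
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)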
View Source
    def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:

        r"""Moves all model parameters and buffers to the GPU.

        This also makes associated parameters and buffers different objects. So

        it should be called before constructing optimizer if the module will

        live on GPU while being optimized.

        Arguments:

            device (int, optional): if specified, all parameters will be

                copied to that device

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cuda(device))

double

def double(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to double datatype.

Returns:

Type Description
Module self
View Source
    def double(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``double`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.double() if t.is_floating_point() else t)

eager_outputs

def eager_outputs(
    self,
    losses,
    detections
)
View Source
    @torch.jit.unused

    def eager_outputs(self, losses, detections):

        # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]

        if self.training:

            return losses

        return detections
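In effect, the detection model returns the loss dictionary while in training mode and the list of per-image detections while in evaluation mode. A minimal sketch of the two call patterns (here model, images and targets are placeholders, not names defined in this module):

    # training mode: the forward pass returns a dict of losses
    model.train()
    loss_dict = model(images, targets)

    # evaluation mode: the forward pass returns a list of detection dicts
    model.eval()
    with torch.no_grad():
        detections = model(images)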

eval

def eval(
    self: ~T
) -> ~T

Sets the module in evaluation mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

This is equivalent to calling self.train(False) (see torch.nn.Module.train).

Returns:

Type Description
Module self
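A typical inference pattern (a minimal sketch using a throwaway network, not code from this module) combines eval() with torch.no_grad() so that layers such as Dropout and BatchNorm switch to inference behavior and no autograd graph is built:

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(8, 8), nn.Dropout(p=0.5), nn.Linear(8, 2))
    model.eval()                      # Dropout now acts as the identity
    with torch.no_grad():             # no gradients are tracked
        out = model(torch.randn(4, 8))
    print(out.shape)                  # torch.Size([4, 2])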
View Source
    def eval(self: T) -> T:

        r"""Sets the module in evaluation mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.

        Returns:

            Module: self

        """

        return self.train(False)

extra_repr

def extra_repr(
    self
) -> str

Set the extra representation of the module

To print customized extra information, you should reimplement this method in your own modules. Both single-line and multi-line strings are acceptable.

View Source
    def extra_repr(self) -> str:

        r"""Set the extra representation of the module

        To print customized extra information, you should reimplement

        this method in your own modules. Both single-line and multi-line

        strings are acceptable.

        """

        return ''

float

def float(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to float datatype.

Returns:

Type Description
Module self
View Source
    def float(self: T) -> T:

        r"""Casts all floating point parameters and buffers to float datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.float() if t.is_floating_point() else t)

forward

def forward(
    self,
    images,
    targets=None
)

Parameters:

Name Type Description Default
images list[Tensor] images to be processed None
targets list[Dict[Tensor]] ground-truth boxes present in the image (optional) None

Returns:

Type Description
None result (list[BoxList] or dict[Tensor]): the output from the model.
During training, it returns a dict[Tensor] which contains the losses.
During testing, it returns a list[BoxList] that contains additional fields
like scores, labels and mask (for Mask R-CNN models).
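A minimal sketch of both call patterns, assuming model is an instance of the detection network defined in this module (image sizes, class ids and box coordinates below are illustrative only):

    import torch

    # a list of 3-channel images; they may have different spatial sizes
    images = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]

    # one ground-truth box per image; boxes are [x1, y1, x2, y2] with x2 > x1 and y2 > y1
    targets = [
        {"boxes": torch.tensor([[10.0, 20.0, 200.0, 250.0]]), "labels": torch.tensor([1])},
        {"boxes": torch.tensor([[30.0, 40.0, 300.0, 350.0]]), "labels": torch.tensor([2])},
    ]

    model.train()
    loss_dict = model(images, targets)   # dict of scalar loss tensors

    model.eval()
    with torch.no_grad():
        predictions = model(images)      # list of {"boxes", "labels", "scores"} per image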
View Source
    def forward(self, images, targets=None):

        # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]

        """

        Arguments:

            images (list[Tensor]): images to be processed

            targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional)

        Returns:

            result (list[BoxList] or dict[Tensor]): the output from the model.

                During training, it returns a dict[Tensor] which contains the losses.

                During testing, it returns list[BoxList] contains additional fields

                like `scores`, `labels` and `mask` (for Mask R-CNN models).

        """

        if self.training and targets is None:

            raise ValueError("In training mode, targets should be passed")

        if self.training:

            assert targets is not None

            for target in targets:

                boxes = target["boxes"]

                if isinstance(boxes, torch.Tensor):

                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:

                        raise ValueError("Expected target boxes to be a tensor"

                                         "of shape [N, 4], got {:}.".format(

                                             boxes.shape))

                else:

                    raise ValueError("Expected target boxes to be of type "

                                     "Tensor, got {:}.".format(type(boxes)))

        original_image_sizes = torch.jit.annotate(List[Tuple[int, int]], [])

        for img in images:

            val = img.shape[-2:]

            assert len(val) == 2

            original_image_sizes.append((val[0], val[1]))

        images, targets = self.transform(images, targets)

        # Check for degenerate boxes

        # TODO: Move this to a function

        if targets is not None:

            for target_idx, target in enumerate(targets):

                boxes = target["boxes"]

                degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]

                if degenerate_boxes.any():

                    # print the first degenrate box

                    bb_idx = degenerate_boxes.any(dim=1).nonzero().view(-1)[0]

                    degen_bb: List[float] = boxes[bb_idx].tolist()

                    raise ValueError("All bounding boxes should have positive height and width."

                                     " Found invaid box {} for target at index {}."

                                     .format(degen_bb, target_idx))

        features = self.backbone(images.tensors)

        if isinstance(features, torch.Tensor):

            features = OrderedDict([('0', features)])

        proposals, proposal_losses = self.rpn(images, features, targets)

        detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)

        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)

        losses = {}

        losses.update(detector_losses)

        losses.update(proposal_losses)

        if torch.jit.is_scripting():

            if not self._has_warned:

                warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting")

                self._has_warned = True

            return (losses, detections)

        else:

            return self.eager_outputs(losses, detections)

half

def half(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to half datatype.

Returns:

Type Description
Module self
View Source
    def half(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``half`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.half() if t.is_floating_point() else t)

load_state_dict

def load_state_dict(
    self,
    state_dict: Dict[str, torch.Tensor],
    strict: bool = True
)

Copies parameters and buffers from state_dict into this module and its descendants. If strict is True, then the keys of state_dict must exactly match the keys returned by this module's state_dict() function.

Parameters:

Name Type Description Default
state_dict dict a dict containing parameters and
persistent buffers. None
strict bool whether to strictly enforce that the keys
in state_dict match the keys returned by this module's
state_dict() function. Default: True None

Returns:

Type Description
None NamedTuple with missing_keys and unexpected_keys fields:
* missing_keys is a list of str containing the missing keys
* unexpected_keys is a list of str containing the unexpected keys
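A minimal sketch (file name illustrative) of the usual save/restore round trip, and of using strict=False to inspect key mismatches instead of raising:

    import torch
    import torch.nn as nn

    model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
    torch.save(model.state_dict(), "checkpoint.pth")

    restored = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
    restored.load_state_dict(torch.load("checkpoint.pth"))  # strict=True by default

    # with strict=False, mismatched keys are reported rather than raised
    partial = nn.Sequential(nn.Linear(4, 4))
    result = partial.load_state_dict(torch.load("checkpoint.pth"), strict=False)
    print(result.missing_keys, result.unexpected_keys)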
View Source
    def load_state_dict(self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]],

                        strict: bool = True):

        r"""Copies parameters and buffers from :attr:`state_dict` into

        this module and its descendants. If :attr:`strict` is ``True``, then

        the keys of :attr:`state_dict` must exactly match the keys returned

        by this module's :meth:`~torch.nn.Module.state_dict` function.

        Arguments:

            state_dict (dict): a dict containing parameters and

                persistent buffers.

            strict (bool, optional): whether to strictly enforce that the keys

                in :attr:`state_dict` match the keys returned by this module's

                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

        Returns:

            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:

                * **missing_keys** is a list of str containing the missing keys

                * **unexpected_keys** is a list of str containing the unexpected keys

        """

        missing_keys = []

        unexpected_keys = []

        error_msgs = []

        # copy state_dict so _load_from_state_dict can modify it

        metadata = getattr(state_dict, '_metadata', None)

        state_dict = state_dict.copy()

        if metadata is not None:

            state_dict._metadata = metadata

        def load(module, prefix=''):

            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})

            module._load_from_state_dict(

                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)

            for name, child in module._modules.items():

                if child is not None:

                    load(child, prefix + name + '.')

        load(self)

        load = None  # break load->load reference cycle

        if strict:

            if len(unexpected_keys) > 0:

                error_msgs.insert(

                    0, 'Unexpected key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))

            if len(missing_keys) > 0:

                error_msgs.insert(

                    0, 'Missing key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in missing_keys)))

        if len(error_msgs) > 0:

            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

                               self.__class__.__name__, "\n\t".join(error_msgs)))

        return _IncompatibleKeys(missing_keys, unexpected_keys)

modules

def modules(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over all modules in the network.

Yields:

Type Description
Module a module in the network
Note:
Duplicate modules are returned only once. In the following
example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
        print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
View Source
    def modules(self) -> Iterator['Module']:

        r"""Returns an iterator over all modules in the network.

        Yields:

            Module: a module in the network

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.modules()):

                    print(idx, '->', m)

            0 -> Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            1 -> Linear(in_features=2, out_features=2, bias=True)

        """

        for name, module in self.named_modules():

            yield module

named_buffers

def named_buffers(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module buffers, yielding both the

name of the buffer as well as the buffer itself.

Parameters:

Name Type Description Default
prefix str prefix to prepend to all buffer names. None
recurse bool if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module. None

Yields:

Type Description
None (string, torch.Tensor): Tuple containing the name and buffer

Example::

>>> for name, buf in self.named_buffers():
>>>    if name in ['running_var']:
>>>        print(buf.size())
View Source
    def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module buffers, yielding both the

        name of the buffer as well as the buffer itself.

        Args:

            prefix (str): prefix to prepend to all buffer names.

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            (string, torch.Tensor): Tuple containing the name and buffer

        Example::

            >>> for name, buf in self.named_buffers():

            >>>    if name in ['running_var']:

            >>>        print(buf.size())

        """

        gen = self._named_members(

            lambda module: module._buffers.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

named_children

def named_children(
    self
) -> Iterator[Tuple[str, ForwardRef('Module')]]

Returns an iterator over immediate children modules, yielding both

the name of the module as well as the module itself.

Yields:

Type Description
None (string, Module): Tuple containing a name and child module

Example::

>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)
View Source
    def named_children(self) -> Iterator[Tuple[str, 'Module']]:

        r"""Returns an iterator over immediate children modules, yielding both

        the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple containing a name and child module

        Example::

            >>> for name, module in model.named_children():

            >>>     if name in ['conv4', 'conv5']:

            >>>         print(module)

        """

        memo = set()

        for name, module in self._modules.items():

            if module is not None and module not in memo:

                memo.add(module)

                yield name, module

named_modules

def named_modules(
    self,
    memo: Union[Set[ForwardRef('Module')], NoneType] = None,
    prefix: str = ''
)

Returns an iterator over all modules in the network, yielding

both the name of the module as well as the module itself.

Yields:

Type Description
None (string, Module): Tuple of name and module

Note: Duplicate modules are returned only once. In the following example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
        print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
View Source
    def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = ''):

        r"""Returns an iterator over all modules in the network, yielding

        both the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple of name and module

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.named_modules()):

                    print(idx, '->', m)

            0 -> ('', Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            ))

            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

        """

        if memo is None:

            memo = set()

        if self not in memo:

            memo.add(self)

            yield prefix, self

            for name, module in self._modules.items():

                if module is None:

                    continue

                submodule_prefix = prefix + ('.' if prefix else '') + name

                for m in module.named_modules(memo, submodule_prefix):

                    yield m

named_parameters

def named_parameters(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module parameters, yielding both the

name of the parameter as well as the parameter itself.

Parameters:

Name Type Description Default
prefix str prefix to prepend to all parameter names. None
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

Type Description
None (string, Parameter): Tuple containing the name and parameter

Example::

>>> for name, param in self.named_parameters():
>>>    if name in ['bias']:
>>>        print(param.size())
View Source
    def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module parameters, yielding both the

        name of the parameter as well as the parameter itself.

        Args:

            prefix (str): prefix to prepend to all parameter names.

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            (string, Parameter): Tuple containing the name and parameter

        Example::

            >>> for name, param in self.named_parameters():

            >>>    if name in ['bias']:

            >>>        print(param.size())

        """

        gen = self._named_members(

            lambda module: module._parameters.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

parameters

def parameters(
    self,
    recurse: bool = True
) -> Iterator[torch.nn.parameter.Parameter]

Returns an iterator over module parameters.

This is typically passed to an optimizer.

Parameters:

Name Type Description Default
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

Type Description
Parameter module parameter
Example::
>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:

        r"""Returns an iterator over module parameters.

        This is typically passed to an optimizer.

        Args:

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            Parameter: module parameter

        Example::

            >>> for param in model.parameters():

            >>>     print(type(param), param.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, param in self.named_parameters(recurse=recurse):

            yield param

register_backward_hook

def register_backward_hook(
    self,
    hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]
) -> torch.utils.hooks.RemovableHandle

Registers a backward hook on the module.

Warning:

The current implementation will not have the presented behavior
for complex Modules that perform many operations.
In some failure cases, grad_input and grad_output will only
contain the gradients for a subset of the inputs and outputs.
For such Modules, you should use torch.Tensor.register_hook
directly on a specific input or output to get the required gradients.

The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature::

hook(module, grad_input, grad_output) -> Tensor or None

The grad_input and grad_output may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of grad_input in subsequent computations. grad_input will only correspond to the inputs given as positional arguments.

Returns:

Type Description
None torch.utils.hooks.RemovableHandle:
a handle that can be used to remove the added hook by calling
handle.remove()
View Source
    def register_backward_hook(

        self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]

    ) -> RemovableHandle:

        r"""Registers a backward hook on the module.

        .. warning ::

            The current implementation will not have the presented behavior

            for complex :class:`Module` that perform many operations.

            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only

            contain the gradients for a subset of the inputs and outputs.

            For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`

            directly on a specific input or output to get the required gradients.

        The hook will be called every time the gradients with respect to module

        inputs are computed. The hook should have the following signature::

            hook(module, grad_input, grad_output) -> Tensor or None

        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the

        module has multiple inputs or outputs. The hook should not modify its

        arguments, but it can optionally return a new gradient with respect to

        input that will be used in place of :attr:`grad_input` in subsequent

        computations. :attr:`grad_input` will only correspond to the inputs given

        as positional arguments.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._backward_hooks)

        self._backward_hooks[handle.id] = hook

        return handle

register_buffer

def register_buffer(
    self,
    name: str,
    tensor: torch.Tensor,
    persistent: bool = True
) -> None

Adds a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm's running_mean is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's state_dict.

Buffers can be accessed as attributes using given names.

Parameters:

Name Type Description Default
name string name of the buffer. The buffer can be accessed
from this module using the given name None
tensor Tensor buffer to be registered. None
persistent bool whether the buffer is part of this module's state_dict. None

Example::

>>> self.register_buffer('running_mean', torch.zeros(num_features))
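A minimal sketch (the module name is illustrative) showing that a registered buffer is saved in the state_dict and moves with the module, but is not returned by parameters():

    import torch
    import torch.nn as nn

    class RunningStat(nn.Module):
        def __init__(self, num_features: int):
            super().__init__()
            # not trainable, but part of the module's state
            self.register_buffer("running_mean", torch.zeros(num_features))

        def forward(self, x):
            return x - self.running_mean

    m = RunningStat(4)
    print(list(m.state_dict().keys()))   # ['running_mean']
    print(len(list(m.parameters())))     # 0 -- buffers are not parameters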
View Source
    def register_buffer(self, name: str, tensor: Tensor, persistent: bool = True) -> None:

        r"""Adds a buffer to the module.

        This is typically used to register a buffer that should not to be

        considered a model parameter. For example, BatchNorm's ``running_mean``

        is not a parameter, but is part of the module's state. Buffers, by

        default, are persistent and will be saved alongside parameters. This

        behavior can be changed by setting :attr:`persistent` to ``False``. The

        only difference between a persistent buffer and a non-persistent buffer

        is that the latter will not be a part of this module's

        :attr:`state_dict`.

        Buffers can be accessed as attributes using given names.

        Args:

            name (string): name of the buffer. The buffer can be accessed

                from this module using the given name

            tensor (Tensor): buffer to be registered.

            persistent (bool): whether the buffer is part of this module's

                :attr:`state_dict`.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """

        if persistent is False and isinstance(self, torch.jit.ScriptModule):

            raise RuntimeError("ScriptModule does not support non-persistent buffers")

        if '_buffers' not in self.__dict__:

            raise AttributeError(

                "cannot assign buffer before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("buffer name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("buffer name can't contain \".\"")

        elif name == '':

            raise KeyError("buffer name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._buffers:

            raise KeyError("attribute '{}' already exists".format(name))

        elif tensor is not None and not isinstance(tensor, torch.Tensor):

            raise TypeError("cannot assign '{}' object to buffer '{}' "

                            "(torch Tensor or None required)"

                            .format(torch.typename(tensor), name))

        else:

            self._buffers[name] = tensor

            if persistent:

                self._non_persistent_buffers_set.discard(name)

            else:

                self._non_persistent_buffers_set.add(name)

register_forward_hook

def register_forward_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward hook on the module.

The hook will be called every time after forward() has computed an output. It should have the following signature::

hook(module, input, output) -> None or modified output

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input in-place, but that will have no effect on forward, since the hook is called after forward() has run.

Returns:

Type Description
None torch.utils.hooks.RemovableHandle:
a handle that can be used to remove the added hook by calling
handle.remove()
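A minimal sketch (network and hook names are illustrative) of using a forward hook to capture an intermediate activation and then removing it via the returned handle:

    import torch
    import torch.nn as nn

    net = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))
    activations = {}

    def save_activation(module, inputs, output):
        # runs right after the hooked module's forward
        activations["relu"] = output.detach()

    handle = net[1].register_forward_hook(save_activation)
    _ = net(torch.randn(5, 8))
    print(activations["relu"].shape)  # torch.Size([5, 16])
    handle.remove()                   # stop capturing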

View Source
    def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward hook on the module.

        The hook will be called every time after :func:`forward` has computed an output.

        It should have the following signature::

            hook(module, input, output) -> None or modified output

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the output. It can modify the input inplace but

        it will not have effect on forward since this is called after

        :func:`forward` is called.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_hooks)

        self._forward_hooks[handle.id] = hook

        return handle

register_forward_pre_hook

def register_forward_pre_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward pre-hook on the module.

The hook will be called every time before forward() is invoked. It should have the following signature::

hook(module, input) -> None or modified input

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the input. The user can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple).

Returns:

Type Description
None torch.utils.hooks.RemovableHandle:
a handle that can be used to remove the added hook by calling
handle.remove()
View Source
    def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward pre-hook on the module.

        The hook will be called every time before :func:`forward` is invoked.

        It should have the following signature::

            hook(module, input) -> None or modified input

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the input. User can either return a tuple or a

        single modified value in the hook. We will wrap the value into a tuple

        if a single value is returned(unless that value is already a tuple).

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_pre_hooks)

        self._forward_pre_hooks[handle.id] = hook

        return handle

register_parameter

def register_parameter(
    self,
    name: str,
    param: torch.nn.parameter.Parameter
) -> None

Adds a parameter to the module.

The parameter can be accessed as an attribute using the given name.

Parameters:

Name Type Description Default
name string name of the parameter. The parameter can be accessed
from this module using the given name None
param Parameter parameter to be added to the module. None
View Source
    def register_parameter(self, name: str, param: Parameter) -> None:

        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using given name.

        Args:

            name (string): name of the parameter. The parameter can be accessed

                from this module using the given name

            param (Parameter): parameter to be added to the module.

        """

        if '_parameters' not in self.__dict__:

            raise AttributeError(

                "cannot assign parameter before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("parameter name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("parameter name can't contain \".\"")

        elif name == '':

            raise KeyError("parameter name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._parameters:

            raise KeyError("attribute '{}' already exists".format(name))

        if param is None:

            self._parameters[name] = None

        elif not isinstance(param, Parameter):

            raise TypeError("cannot assign '{}' object to parameter '{}' "

                            "(torch.nn.Parameter or None required)"

                            .format(torch.typename(param), name))

        elif param.grad_fn:

            raise ValueError(

                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "

                "parameters must be created explicitly. To express '{0}' "

                "as a function of another Tensor, compute the value in "

                "the forward() method.".format(name))

        else:

            self._parameters[name] = param

requires_grad_

def requires_grad_(
    self: ~T,
    requires_grad: bool = True
) -> ~T

Change if autograd should record operations on parameters in this module.

This method sets the parameters' requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

Parameters:

Name Type Description Default
requires_grad bool whether autograd should record operations on
parameters in this module. Default: True. None

Returns:

Type Description
Module self
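For example, the following sketch (layer sizes illustrative) freezes a feature extractor and optimizes only a new head:

    import torch
    import torch.nn as nn

    backbone = nn.Sequential(nn.Linear(32, 32), nn.ReLU())
    head = nn.Linear(32, 4)

    backbone.requires_grad_(False)   # freeze: backbone parameters get no gradients
    model = nn.Sequential(backbone, head)

    # only the head's parameters remain trainable
    trainable = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.Adam(trainable, lr=1e-3)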
View Source
    def requires_grad_(self: T, requires_grad: bool = True) -> T:

        r"""Change if autograd should record operations on parameters in this

        module.

        This method sets the parameters' :attr:`requires_grad` attributes

        in-place.

        This method is helpful for freezing part of the module for finetuning

        or training parts of a model individually (e.g., GAN training).

        Args:

            requires_grad (bool): whether autograd should record operations on

                                  parameters in this module. Default: ``True``.

        Returns:

            Module: self

        """

        for p in self.parameters():

            p.requires_grad_(requires_grad)

        return self

share_memory

def share_memory(
    self: ~T
) -> ~T
View Source
    def share_memory(self: T) -> T:

        return self._apply(lambda t: t.share_memory_())

state_dict

def state_dict(
    self,
    destination=None,
    prefix='',
    keep_vars=False
)

Returns a dictionary containing a whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names.

Returns:

Type Description
dict a dictionary containing a whole state of the module

Example::

>>> module.state_dict().keys()
['bias', 'weight']
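A minimal sketch (file name illustrative) of the standard checkpointing pattern built on state_dict():

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    checkpoint = {
        "model": model.state_dict(),          # parameters and persistent buffers
        "optimizer": optimizer.state_dict(),  # optimizer internals (momentum, etc.)
    }
    torch.save(checkpoint, "ckpt.pth")

    state = torch.load("ckpt.pth")
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])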
View Source
    def state_dict(self, destination=None, prefix='', keep_vars=False):

        r"""Returns a dictionary containing a whole state of the module.

        Both parameters and persistent buffers (e.g. running averages) are

        included. Keys are corresponding parameter and buffer names.

        Returns:

            dict:

                a dictionary containing a whole state of the module

        Example::

            >>> module.state_dict().keys()

            ['bias', 'weight']

        """

        if destination is None:

            destination = OrderedDict()

            destination._metadata = OrderedDict()

        destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version)

        self._save_to_state_dict(destination, prefix, keep_vars)

        for name, module in self._modules.items():

            if module is not None:

                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)

        for hook in self._state_dict_hooks.values():

            hook_result = hook(self, destination, prefix, local_metadata)

            if hook_result is not None:

                destination = hook_result

        return destination

to

def to(
    self,
    *args,
    **kwargs
)

Moves and/or casts the parameters and buffers.

This can be called as

to(device=None, dtype=None, non_blocking=False)

to(dtype, non_blocking=False)

to(tensor, non_blocking=False)

to(memory_format=torch.channels_last)

Its signature is similar to torch.Tensor.to, but only accepts floating point desired dtypes. In addition, this method will only cast the floating point parameters and buffers to dtype (if given). The integral parameters and buffers will be moved to device, if that is given, but with dtypes unchanged. When non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

Note: This method modifies the module in-place.

Parameters:

Name Type Description Default
device torch.device the desired device of the parameters
and buffers in this module None
dtype torch.dtype the desired floating point type of
the floating point parameters and buffers in this module None
tensor torch.Tensor Tensor whose dtype and device are the desired
dtype and device for all parameters and buffers in this module None
memory_format torch.memory_format the desired memory
format for 4D parameters and buffers in this module (keyword
only argument) None

Returns:

Type Description
Module self
Example::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)
View Source
    def to(self, *args, **kwargs):

        r"""Moves and/or casts the parameters and buffers.

        This can be called as

        .. function:: to(device=None, dtype=None, non_blocking=False)

        .. function:: to(dtype, non_blocking=False)

        .. function:: to(tensor, non_blocking=False)

        .. function:: to(memory_format=torch.channels_last)

        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts

        floating point desired :attr:`dtype` s. In addition, this method will

        only cast the floating point parameters and buffers to :attr:`dtype`

        (if given). The integral parameters and buffers will be moved

        :attr:`device`, if that is given, but with dtypes unchanged. When

        :attr:`non_blocking` is set, it tries to convert/move asynchronously

        with respect to the host if possible, e.g., moving CPU Tensors with

        pinned memory to CUDA devices.

        See below for examples.

        .. note::

            This method modifies the module in-place.

        Args:

            device (:class:`torch.device`): the desired device of the parameters

                and buffers in this module

            dtype (:class:`torch.dtype`): the desired floating point type of

                the floating point parameters and buffers in this module

            tensor (torch.Tensor): Tensor whose dtype and device are the desired

                dtype and device for all parameters and buffers in this module

            memory_format (:class:`torch.memory_format`): the desired memory

                format for 4D parameters and buffers in this module (keyword

                only argument)

        Returns:

            Module: self

        Example::

            >>> linear = nn.Linear(2, 2)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]])

            >>> linear.to(torch.double)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]], dtype=torch.float64)

            >>> gpu1 = torch.device("cuda:1")

            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')

            >>> cpu = torch.device("cpu")

            >>> linear.to(cpu)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16)

        """

        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if dtype is not None:

            if not dtype.is_floating_point:

                raise TypeError('nn.Module.to only accepts floating point '

                                'dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):

            if convert_to_format is not None and t.dim() == 4:

                return t.to(device, dtype if t.is_floating_point() else None, non_blocking, memory_format=convert_to_format)

            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)

        return self._apply(convert)

train

def train(
    self: ~T,
    mode: bool = True
) -> ~T

Sets the module in training mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

Parameters:

Name Type Description Default
mode bool whether to set training mode (True) or evaluation
mode (False). Default: True. None

Returns:

Type Description
Module self
View Source
    def train(self: T, mode: bool = True) -> T:

        r"""Sets the module in training mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        Args:

            mode (bool): whether to set training mode (``True``) or evaluation

                         mode (``False``). Default: ``True``.

        Returns:

            Module: self

        """

        self.training = mode

        for module in self.children():

            module.train(mode)

        return self

type

def type(
    self: ~T,
    dst_type: Union[torch.dtype, str]
) -> ~T

Casts all parameters and buffers to dst_type.

Parameters:

Name Type Description Default
dst_type type or string the desired type None

Returns:

Type Description
Module self
View Source
    def type(self: T, dst_type: Union[dtype, str]) -> T:

        r"""Casts all parameters and buffers to :attr:`dst_type`.

        Arguments:

            dst_type (type or string): the desired type

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.type(dst_type))

zero_grad

def zero_grad(
    self
) -> None

Sets gradients of all model parameters to zero.
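zero_grad() is normally called once per iteration so that gradients from the previous step do not accumulate; a minimal sketch of the usual loop (toy data, not from this repository):

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    x, y = torch.randn(16, 4), torch.randn(16, 1)

    for _ in range(3):
        model.zero_grad()                           # clear gradients from the previous step
        loss = nn.functional.mse_loss(model(x), y)
        loss.backward()                             # accumulate fresh gradients
        optimizer.step()                            # update parameters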

View Source
    def zero_grad(self) -> None:

        r"""Sets gradients of all model parameters to zero."""

        if getattr(self, '_is_replica', False):

            warnings.warn(

                "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "

                "The parameters are copied (in a differentiable manner) from the original module. "

                "This means they are not leaf nodes in autograd and so don't accumulate gradients. "

                "If you need gradients in your forward method, consider using autograd.grad instead.")

        for p in self.parameters():

            if p.grad is not None:

                p.grad.detach_()

                p.grad.zero_()

MFN

class MFN(
    backbone: str
)

Ancestors (in MRO)

  • torch.nn.modules.module.Module

Class variables

T_destination
dump_patches

Methods

add_module

def add_module(
    self,
    name: str,
    module: 'Module'
) -> None

Adds a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters:

Name Type Description Default
name string name of the child module. The child module can be
accessed from this module using the given name None
module Module child module to be added to the module. None
View Source
    def add_module(self, name: str, module: 'Module') -> None:

        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:

            name (string): name of the child module. The child module can be

                accessed from this module using the given name

            module (Module): child module to be added to the module.

        """

        if not isinstance(module, Module) and module is not None:

            raise TypeError("{} is not a Module subclass".format(

                torch.typename(module)))

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("module name should be a string. Got {}".format(

                torch.typename(name)))

        elif hasattr(self, name) and name not in self._modules:

            raise KeyError("attribute '{}' already exists".format(name))

        elif '.' in name:

            raise KeyError("module name can't contain \".\"")

        elif name == '':

            raise KeyError("module name can't be empty string \"\"")

        self._modules[name] = module

apply

def apply(
    self: ~T,
    fn: Callable[[ForwardRef('Module')], NoneType]
) -> ~T

Applies fn recursively to every submodule (as returned by .children())

as well as self. Typical use includes initializing the parameters of a model (see also :ref:nn-init-doc).

Parameters:

Name Type Description Default
fn Module -> None function to be applied to each submodule None

Returns:

Type Description
Module self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
View Source
    def apply(self: T, fn: Callable[['Module'], None]) -> T:

        r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)

        as well as self. Typical use includes initializing the parameters of a model

        (see also :ref:`nn-init-doc`).

        Args:

            fn (:class:`Module` -> None): function to be applied to each submodule

        Returns:

            Module: self

        Example::

            >>> @torch.no_grad()

            >>> def init_weights(m):

            >>>     print(m)

            >>>     if type(m) == nn.Linear:

            >>>         m.weight.fill_(1.0)

            >>>         print(m.weight)

            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

            >>> net.apply(init_weights)

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

        """

        for module in self.children():

            module.apply(fn)

        fn(self)

        return self

bfloat16

def bfloat16(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to bfloat16 datatype.

Returns:

Type Description
Module self
View Source
    def bfloat16(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)

buffers

def buffers(
    self,
    recurse: bool = True
) -> Iterator[torch.Tensor]

Returns an iterator over module buffers.

Parameters:

Name Type Description Default
recurse bool if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module. None

Yields:

Type Description
torch.Tensor module buffer
Example::
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def buffers(self, recurse: bool = True) -> Iterator[Tensor]:

        r"""Returns an iterator over module buffers.

        Args:

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            torch.Tensor: module buffer

        Example::

            >>> for buf in model.buffers():

            >>>     print(type(buf), buf.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, buf in self.named_buffers(recurse=recurse):

            yield buf

children

def children(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over immediate children modules.

Yields:

Type Description
Module a child module
View Source
    def children(self) -> Iterator['Module']:

        r"""Returns an iterator over immediate children modules.

        Yields:

            Module: a child module

        """

        for name, module in self.named_children():

            yield module

cpu

def cpu(
    self: ~T
) -> ~T

Moves all model parameters and buffers to the CPU.

Returns:

Type Description
Module self
View Source
    def cpu(self: T) -> T:

        r"""Moves all model parameters and buffers to the CPU.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cpu())

cuda

def cuda(
    self: ~T,
    device: Union[int, torch.device, NoneType] = None
) -> ~T

Moves all model parameters and buffers to the GPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing the optimizer if the module will live on GPU while being optimized.

Parameters:

Name Type Description Default
device int if specified, all parameters will be
copied to that device None

Returns:

Type Description
Module self
View Source
    def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:

        r"""Moves all model parameters and buffers to the GPU.

        This also makes associated parameters and buffers different objects. So

        it should be called before constructing optimizer if the module will

        live on GPU while being optimized.

        Arguments:

            device (int, optional): if specified, all parameters will be

                copied to that device

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cuda(device))

double

def double(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to double datatype.

Returns:

Type Description
Module self
View Source
    def double(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``double`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.double() if t.is_floating_point() else t)

eval

def eval(
    self: ~T
) -> ~T

Sets the module in evaluation mode.

This has an effect only on certain modules. See the documentation of particular modules for details of their behavior in training/evaluation mode, if they are affected, e.g. Dropout, BatchNorm, etc.

This is equivalent to calling self.train(False) (see torch.nn.Module.train).

Returns:

Type Description
Module self
View Source
    def eval(self: T) -> T:

        r"""Sets the module in evaluation mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.

        Returns:

            Module: self

        """

        return self.train(False)

extra_repr

def extra_repr(
    self
) -> str

Set the extra representation of the module

To print customized extra information, you should reimplement this method in your own modules. Both single-line and multi-line strings are acceptable.

View Source
    def extra_repr(self) -> str:

        r"""Set the extra representation of the module

        To print customized extra information, you should reimplement

        this method in your own modules. Both single-line and multi-line

        strings are acceptable.

        """

        return ''

float

def float(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to float datatype.

Returns:

Type Description
Module self
View Source
    def float(self: T) -> T:

        r"""Casts all floating point parameters and buffers to float datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.float() if t.is_floating_point() else t)

forward

def forward(
    self,
    x
)
View Source
    def forward(self, x):

        x = self.start_layer(x)

        x = self.r2(x)

        b2_out = self.b2(x)

        x = self.r3(x)

        b3_out = self.b3(x)

        x = self.r4(x)

        b4_out = self.b4(x)

        x = self.r5(x)

        b5_out = self.b5(x)

        # BatchNorm works better than L2 normalize

        # out = torch.cat([F.normalize(o, p=2, dim=1) for o in (b2_out, b3_out, b4_out, b5_out)], dim=1)

        out = torch.cat((b2_out, b3_out, b4_out, b5_out), dim=1)

        return out
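
A hedged usage sketch of this forward pass: with a 3x224x224 input the four branch outputs share the same 14x14 spatial size and are concatenated along the channel dimension; the exact channel count depends on the chosen backbone.

    import torch
    from sagemaker_defect_detection.models.ddn import MFN  # assumed import path

    mfn = MFN("resnet34")
    mfn.eval()
    with torch.no_grad():
        fused = mfn(torch.randn(1, 3, 224, 224))
    print(fused.shape)  # expected (1, C, 14, 14), where C is the sum of the branch channels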

half

def half(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to half datatype.

Returns:

    Module: self
View Source
    def half(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``half`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.half() if t.is_floating_point() else t)

load_state_dict

def load_state_dict(
    self,
    state_dict: Dict[str, torch.Tensor],
    strict: bool = True
)

Copies parameters and buffers from :attr:state_dict into

this module and its descendants. If :attr:strict is True, then the keys of :attr:state_dict must exactly match the keys returned by this module's :meth:~torch.nn.Module.state_dict function.

Parameters:

    state_dict (dict): a dict containing parameters and persistent buffers.
    strict (bool, optional): whether to strictly enforce that the keys in :attr:state_dict match the keys returned by this module's :meth:~torch.nn.Module.state_dict function. Default: True

Returns:

    NamedTuple with missing_keys and unexpected_keys fields:
        * missing_keys is a list of str containing the missing keys
        * unexpected_keys is a list of str containing the unexpected keys
View Source
    def load_state_dict(self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]],

                        strict: bool = True):

        r"""Copies parameters and buffers from :attr:`state_dict` into

        this module and its descendants. If :attr:`strict` is ``True``, then

        the keys of :attr:`state_dict` must exactly match the keys returned

        by this module's :meth:`~torch.nn.Module.state_dict` function.

        Arguments:

            state_dict (dict): a dict containing parameters and

                persistent buffers.

            strict (bool, optional): whether to strictly enforce that the keys

                in :attr:`state_dict` match the keys returned by this module's

                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

        Returns:

            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:

                * **missing_keys** is a list of str containing the missing keys

                * **unexpected_keys** is a list of str containing the unexpected keys

        """

        missing_keys = []

        unexpected_keys = []

        error_msgs = []

        # copy state_dict so _load_from_state_dict can modify it

        metadata = getattr(state_dict, '_metadata', None)

        state_dict = state_dict.copy()

        if metadata is not None:

            state_dict._metadata = metadata

        def load(module, prefix=''):

            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})

            module._load_from_state_dict(

                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)

            for name, child in module._modules.items():

                if child is not None:

                    load(child, prefix + name + '.')

        load(self)

        load = None  # break load->load reference cycle

        if strict:

            if len(unexpected_keys) > 0:

                error_msgs.insert(

                    0, 'Unexpected key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))

            if len(missing_keys) > 0:

                error_msgs.insert(

                    0, 'Missing key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in missing_keys)))

        if len(error_msgs) > 0:

            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

                               self.__class__.__name__, "\n\t".join(error_msgs)))

        return _IncompatibleKeys(missing_keys, unexpected_keys)
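
A small sketch of the strict flag (generic nn modules, not this package): with strict=False the call reports key mismatches through the returned NamedTuple instead of raising.

    import torch.nn as nn

    src = nn.Linear(4, 4)
    dst = nn.Sequential(nn.Linear(4, 4))            # expects keys prefixed with "0."
    result = dst.load_state_dict(src.state_dict(), strict=False)
    print(result.missing_keys)                      # ['0.weight', '0.bias']
    print(result.unexpected_keys)                   # ['weight', 'bias']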

modules

def modules(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over all modules in the network.

Yields:

    Module: a module in the network
Note:
Duplicate modules are returned only once. In the following
example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
        print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
View Source
    def modules(self) -> Iterator['Module']:

        r"""Returns an iterator over all modules in the network.

        Yields:

            Module: a module in the network

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.modules()):

                    print(idx, '->', m)

            0 -> Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            1 -> Linear(in_features=2, out_features=2, bias=True)

        """

        for name, module in self.named_modules():

            yield module

named_buffers

def named_buffers(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module buffers, yielding both the

name of the buffer as well as the buffer itself.

Parameters:

    prefix (str): prefix to prepend to all buffer names.
    recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields:

    (string, torch.Tensor): Tuple containing the name and buffer

Example::

>>> for name, buf in self.named_buffers():
>>>    if name in ['running_var']:
>>>        print(buf.size())
View Source
    def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module buffers, yielding both the

        name of the buffer as well as the buffer itself.

        Args:

            prefix (str): prefix to prepend to all buffer names.

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            (string, torch.Tensor): Tuple containing the name and buffer

        Example::

            >>> for name, buf in self.named_buffers():

            >>>    if name in ['running_var']:

            >>>        print(buf.size())

        """

        gen = self._named_members(

            lambda module: module._buffers.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

named_children

def named_children(
    self
) -> Iterator[Tuple[str, ForwardRef('Module')]]

Returns an iterator over immediate children modules, yielding both

the name of the module as well as the module itself.

Yields:

    (string, Module): Tuple containing a name and child module

Example::

>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)
View Source
    def named_children(self) -> Iterator[Tuple[str, 'Module']]:

        r"""Returns an iterator over immediate children modules, yielding both

        the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple containing a name and child module

        Example::

            >>> for name, module in model.named_children():

            >>>     if name in ['conv4', 'conv5']:

            >>>         print(module)

        """

        memo = set()

        for name, module in self._modules.items():

            if module is not None and module not in memo:

                memo.add(module)

                yield name, module

named_modules

def named_modules(
    self,
    memo: Union[Set[ForwardRef('Module')], NoneType] = None,
    prefix: str = ''
)

Returns an iterator over all modules in the network, yielding

both the name of the module as well as the module itself.

Yields:

    (string, Module): Tuple of name and module

Note: Duplicate modules are returned only once. In the following example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
        print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
View Source
    def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = ''):

        r"""Returns an iterator over all modules in the network, yielding

        both the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple of name and module

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.named_modules()):

                    print(idx, '->', m)

            0 -> ('', Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            ))

            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

        """

        if memo is None:

            memo = set()

        if self not in memo:

            memo.add(self)

            yield prefix, self

            for name, module in self._modules.items():

                if module is None:

                    continue

                submodule_prefix = prefix + ('.' if prefix else '') + name

                for m in module.named_modules(memo, submodule_prefix):

                    yield m

named_parameters

def named_parameters(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module parameters, yielding both the

name of the parameter as well as the parameter itself.

Parameters:

    prefix (str): prefix to prepend to all parameter names.
    recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.

Yields:

    (string, Parameter): Tuple containing the name and parameter

Example::

>>> for name, param in self.named_parameters():
>>>    if name in ['bias']:
>>>        print(param.size())
View Source
    def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module parameters, yielding both the

        name of the parameter as well as the parameter itself.

        Args:

            prefix (str): prefix to prepend to all parameter names.

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            (string, Parameter): Tuple containing the name and parameter

        Example::

            >>> for name, param in self.named_parameters():

            >>>    if name in ['bias']:

            >>>        print(param.size())

        """

        gen = self._named_members(

            lambda module: module._parameters.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

parameters

def parameters(
    self,
    recurse: bool = True
) -> Iterator[torch.nn.parameter.Parameter]

Returns an iterator over module parameters.

This is typically passed to an optimizer.

Parameters:

    recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module.

Yields:

    Parameter: module parameter

Example::

>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:

        r"""Returns an iterator over module parameters.

        This is typically passed to an optimizer.

        Args:

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            Parameter: module parameter

        Example::

            >>> for param in model.parameters():

            >>>     print(type(param), param.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, param in self.named_parameters(recurse=recurse):

            yield param

register_backward_hook

def register_backward_hook(
    self,
    hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]
) -> torch.utils.hooks.RemovableHandle

Registers a backward hook on the module.

.. warning ::

The current implementation will not have the presented behavior
for complex :class:`Module` that perform many operations.
In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only
contain the gradients for a subset of the inputs and outputs.
For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`
directly on a specific input or output to get the required gradients.

The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature::

hook(module, grad_input, grad_output) -> Tensor or None

The :attr:grad_input and :attr:grad_output may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:grad_input in subsequent computations. :attr:grad_input will only correspond to the inputs given as positional arguments.

Returns:

    :class:torch.utils.hooks.RemovableHandle: a handle that can be used to remove the added hook by calling handle.remove()
View Source
    def register_backward_hook(

        self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]

    ) -> RemovableHandle:

        r"""Registers a backward hook on the module.

        .. warning ::

            The current implementation will not have the presented behavior

            for complex :class:`Module` that perform many operations.

            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only

            contain the gradients for a subset of the inputs and outputs.

            For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`

            directly on a specific input or output to get the required gradients.

        The hook will be called every time the gradients with respect to module

        inputs are computed. The hook should have the following signature::

            hook(module, grad_input, grad_output) -> Tensor or None

        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the

        module has multiple inputs or outputs. The hook should not modify its

        arguments, but it can optionally return a new gradient with respect to

        input that will be used in place of :attr:`grad_input` in subsequent

        computations. :attr:`grad_input` will only correspond to the inputs given

        as positional arguments.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._backward_hooks)

        self._backward_hooks[handle.id] = hook

        return handle
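
A brief sketch (generic example) of attaching and later removing a backward hook; the hook only inspects grad_output here and returns None, so the gradients are left unchanged.

    import torch
    import torch.nn as nn

    layer = nn.Linear(3, 2)

    def log_grads(module, grad_input, grad_output):
        print([g.shape for g in grad_output if g is not None])
        return None  # do not replace the gradients

    handle = layer.register_backward_hook(log_grads)
    layer(torch.randn(5, 3)).sum().backward()  # the hook fires during backward
    handle.remove()                            # detach the hook when done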

register_buffer

def register_buffer(
    self,
    name: str,
    tensor: torch.Tensor,
    persistent: bool = True
) -> None

Adds a buffer to the module.

This is typically used to register a buffer that should not to be considered a model parameter. For example, BatchNorm's running_mean is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting :attr:persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's :attr:state_dict.

Buffers can be accessed as attributes using given names.

Parameters:

    name (string): name of the buffer. The buffer can be accessed from this module using the given name
    tensor (Tensor): buffer to be registered.
    persistent (bool): whether the buffer is part of this module's :attr:state_dict.

Example::

>>> self.register_buffer('running_mean', torch.zeros(num_features))
View Source
    def register_buffer(self, name: str, tensor: Tensor, persistent: bool = True) -> None:

        r"""Adds a buffer to the module.

        This is typically used to register a buffer that should not to be

        considered a model parameter. For example, BatchNorm's ``running_mean``

        is not a parameter, but is part of the module's state. Buffers, by

        default, are persistent and will be saved alongside parameters. This

        behavior can be changed by setting :attr:`persistent` to ``False``. The

        only difference between a persistent buffer and a non-persistent buffer

        is that the latter will not be a part of this module's

        :attr:`state_dict`.

        Buffers can be accessed as attributes using given names.

        Args:

            name (string): name of the buffer. The buffer can be accessed

                from this module using the given name

            tensor (Tensor): buffer to be registered.

            persistent (bool): whether the buffer is part of this module's

                :attr:`state_dict`.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """

        if persistent is False and isinstance(self, torch.jit.ScriptModule):

            raise RuntimeError("ScriptModule does not support non-persistent buffers")

        if '_buffers' not in self.__dict__:

            raise AttributeError(

                "cannot assign buffer before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("buffer name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("buffer name can't contain \".\"")

        elif name == '':

            raise KeyError("buffer name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._buffers:

            raise KeyError("attribute '{}' already exists".format(name))

        elif tensor is not None and not isinstance(tensor, torch.Tensor):

            raise TypeError("cannot assign '{}' object to buffer '{}' "

                            "(torch Tensor or None required)"

                            .format(torch.typename(tensor), name))

        else:

            self._buffers[name] = tensor

            if persistent:

                self._non_persistent_buffers_set.discard(name)

            else:

                self._non_persistent_buffers_set.add(name)

register_forward_hook

def register_forward_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward hook on the module.

The hook will be called every time after :func:forward has computed an output. It should have the following signature::

hook(module, input, output) -> None or modified output

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after :func:forward is called.

Returns:

    :class:torch.utils.hooks.RemovableHandle: a handle that can be used to remove the added hook by calling handle.remove()

View Source
    def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward hook on the module.

        The hook will be called every time after :func:`forward` has computed an output.

        It should have the following signature::

            hook(module, input, output) -> None or modified output

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the output. It can modify the input inplace but

        it will not have effect on forward since this is called after

        :func:`forward` is called.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_hooks)

        self._forward_hooks[handle.id] = hook

        return handle
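
A minimal sketch (generic example) of the common use of a forward hook: capturing an intermediate activation for feature extraction or debugging.

    import torch
    import torch.nn as nn

    features = {}

    def save_output(module, inputs, output):
        features["linear_out"] = output.detach()

    model = nn.Sequential(nn.Linear(4, 8), nn.ReLU())
    handle = model[0].register_forward_hook(save_output)
    model(torch.randn(2, 4))
    print(features["linear_out"].shape)  # torch.Size([2, 8])
    handle.remove()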

register_forward_pre_hook

def register_forward_pre_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward pre-hook on the module.

The hook will be called every time before :func:forward is invoked. It should have the following signature::

hook(module, input) -> None or modified input

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the input. User can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned(unless that value is already a tuple).

Returns:

    :class:torch.utils.hooks.RemovableHandle: a handle that can be used to remove the added hook by calling handle.remove()
View Source
    def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward pre-hook on the module.

        The hook will be called every time before :func:`forward` is invoked.

        It should have the following signature::

            hook(module, input) -> None or modified input

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the input. User can either return a tuple or a

        single modified value in the hook. We will wrap the value into a tuple

        if a single value is returned(unless that value is already a tuple).

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_pre_hooks)

        self._forward_pre_hooks[handle.id] = hook

        return handle

register_parameter

def register_parameter(
    self,
    name: str,
    param: torch.nn.parameter.Parameter
) -> None

Adds a parameter to the module.

The parameter can be accessed as an attribute using given name.

Parameters:

    name (string): name of the parameter. The parameter can be accessed from this module using the given name
    param (Parameter): parameter to be added to the module.
View Source
    def register_parameter(self, name: str, param: Parameter) -> None:

        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using given name.

        Args:

            name (string): name of the parameter. The parameter can be accessed

                from this module using the given name

            param (Parameter): parameter to be added to the module.

        """

        if '_parameters' not in self.__dict__:

            raise AttributeError(

                "cannot assign parameter before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("parameter name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("parameter name can't contain \".\"")

        elif name == '':

            raise KeyError("parameter name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._parameters:

            raise KeyError("attribute '{}' already exists".format(name))

        if param is None:

            self._parameters[name] = None

        elif not isinstance(param, Parameter):

            raise TypeError("cannot assign '{}' object to parameter '{}' "

                            "(torch.nn.Parameter or None required)"

                            .format(torch.typename(param), name))

        elif param.grad_fn:

            raise ValueError(

                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "

                "parameters must be created explicitly. To express '{0}' "

                "as a function of another Tensor, compute the value in "

                "the forward() method.".format(name))

        else:

            self._parameters[name] = param
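
A tiny sketch (generic example) of registering a parameter explicitly rather than by plain attribute assignment:

    import torch
    import torch.nn as nn

    m = nn.Module()
    m.register_parameter("scale", nn.Parameter(torch.ones(1)))
    print(list(dict(m.named_parameters())))  # ['scale']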

requires_grad_

def requires_grad_(
    self: ~T,
    requires_grad: bool = True
) -> ~T

Change if autograd should record operations on parameters in this

module.

This method sets the parameters' :attr:requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

Parameters:

    requires_grad (bool): whether autograd should record operations on parameters in this module. Default: True.

Returns:

    Module: self
View Source
    def requires_grad_(self: T, requires_grad: bool = True) -> T:

        r"""Change if autograd should record operations on parameters in this

        module.

        This method sets the parameters' :attr:`requires_grad` attributes

        in-place.

        This method is helpful for freezing part of the module for finetuning

        or training parts of a model individually (e.g., GAN training).

        Args:

            requires_grad (bool): whether autograd should record operations on

                                  parameters in this module. Default: ``True``.

        Returns:

            Module: self

        """

        for p in self.parameters():

            p.requires_grad_(requires_grad)

        return self
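
A short sketch (generic example) of the freezing use case mentioned above: gradients are disabled for a backbone while a small head stays trainable.

    import torch.nn as nn

    backbone = nn.Linear(16, 16)
    head = nn.Linear(16, 2)
    backbone.requires_grad_(False)  # freeze: autograd stops recording for these parameters
    print(any(p.requires_grad for p in backbone.parameters()))  # False
    print(all(p.requires_grad for p in head.parameters()))      # True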

share_memory

def share_memory(
    self: ~T
) -> ~T
View Source
    def share_memory(self: T) -> T:

        return self._apply(lambda t: t.share_memory_())

state_dict

def state_dict(
    self,
    destination=None,
    prefix='',
    keep_vars=False
)

Returns a dictionary containing a whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names.

Returns:

    dict: a dictionary containing a whole state of the module

Example::

>>> module.state_dict().keys()
['bias', 'weight']
View Source
    def state_dict(self, destination=None, prefix='', keep_vars=False):

        r"""Returns a dictionary containing a whole state of the module.

        Both parameters and persistent buffers (e.g. running averages) are

        included. Keys are corresponding parameter and buffer names.

        Returns:

            dict:

                a dictionary containing a whole state of the module

        Example::

            >>> module.state_dict().keys()

            ['bias', 'weight']

        """

        if destination is None:

            destination = OrderedDict()

            destination._metadata = OrderedDict()

        destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version)

        self._save_to_state_dict(destination, prefix, keep_vars)

        for name, module in self._modules.items():

            if module is not None:

                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)

        for hook in self._state_dict_hooks.values():

            hook_result = hook(self, destination, prefix, local_metadata)

            if hook_result is not None:

                destination = hook_result

        return destination
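
A common checkpointing sketch built on state_dict (generic example, hypothetical file name): save the dictionary with torch.save and restore it later with load_state_dict.

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    torch.save(model.state_dict(), "checkpoint.pt")   # hypothetical path

    restored = nn.Linear(4, 2)
    restored.load_state_dict(torch.load("checkpoint.pt"))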

to

def to(
    self,
    *args,
    **kwargs
)

Moves and/or casts the parameters and buffers.

This can be called as

.. function:: to(device=None, dtype=None, non_blocking=False)

.. function:: to(dtype, non_blocking=False)

.. function:: to(tensor, non_blocking=False)

.. function:: to(memory_format=torch.channels_last)

Its signature is similar to :meth:torch.Tensor.to, but only accepts floating point desired :attr:dtype s. In addition, this method will only cast the floating point parameters and buffers to :attr:dtype (if given). The integral parameters and buffers will be moved :attr:device, if that is given, but with dtypes unchanged. When :attr:non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

.. note:: This method modifies the module in-place.

Parameters:

    device (:class:torch.device): the desired device of the parameters and buffers in this module
    dtype (:class:torch.dtype): the desired floating point type of the floating point parameters and buffers in this module
    tensor (torch.Tensor): Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module
    memory_format (:class:torch.memory_format): the desired memory format for 4D parameters and buffers in this module (keyword only argument)

Returns:

    Module: self
Example::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)
View Source
    def to(self, *args, **kwargs):

        r"""Moves and/or casts the parameters and buffers.

        This can be called as

        .. function:: to(device=None, dtype=None, non_blocking=False)

        .. function:: to(dtype, non_blocking=False)

        .. function:: to(tensor, non_blocking=False)

        .. function:: to(memory_format=torch.channels_last)

        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts

        floating point desired :attr:`dtype` s. In addition, this method will

        only cast the floating point parameters and buffers to :attr:`dtype`

        (if given). The integral parameters and buffers will be moved

        :attr:`device`, if that is given, but with dtypes unchanged. When

        :attr:`non_blocking` is set, it tries to convert/move asynchronously

        with respect to the host if possible, e.g., moving CPU Tensors with

        pinned memory to CUDA devices.

        See below for examples.

        .. note::

            This method modifies the module in-place.

        Args:

            device (:class:`torch.device`): the desired device of the parameters

                and buffers in this module

            dtype (:class:`torch.dtype`): the desired floating point type of

                the floating point parameters and buffers in this module

            tensor (torch.Tensor): Tensor whose dtype and device are the desired

                dtype and device for all parameters and buffers in this module

            memory_format (:class:`torch.memory_format`): the desired memory

                format for 4D parameters and buffers in this module (keyword

                only argument)

        Returns:

            Module: self

        Example::

            >>> linear = nn.Linear(2, 2)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]])

            >>> linear.to(torch.double)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]], dtype=torch.float64)

            >>> gpu1 = torch.device("cuda:1")

            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')

            >>> cpu = torch.device("cpu")

            >>> linear.to(cpu)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16)

        """

        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if dtype is not None:

            if not dtype.is_floating_point:

                raise TypeError('nn.Module.to only accepts floating point '

                                'dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):

            if convert_to_format is not None and t.dim() == 4:

                return t.to(device, dtype if t.is_floating_point() else None, non_blocking, memory_format=convert_to_format)

            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)

        return self._apply(convert)

train

def train(
    self: ~T,
    mode: bool = True
) -> ~T

Sets the module in training mode.

This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:Dropout, :class:BatchNorm, etc.

Parameters:

    mode (bool): whether to set training mode (True) or evaluation mode (False). Default: True.

Returns:

    Module: self
View Source
    def train(self: T, mode: bool = True) -> T:

        r"""Sets the module in training mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        Args:

            mode (bool): whether to set training mode (``True``) or evaluation

                         mode (``False``). Default: ``True``.

        Returns:

            Module: self

        """

        self.training = mode

        for module in self.children():

            module.train(mode)

        return self

type

def type(
    self: ~T,
    dst_type: Union[torch.dtype, str]
) -> ~T

Casts all parameters and buffers to :attr:dst_type.

Parameters:

    dst_type (type or string): the desired type

Returns:

    Module: self
View Source
    def type(self: T, dst_type: Union[dtype, str]) -> T:

        r"""Casts all parameters and buffers to :attr:`dst_type`.

        Arguments:

            dst_type (type or string): the desired type

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.type(dst_type))

zero_grad

def zero_grad(
    self
) -> None

Sets gradients of all model parameters to zero.

View Source
    def zero_grad(self) -> None:

        r"""Sets gradients of all model parameters to zero."""

        if getattr(self, '_is_replica', False):

            warnings.warn(

                "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "

                "The parameters are copied (in a differentiable manner) from the original module. "

                "This means they are not leaf nodes in autograd and so don't accumulate gradients. "

                "If you need gradients in your forward method, consider using autograd.grad instead.")

        for p in self.parameters():

            if p.grad is not None:

                p.grad.detach_()

                p.grad.zero_()
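
A minimal training-step sketch (generic example) showing where zero_grad() fits: gradients accumulate across backward calls, so they are cleared once per step.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    model = nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    x, y = torch.randn(8, 4), torch.randn(8, 1)

    for _ in range(3):
        model.zero_grad()               # clear gradients from the previous step
        loss = F.mse_loss(model(x), y)
        loss.backward()                 # gradients accumulate into .grad
        optimizer.step()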

RPN

class RPN(
    out_channels: int = 512,
    rpn_pre_nms_top_n_train: int = 1000,
    rpn_pre_nms_top_n_test: int = 500,
    rpn_post_nms_top_n_train: int = 1000,
    rpn_post_nms_top_n_test: int = 500,
    rpn_nms_thresh: float = 0.7,
    rpn_fg_iou_thresh: float = 0.7,
    rpn_bg_iou_thresh: float = 0.3,
    rpn_batch_size_per_image: int = 256,
    rpn_positive_fraction: float = 0.5
)
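
Construction takes only the keyword hyper-parameters listed above; a brief sketch (assumed import path, defaults mirror the signature, and out_channels must match the channel count of the feature map that will be fed to it):

    from sagemaker_defect_detection.models.ddn import RPN  # assumed import path

    rpn = RPN(out_channels=512, rpn_nms_thresh=0.7)  # remaining arguments keep their defaults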

Ancestors (in MRO)

  • torch.nn.modules.module.Module

Class variables

T_destination
dump_patches

Methods

add_module

def add_module(
    self,
    name: str,
    module: 'Module'
) -> None

Adds a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters:

    name (string): name of the child module. The child module can be accessed from this module using the given name
    module (Module): child module to be added to the module.
View Source
    def add_module(self, name: str, module: 'Module') -> None:

        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:

            name (string): name of the child module. The child module can be

                accessed from this module using the given name

            module (Module): child module to be added to the module.

        """

        if not isinstance(module, Module) and module is not None:

            raise TypeError("{} is not a Module subclass".format(

                torch.typename(module)))

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("module name should be a string. Got {}".format(

                torch.typename(name)))

        elif hasattr(self, name) and name not in self._modules:

            raise KeyError("attribute '{}' already exists".format(name))

        elif '.' in name:

            raise KeyError("module name can't contain \".\"")

        elif name == '':

            raise KeyError("module name can't be empty string \"\"")

        self._modules[name] = module
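
A tiny sketch (generic example): the registered child becomes both an attribute and part of the module's parameter tree.

    import torch.nn as nn

    container = nn.Module()
    container.add_module("proj", nn.Linear(8, 4))
    print(container.proj)                     # Linear(in_features=8, out_features=4, bias=True)
    print(len(list(container.parameters())))  # 2 (weight and bias of the child)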

apply

def apply(
    self: ~T,
    fn: Callable[[ForwardRef('Module')], NoneType]
) -> ~T

Applies fn recursively to every submodule (as returned by .children())

as well as self. Typical use includes initializing the parameters of a model (see also :ref:nn-init-doc).

Parameters:

    fn (:class:Module -> None): function to be applied to each submodule

Returns:

    Module: self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
View Source
    def apply(self: T, fn: Callable[['Module'], None]) -> T:

        r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)

        as well as self. Typical use includes initializing the parameters of a model

        (see also :ref:`nn-init-doc`).

        Args:

            fn (:class:`Module` -> None): function to be applied to each submodule

        Returns:

            Module: self

        Example::

            >>> @torch.no_grad()

            >>> def init_weights(m):

            >>>     print(m)

            >>>     if type(m) == nn.Linear:

            >>>         m.weight.fill_(1.0)

            >>>         print(m.weight)

            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

            >>> net.apply(init_weights)

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

        """

        for module in self.children():

            module.apply(fn)

        fn(self)

        return self

bfloat16

def bfloat16(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to bfloat16 datatype.

Returns:

    Module: self
View Source
    def bfloat16(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)

buffers

def buffers(
    self,
    recurse: bool = True
) -> Iterator[torch.Tensor]

Returns an iterator over module buffers.

Parameters:

    recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields:

    torch.Tensor: module buffer

Example::

>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def buffers(self, recurse: bool = True) -> Iterator[Tensor]:

        r"""Returns an iterator over module buffers.

        Args:

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            torch.Tensor: module buffer

        Example::

            >>> for buf in model.buffers():

            >>>     print(type(buf), buf.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, buf in self.named_buffers(recurse=recurse):

            yield buf

children

def children(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over immediate children modules.

Yields:

    Module: a child module
View Source
    def children(self) -> Iterator['Module']:

        r"""Returns an iterator over immediate children modules.

        Yields:

            Module: a child module

        """

        for name, module in self.named_children():

            yield module

cpu

def cpu(
    self: ~T
) -> ~T

Moves all model parameters and buffers to the CPU.

Returns:

    Module: self
View Source
    def cpu(self: T) -> T:

        r"""Moves all model parameters and buffers to the CPU.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cpu())

cuda

def cuda(
    self: ~T,
    device: Union[int, torch.device, NoneType] = None
) -> ~T

Moves all model parameters and buffers to the GPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized.

Parameters:

    device (int, optional): if specified, all parameters will be copied to that device

Returns:

    Module: self
View Source
    def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:

        r"""Moves all model parameters and buffers to the GPU.

        This also makes associated parameters and buffers different objects. So

        it should be called before constructing optimizer if the module will

        live on GPU while being optimized.

        Arguments:

            device (int, optional): if specified, all parameters will be

                copied to that device

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cuda(device))

double

def double(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to double datatype.

Returns:

Type Description
Module self
View Source
    def double(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``double`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.double() if t.is_floating_point() else t)

eval

def eval(
    self: ~T
) -> ~T

Sets the module in evaluation mode.

This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:Dropout, :class:BatchNorm, etc.

This is equivalent with :meth:self.train(False) <torch.nn.Module.train>.

Returns:

    Module: self
View Source
    def eval(self: T) -> T:

        r"""Sets the module in evaluation mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.

        Returns:

            Module: self

        """

        return self.train(False)

extra_repr

def extra_repr(
    self
) -> str

Set the extra representation of the module

To print customized extra information, you should reimplement this method in your own modules. Both single-line and multi-line strings are acceptable.

View Source
    def extra_repr(self) -> str:

        r"""Set the extra representation of the module

        To print customized extra information, you should reimplement

        this method in your own modules. Both single-line and multi-line

        strings are acceptable.

        """

        return ''

float

def float(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to float datatype.

Returns:

    Module: self
View Source
    def float(self: T) -> T:

        r"""Casts all floating point parameters and buffers to float datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.float() if t.is_floating_point() else t)

forward

def forward(
    self,
    *args,
    **kwargs
)
View Source
    def forward(self, *args, **kwargs):

        return self.rpn(*args, **kwargs)
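
The forward simply delegates to the wrapped self.rpn. Assuming that wrapped object is the torchvision RegionProposalNetwork imported at the top of this module (an assumption; its construction is not shown on this page), a call would look roughly like:

    import torch
    from torchvision.models.detection.image_list import ImageList
    from sagemaker_defect_detection.models.ddn import RPN  # assumed import path

    rpn = RPN(out_channels=512)
    rpn.eval()                                            # no targets are needed in eval mode
    images = ImageList(torch.randn(1, 3, 224, 224), [(224, 224)])
    features = {"0": torch.randn(1, 512, 14, 14)}         # channels must equal out_channels
    proposals, losses = rpn(images, features, None)       # per-image proposal boxes and a loss dict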

half

def half(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to half datatype.

Returns:

    Module: self
View Source
    def half(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``half`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.half() if t.is_floating_point() else t)

load_state_dict

def load_state_dict(
    self,
    state_dict: Dict[str, torch.Tensor],
    strict: bool = True
)

Copies parameters and buffers from :attr:state_dict into

this module and its descendants. If :attr:strict is True, then the keys of :attr:state_dict must exactly match the keys returned by this module's :meth:~torch.nn.Module.state_dict function.

Parameters:

    state_dict (dict): a dict containing parameters and persistent buffers.
    strict (bool, optional): whether to strictly enforce that the keys in :attr:state_dict match the keys returned by this module's :meth:~torch.nn.Module.state_dict function. Default: True

Returns:

    NamedTuple with missing_keys and unexpected_keys fields:
        * missing_keys is a list of str containing the missing keys
        * unexpected_keys is a list of str containing the unexpected keys
View Source
    def load_state_dict(self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]],

                        strict: bool = True):

        r"""Copies parameters and buffers from :attr:`state_dict` into

        this module and its descendants. If :attr:`strict` is ``True``, then

        the keys of :attr:`state_dict` must exactly match the keys returned

        by this module's :meth:`~torch.nn.Module.state_dict` function.

        Arguments:

            state_dict (dict): a dict containing parameters and

                persistent buffers.

            strict (bool, optional): whether to strictly enforce that the keys

                in :attr:`state_dict` match the keys returned by this module's

                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

        Returns:

            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:

                * **missing_keys** is a list of str containing the missing keys

                * **unexpected_keys** is a list of str containing the unexpected keys

        """

        missing_keys = []

        unexpected_keys = []

        error_msgs = []

        # copy state_dict so _load_from_state_dict can modify it

        metadata = getattr(state_dict, '_metadata', None)

        state_dict = state_dict.copy()

        if metadata is not None:

            state_dict._metadata = metadata

        def load(module, prefix=''):

            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})

            module._load_from_state_dict(

                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)

            for name, child in module._modules.items():

                if child is not None:

                    load(child, prefix + name + '.')

        load(self)

        load = None  # break load->load reference cycle

        if strict:

            if len(unexpected_keys) > 0:

                error_msgs.insert(

                    0, 'Unexpected key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))

            if len(missing_keys) > 0:

                error_msgs.insert(

                    0, 'Missing key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in missing_keys)))

        if len(error_msgs) > 0:

            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

                               self.__class__.__name__, "\n\t".join(error_msgs)))

        return _IncompatibleKeys(missing_keys, unexpected_keys)

modules

def modules(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over all modules in the network.

Yields:

    Module: a module in the network
Note:
Duplicate modules are returned only once. In the following
example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
        print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
View Source
    def modules(self) -> Iterator['Module']:

        r"""Returns an iterator over all modules in the network.

        Yields:

            Module: a module in the network

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.modules()):

                    print(idx, '->', m)

            0 -> Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            1 -> Linear(in_features=2, out_features=2, bias=True)

        """

        for name, module in self.named_modules():

            yield module

named_buffers

def named_buffers(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module buffers, yielding both the

name of the buffer as well as the buffer itself.

Parameters:

    prefix (str): prefix to prepend to all buffer names.
    recurse (bool): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module.

Yields:

    (string, torch.Tensor): Tuple containing the name and buffer

Example::

>>> for name, buf in self.named_buffers():
>>>    if name in ['running_var']:
>>>        print(buf.size())
View Source
    def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module buffers, yielding both the

        name of the buffer as well as the buffer itself.

        Args:

            prefix (str): prefix to prepend to all buffer names.

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            (string, torch.Tensor): Tuple containing the name and buffer

        Example::

            >>> for name, buf in self.named_buffers():

            >>>    if name in ['running_var']:

            >>>        print(buf.size())

        """

        gen = self._named_members(

            lambda module: module._buffers.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

named_children

def named_children(
    self
) -> Iterator[Tuple[str, ForwardRef('Module')]]

Returns an iterator over immediate children modules, yielding both

the name of the module as well as the module itself.

Yields:

    (string, Module): Tuple containing a name and child module

Example::

>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)
View Source
    def named_children(self) -> Iterator[Tuple[str, 'Module']]:

        r"""Returns an iterator over immediate children modules, yielding both

        the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple containing a name and child module

        Example::

            >>> for name, module in model.named_children():

            >>>     if name in ['conv4', 'conv5']:

            >>>         print(module)

        """

        memo = set()

        for name, module in self._modules.items():

            if module is not None and module not in memo:

                memo.add(module)

                yield name, module

named_modules

def named_modules(
    self,
    memo: Union[Set[ForwardRef('Module')], NoneType] = None,
    prefix: str = ''
)

Returns an iterator over all modules in the network, yielding

both the name of the module as well as the module itself.

Yields:

(string, Module): Tuple of name and module

Note: Duplicate modules are returned only once. In the following example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
        print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
View Source
    def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = ''):

        r"""Returns an iterator over all modules in the network, yielding

        both the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple of name and module

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.named_modules()):

                    print(idx, '->', m)

            0 -> ('', Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            ))

            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

        """

        if memo is None:

            memo = set()

        if self not in memo:

            memo.add(self)

            yield prefix, self

            for name, module in self._modules.items():

                if module is None:

                    continue

                submodule_prefix = prefix + ('.' if prefix else '') + name

                for m in module.named_modules(memo, submodule_prefix):

                    yield m

named_parameters

def named_parameters(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module parameters, yielding both the

name of the parameter as well as the parameter itself.

Parameters:

Name Type Description Default
prefix str prefix to prepend to all parameter names. None
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

(string, Parameter): Tuple containing the name and parameter

Example::

>>> for name, param in self.named_parameters():
>>>    if name in ['bias']:
>>>        print(param.size())
View Source
    def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module parameters, yielding both the

        name of the parameter as well as the parameter itself.

        Args:

            prefix (str): prefix to prepend to all parameter names.

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            (string, Parameter): Tuple containing the name and parameter

        Example::

            >>> for name, param in self.named_parameters():

            >>>    if name in ['bias']:

            >>>        print(param.size())

        """

        gen = self._named_members(

            lambda module: module._parameters.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

parameters

def parameters(
    self,
    recurse: bool = True
) -> Iterator[torch.nn.parameter.Parameter]

Returns an iterator over module parameters.

This is typically passed to an optimizer.

Parameters:

Name Type Description Default
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

Type Description
Parameter module parameter
Example::
>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:

        r"""Returns an iterator over module parameters.

        This is typically passed to an optimizer.

        Args:

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            Parameter: module parameter

        Example::

            >>> for param in model.parameters():

            >>>     print(type(param), param.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, param in self.named_parameters(recurse=recurse):

            yield param

register_backward_hook

def register_backward_hook(
    self,
    hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]
) -> torch.utils.hooks.RemovableHandle

Registers a backward hook on the module.

.. warning ::

The current implementation will not have the presented behavior
for complex :class:`Module` that perform many operations.
In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only
contain the gradients for a subset of the inputs and outputs.
For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`
directly on a specific input or output to get the required gradients.

The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature::

hook(module, grad_input, grad_output) -> Tensor or None

The :attr:grad_input and :attr:grad_output may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:grad_input in subsequent computations. :attr:grad_input will only correspond to the inputs given as positional arguments.

Returns:

:class:torch.utils.hooks.RemovableHandle:
a handle that can be used to remove the added hook by calling
handle.remove()
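
A minimal usage sketch (model.fc, loss and the hook body are placeholders, not part of this module); the hook logs the norm of the first output gradient each time a backward pass reaches the layer::

>>> def log_grad_norm(module, grad_input, grad_output):
>>>     print(module.__class__.__name__, grad_output[0].norm().item())
>>> handle = model.fc.register_backward_hook(log_grad_norm)
>>> loss.backward()   # hook fires during the backward pass
>>> handle.remove()
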
View Source
    def register_backward_hook(

        self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]

    ) -> RemovableHandle:

        r"""Registers a backward hook on the module.

        .. warning ::

            The current implementation will not have the presented behavior

            for complex :class:`Module` that perform many operations.

            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only

            contain the gradients for a subset of the inputs and outputs.

            For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`

            directly on a specific input or output to get the required gradients.

        The hook will be called every time the gradients with respect to module

        inputs are computed. The hook should have the following signature::

            hook(module, grad_input, grad_output) -> Tensor or None

        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the

        module has multiple inputs or outputs. The hook should not modify its

        arguments, but it can optionally return a new gradient with respect to

        input that will be used in place of :attr:`grad_input` in subsequent

        computations. :attr:`grad_input` will only correspond to the inputs given

        as positional arguments.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._backward_hooks)

        self._backward_hooks[handle.id] = hook

        return handle

register_buffer

def register_buffer(
    self,
    name: str,
    tensor: torch.Tensor,
    persistent: bool = True
) -> None

Adds a buffer to the module.

This is typically used to register a buffer that should not to be considered a model parameter. For example, BatchNorm's running_mean is not a parameter, but is part of the module's state. Buffers, by default, are persistent and will be saved alongside parameters. This behavior can be changed by setting :attr:persistent to False. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's :attr:state_dict.

Buffers can be accessed as attributes using given names.

Args:

name (string): name of the buffer. The buffer can be accessed from this module using the given name

tensor (Tensor): buffer to be registered.

persistent (bool): whether the buffer is part of this module's :attr:state_dict.

Example::

>>> self.register_buffer('running_mean', torch.zeros(num_features))
View Source
    def register_buffer(self, name: str, tensor: Tensor, persistent: bool = True) -> None:

        r"""Adds a buffer to the module.

        This is typically used to register a buffer that should not to be

        considered a model parameter. For example, BatchNorm's ``running_mean``

        is not a parameter, but is part of the module's state. Buffers, by

        default, are persistent and will be saved alongside parameters. This

        behavior can be changed by setting :attr:`persistent` to ``False``. The

        only difference between a persistent buffer and a non-persistent buffer

        is that the latter will not be a part of this module's

        :attr:`state_dict`.

        Buffers can be accessed as attributes using given names.

        Args:

            name (string): name of the buffer. The buffer can be accessed

                from this module using the given name

            tensor (Tensor): buffer to be registered.

            persistent (bool): whether the buffer is part of this module's

                :attr:`state_dict`.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """

        if persistent is False and isinstance(self, torch.jit.ScriptModule):

            raise RuntimeError("ScriptModule does not support non-persistent buffers")

        if '_buffers' not in self.__dict__:

            raise AttributeError(

                "cannot assign buffer before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("buffer name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("buffer name can't contain \".\"")

        elif name == '':

            raise KeyError("buffer name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._buffers:

            raise KeyError("attribute '{}' already exists".format(name))

        elif tensor is not None and not isinstance(tensor, torch.Tensor):

            raise TypeError("cannot assign '{}' object to buffer '{}' "

                            "(torch Tensor or None required)"

                            .format(torch.typename(tensor), name))

        else:

            self._buffers[name] = tensor

            if persistent:

                self._non_persistent_buffers_set.discard(name)

            else:

                self._non_persistent_buffers_set.add(name)

register_forward_hook

def register_forward_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward hook on the module.

The hook will be called every time after :func:forward has computed an output. It should have the following signature::

hook(module, input, output) -> None or modified output

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the output. It can modify the input inplace, but that will have no effect on forward since this is called after :func:forward has run.

Returns:

:class:torch.utils.hooks.RemovableHandle:
a handle that can be used to remove the added hook by calling
handle.remove()
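
A minimal usage sketch (model.layer4, features and x are placeholder names, not part of this module); the hook captures an intermediate activation during the forward pass::

>>> features = {}
>>> def save_output(module, input, output):
>>>     features['layer4'] = output.detach()
>>> handle = model.layer4.register_forward_hook(save_output)
>>> _ = model(x)   # features['layer4'] now holds the captured activation
>>> handle.remove()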

View Source
    def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward hook on the module.

        The hook will be called every time after :func:`forward` has computed an output.

        It should have the following signature::

            hook(module, input, output) -> None or modified output

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the output. It can modify the input inplace but

        it will not have effect on forward since this is called after

        :func:`forward` is called.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_hooks)

        self._forward_hooks[handle.id] = hook

        return handle

register_forward_pre_hook

def register_forward_pre_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward pre-hook on the module.

The hook will be called every time before :func:forward is invoked. It should have the following signature::

hook(module, input) -> None or modified input

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the forward. The hook can modify the input. The user can either return a tuple or a single modified value in the hook. We will wrap the value into a tuple if a single value is returned (unless that value is already a tuple).

Returns:

:class:torch.utils.hooks.RemovableHandle:
a handle that can be used to remove the added hook by calling
handle.remove()
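
A minimal usage sketch (model and x are placeholders); the pre-hook inspects the positional input just before forward runs::

>>> def print_input_shape(module, input):
>>>     print(input[0].shape)
>>> handle = model.register_forward_pre_hook(print_input_shape)
>>> _ = model(x)
>>> handle.remove()
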
View Source
    def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward pre-hook on the module.

        The hook will be called every time before :func:`forward` is invoked.

        It should have the following signature::

            hook(module, input) -> None or modified input

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the input. User can either return a tuple or a

        single modified value in the hook. We will wrap the value into a tuple

        if a single value is returned(unless that value is already a tuple).

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_pre_hooks)

        self._forward_pre_hooks[handle.id] = hook

        return handle

register_parameter

def register_parameter(
    self,
    name: str,
    param: torch.nn.parameter.Parameter
) -> None

Adds a parameter to the module.

The parameter can be accessed as an attribute using given name.

Parameters:

Name Type Description Default
name string name of the parameter. The parameter can be accessed
from this module using the given name None
param Parameter parameter to be added to the module. None
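
A minimal sketch, written as it would appear inside a custom module's __init__ (the name 'scale' is illustrative only); the registered parameter then shows up in parameters() and state_dict()::

>>> self.register_parameter('scale', nn.Parameter(torch.ones(1)))
>>> self.scale  # accessible as an attribute afterwards
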
View Source
    def register_parameter(self, name: str, param: Parameter) -> None:

        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using given name.

        Args:

            name (string): name of the parameter. The parameter can be accessed

                from this module using the given name

            param (Parameter): parameter to be added to the module.

        """

        if '_parameters' not in self.__dict__:

            raise AttributeError(

                "cannot assign parameter before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("parameter name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("parameter name can't contain \".\"")

        elif name == '':

            raise KeyError("parameter name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._parameters:

            raise KeyError("attribute '{}' already exists".format(name))

        if param is None:

            self._parameters[name] = None

        elif not isinstance(param, Parameter):

            raise TypeError("cannot assign '{}' object to parameter '{}' "

                            "(torch.nn.Parameter or None required)"

                            .format(torch.typename(param), name))

        elif param.grad_fn:

            raise ValueError(

                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "

                "parameters must be created explicitly. To express '{0}' "

                "as a function of another Tensor, compute the value in "

                "the forward() method.".format(name))

        else:

            self._parameters[name] = param

requires_grad_

def requires_grad_(
    self: ~T,
    requires_grad: bool = True
) -> ~T

Change if autograd should record operations on parameters in this

module.

This method sets the parameters' :attr:requires_grad attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

Parameters:

Name Type Description Default
requires_grad bool whether autograd should record operations on
parameters in this module. Default: True. None

Returns:

Type Description
Module self
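
A minimal freezing sketch (model.backbone is a placeholder attribute name, not necessarily one of this module's); only the unfrozen parameters are handed to the optimizer::

>>> model.backbone.requires_grad_(False)
>>> trainable = [p for p in model.parameters() if p.requires_grad]
>>> optimizer = torch.optim.SGD(trainable, lr=0.01)
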
View Source
    def requires_grad_(self: T, requires_grad: bool = True) -> T:

        r"""Change if autograd should record operations on parameters in this

        module.

        This method sets the parameters' :attr:`requires_grad` attributes

        in-place.

        This method is helpful for freezing part of the module for finetuning

        or training parts of a model individually (e.g., GAN training).

        Args:

            requires_grad (bool): whether autograd should record operations on

                                  parameters in this module. Default: ``True``.

        Returns:

            Module: self

        """

        for p in self.parameters():

            p.requires_grad_(requires_grad)

        return self

share_memory

def share_memory(
    self: ~T
) -> ~T
View Source
    def share_memory(self: T) -> T:

        return self._apply(lambda t: t.share_memory_())

state_dict

def state_dict(
    self,
    destination=None,
    prefix='',
    keep_vars=False
)

Returns a dictionary containing a whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names.

Returns:

Type Description
dict a dictionary containing a whole state of the module

Example::

>>> module.state_dict().keys()
['bias', 'weight']
View Source
    def state_dict(self, destination=None, prefix='', keep_vars=False):

        r"""Returns a dictionary containing a whole state of the module.

        Both parameters and persistent buffers (e.g. running averages) are

        included. Keys are corresponding parameter and buffer names.

        Returns:

            dict:

                a dictionary containing a whole state of the module

        Example::

            >>> module.state_dict().keys()

            ['bias', 'weight']

        """

        if destination is None:

            destination = OrderedDict()

            destination._metadata = OrderedDict()

        destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version)

        self._save_to_state_dict(destination, prefix, keep_vars)

        for name, module in self._modules.items():

            if module is not None:

                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)

        for hook in self._state_dict_hooks.values():

            hook_result = hook(self, destination, prefix, local_metadata)

            if hook_result is not None:

                destination = hook_result

        return destination

to

def to(
    self,
    *args,
    **kwargs
)

Moves and/or casts the parameters and buffers.

This can be called as

.. function:: to(device=None, dtype=None, non_blocking=False)

.. function:: to(dtype, non_blocking=False)

.. function:: to(tensor, non_blocking=False)

.. function:: to(memory_format=torch.channels_last)

Its signature is similar to :meth:torch.Tensor.to, but only accepts floating point desired :attr:dtype s. In addition, this method will only cast the floating point parameters and buffers to :attr:dtype (if given). The integral parameters and buffers will be moved :attr:device, if that is given, but with dtypes unchanged. When :attr:non_blocking is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

.. note:: This method modifies the module in-place.

Parameters:

Name Type Description Default
device :class:torch.device the desired device of the parameters
and buffers in this module None
dtype :class:torch.dtype the desired floating point type of
the floating point parameters and buffers in this module None
tensor torch.Tensor Tensor whose dtype and device are the desired
dtype and device for all parameters and buffers in this module None
memory_format :class:torch.memory_format the desired memory
format for 4D parameters and buffers in this module (keyword
only argument) None

Returns:

Type Description
Module self
Example::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)
View Source
    def to(self, *args, **kwargs):

        r"""Moves and/or casts the parameters and buffers.

        This can be called as

        .. function:: to(device=None, dtype=None, non_blocking=False)

        .. function:: to(dtype, non_blocking=False)

        .. function:: to(tensor, non_blocking=False)

        .. function:: to(memory_format=torch.channels_last)

        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts

        floating point desired :attr:`dtype` s. In addition, this method will

        only cast the floating point parameters and buffers to :attr:`dtype`

        (if given). The integral parameters and buffers will be moved

        :attr:`device`, if that is given, but with dtypes unchanged. When

        :attr:`non_blocking` is set, it tries to convert/move asynchronously

        with respect to the host if possible, e.g., moving CPU Tensors with

        pinned memory to CUDA devices.

        See below for examples.

        .. note::

            This method modifies the module in-place.

        Args:

            device (:class:`torch.device`): the desired device of the parameters

                and buffers in this module

            dtype (:class:`torch.dtype`): the desired floating point type of

                the floating point parameters and buffers in this module

            tensor (torch.Tensor): Tensor whose dtype and device are the desired

                dtype and device for all parameters and buffers in this module

            memory_format (:class:`torch.memory_format`): the desired memory

                format for 4D parameters and buffers in this module (keyword

                only argument)

        Returns:

            Module: self

        Example::

            >>> linear = nn.Linear(2, 2)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]])

            >>> linear.to(torch.double)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]], dtype=torch.float64)

            >>> gpu1 = torch.device("cuda:1")

            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')

            >>> cpu = torch.device("cpu")

            >>> linear.to(cpu)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16)

        """

        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if dtype is not None:

            if not dtype.is_floating_point:

                raise TypeError('nn.Module.to only accepts floating point '

                                'dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):

            if convert_to_format is not None and t.dim() == 4:

                return t.to(device, dtype if t.is_floating_point() else None, non_blocking, memory_format=convert_to_format)

            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)

        return self._apply(convert)

train

def train(
    self: ~T,
    mode: bool = True
) -> ~T

Sets the module in training mode.

This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:Dropout, :class:BatchNorm, etc.

Parameters:

Name Type Description Default
mode bool whether to set training mode (True) or evaluation
mode (False). Default: True. None

Returns:

Type Description
Module self
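
A minimal sketch of switching between the two modes (model is a placeholder)::

>>> model.train()   # Dropout/BatchNorm use their training behavior
>>> # ... training loop ...
>>> model.eval()    # same as model.train(False)
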
View Source
    def train(self: T, mode: bool = True) -> T:

        r"""Sets the module in training mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        Args:

            mode (bool): whether to set training mode (``True``) or evaluation

                         mode (``False``). Default: ``True``.

        Returns:

            Module: self

        """

        self.training = mode

        for module in self.children():

            module.train(mode)

        return self

type

def type(
    self: ~T,
    dst_type: Union[torch.dtype, str]
) -> ~T

Casts all parameters and buffers to :attr:dst_type.

Parameters:

Name Type Description Default
dst_type type or string the desired type None

Returns:

Type Description
Module self
View Source
    def type(self: T, dst_type: Union[dtype, str]) -> T:

        r"""Casts all parameters and buffers to :attr:`dst_type`.

        Arguments:

            dst_type (type or string): the desired type

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.type(dst_type))

zero_grad

def zero_grad(
    self
) -> None

Sets gradients of all model parameters to zero.
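
A minimal sketch of the usual placement in a training step (model, criterion, optimizer, inputs and targets are placeholders)::

>>> model.zero_grad()
>>> loss = criterion(model(inputs), targets)
>>> loss.backward()
>>> optimizer.step()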

View Source
    def zero_grad(self) -> None:

        r"""Sets gradients of all model parameters to zero."""

        if getattr(self, '_is_replica', False):

            warnings.warn(

                "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "

                "The parameters are copied (in a differentiable manner) from the original module. "

                "This means they are not leaf nodes in autograd and so don't accumulate gradients. "

                "If you need gradients in your forward method, consider using autograd.grad instead.")

        for p in self.parameters():

            if p.grad is not None:

                p.grad.detach_()

                p.grad.zero_()

RoI

class RoI(
    num_classes: int,
    box_fg_iou_thresh=0.5,
    box_bg_iou_thresh=0.5,
    box_batch_size_per_image=512,
    box_positive_fraction=0.25,
    bbox_reg_weights=None,
    box_score_thresh=0.05,
    box_nms_thresh=0.5,
    box_detections_per_img=100
)
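
The class carries no docstring in the source. Its constructor arguments appear to mirror torchvision's box_* detection hyperparameters (foreground/background IoU thresholds, proposals sampled per image, score and NMS thresholds, detections kept per image), and its forward simply delegates to the wrapped roi_head (see forward below). A minimal construction sketch; the num_classes value is a placeholder, not taken from this repository::

>>> roi = RoI(num_classes=7)   # e.g. defect classes + background; value is illustrative
>>> # roi(...) passes all arguments through to roi_head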

Ancestors (in MRO)

  • torch.nn.modules.module.Module

Class variables

T_destination
dump_patches

Methods

add_module

def add_module(
    self,
    name: str,
    module: 'Module'
) -> None

Adds a child module to the current module.

The module can be accessed as an attribute using the given name.

Parameters:

Name Type Description Default
name string name of the child module. The child module can be
accessed from this module using the given name None
module Module child module to be added to the module. None
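
A minimal sketch, as it would appear inside a custom module's __init__ (the name 'head' and the layer sizes are illustrative); assigning a Module to an attribute has the same effect::

>>> self.add_module('head', nn.Linear(512, 10))
>>> self.head  # accessible as an attribute afterwards
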
View Source
    def add_module(self, name: str, module: 'Module') -> None:

        r"""Adds a child module to the current module.

        The module can be accessed as an attribute using the given name.

        Args:

            name (string): name of the child module. The child module can be

                accessed from this module using the given name

            module (Module): child module to be added to the module.

        """

        if not isinstance(module, Module) and module is not None:

            raise TypeError("{} is not a Module subclass".format(

                torch.typename(module)))

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("module name should be a string. Got {}".format(

                torch.typename(name)))

        elif hasattr(self, name) and name not in self._modules:

            raise KeyError("attribute '{}' already exists".format(name))

        elif '.' in name:

            raise KeyError("module name can't contain \".\"")

        elif name == '':

            raise KeyError("module name can't be empty string \"\"")

        self._modules[name] = module

apply

def apply(
    self: ~T,
    fn: Callable[[ForwardRef('Module')], NoneType]
) -> ~T

Applies fn recursively to every submodule (as returned by .children())

as well as self. Typical use includes initializing the parameters of a model (see also :ref:nn-init-doc).

Parameters:

Name Type Description Default
fn (:class:Module -> None) function to be applied to each submodule None

Returns:

Type Description
Module self
Example::
>>> @torch.no_grad()
>>> def init_weights(m):
>>>     print(m)
>>>     if type(m) == nn.Linear:
>>>         m.weight.fill_(1.0)
>>>         print(m.weight)
>>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
>>> net.apply(init_weights)
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Linear(in_features=2, out_features=2, bias=True)
Parameter containing:
tensor([[ 1.,  1.],
        [ 1.,  1.]])
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
View Source
    def apply(self: T, fn: Callable[['Module'], None]) -> T:

        r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)

        as well as self. Typical use includes initializing the parameters of a model

        (see also :ref:`nn-init-doc`).

        Args:

            fn (:class:`Module` -> None): function to be applied to each submodule

        Returns:

            Module: self

        Example::

            >>> @torch.no_grad()

            >>> def init_weights(m):

            >>>     print(m)

            >>>     if type(m) == nn.Linear:

            >>>         m.weight.fill_(1.0)

            >>>         print(m.weight)

            >>> net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))

            >>> net.apply(init_weights)

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Linear(in_features=2, out_features=2, bias=True)

            Parameter containing:

            tensor([[ 1.,  1.],

                    [ 1.,  1.]])

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

        """

        for module in self.children():

            module.apply(fn)

        fn(self)

        return self

bfloat16

def bfloat16(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to bfloat16 datatype.

Returns:

Type Description
Module self
View Source
    def bfloat16(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``bfloat16`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.bfloat16() if t.is_floating_point() else t)

buffers

def buffers(
    self,
    recurse: bool = True
) -> Iterator[torch.Tensor]

Returns an iterator over module buffers.

Parameters:

Name Type Description Default
recurse bool if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module. None

Yields:

Type Description
torch.Tensor module buffer
Example::
>>> for buf in model.buffers():
>>>     print(type(buf), buf.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def buffers(self, recurse: bool = True) -> Iterator[Tensor]:

        r"""Returns an iterator over module buffers.

        Args:

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            torch.Tensor: module buffer

        Example::

            >>> for buf in model.buffers():

            >>>     print(type(buf), buf.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, buf in self.named_buffers(recurse=recurse):

            yield buf

children

def children(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over immediate children modules.

Yields:

Type Description
Module a child module
View Source
    def children(self) -> Iterator['Module']:

        r"""Returns an iterator over immediate children modules.

        Yields:

            Module: a child module

        """

        for name, module in self.named_children():

            yield module

cpu

def cpu(
    self: ~T
) -> ~T

Moves all model parameters and buffers to the CPU.

Returns:

Type Description
Module self
View Source
    def cpu(self: T) -> T:

        r"""Moves all model parameters and buffers to the CPU.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cpu())

cuda

def cuda(
    self: ~T,
    device: Union[int, torch.device, NoneType] = None
) -> ~T

Moves all model parameters and buffers to the GPU.

This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized.

Parameters:

Name Type Description Default
device int if specified, all parameters will be
copied to that device None

Returns:

Type Description
Module self
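
A minimal sketch of the ordering mentioned above: move the module first, then build the optimizer from its (now CUDA) parameters (model is a placeholder)::

>>> model = model.cuda()
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
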
View Source
    def cuda(self: T, device: Optional[Union[int, device]] = None) -> T:

        r"""Moves all model parameters and buffers to the GPU.

        This also makes associated parameters and buffers different objects. So

        it should be called before constructing optimizer if the module will

        live on GPU while being optimized.

        Arguments:

            device (int, optional): if specified, all parameters will be

                copied to that device

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.cuda(device))

double

def double(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to double datatype.

Returns:

Type Description
Module self
View Source
    def double(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``double`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.double() if t.is_floating_point() else t)

eval

def eval(
    self: ~T
) -> ~T

Sets the module in evaluation mode.

This has any effect only on certain modules. See documentations of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. :class:Dropout, :class:BatchNorm, etc.

This is equivalent with :meth:self.train(False) <torch.nn.Module.train>.

Returns:

Type Description
Module self
View Source
    def eval(self: T) -> T:

        r"""Sets the module in evaluation mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        This is equivalent with :meth:`self.train(False) <torch.nn.Module.train>`.

        Returns:

            Module: self

        """

        return self.train(False)

extra_repr

def extra_repr(
    self
) -> str

Set the extra representation of the module

To print customized extra information, you should reimplement this method in your own modules. Both single-line and multi-line strings are acceptable.

View Source
    def extra_repr(self) -> str:

        r"""Set the extra representation of the module

        To print customized extra information, you should reimplement

        this method in your own modules. Both single-line and multi-line

        strings are acceptable.

        """

        return ''

float

def float(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to float datatype.

Returns:

Type Description
Module self
View Source
    def float(self: T) -> T:

        r"""Casts all floating point parameters and buffers to float datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.float() if t.is_floating_point() else t)

forward

def forward(
    self,
    *args,
    **kwargs
)
View Source
    def forward(self, *args, **kwargs):

        return self.roi_head(*args, **kwargs)

half

def half(
    self: ~T
) -> ~T

Casts all floating point parameters and buffers to half datatype.

Returns:

Type Description
Module self
View Source
    def half(self: T) -> T:

        r"""Casts all floating point parameters and buffers to ``half`` datatype.

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.half() if t.is_floating_point() else t)

load_state_dict

def load_state_dict(
    self,
    state_dict: Dict[str, torch.Tensor],
    strict: bool = True
)

Copies parameters and buffers from :attr:state_dict into

this module and its descendants. If :attr:strict is True, then the keys of :attr:state_dict must exactly match the keys returned by this module's :meth:~torch.nn.Module.state_dict function.

Parameters:

Name Type Description Default
state_dict dict a dict containing parameters and
persistent buffers. None
strict bool whether to strictly enforce that the keys
in :attr:state_dict match the keys returned by this module's
:meth:~torch.nn.Module.state_dict function. Default: True None

Returns:

NamedTuple with missing_keys and unexpected_keys fields:
* missing_keys is a list of str containing the missing keys
* unexpected_keys is a list of str containing the unexpected keys
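
A minimal save/restore round trip (the checkpoint path is a placeholder)::

>>> torch.save(model.state_dict(), 'checkpoint.pth')
>>> model.load_state_dict(torch.load('checkpoint.pth'))  # strict=True by default
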
View Source
    def load_state_dict(self, state_dict: Union[Dict[str, Tensor], Dict[str, Tensor]],

                        strict: bool = True):

        r"""Copies parameters and buffers from :attr:`state_dict` into

        this module and its descendants. If :attr:`strict` is ``True``, then

        the keys of :attr:`state_dict` must exactly match the keys returned

        by this module's :meth:`~torch.nn.Module.state_dict` function.

        Arguments:

            state_dict (dict): a dict containing parameters and

                persistent buffers.

            strict (bool, optional): whether to strictly enforce that the keys

                in :attr:`state_dict` match the keys returned by this module's

                :meth:`~torch.nn.Module.state_dict` function. Default: ``True``

        Returns:

            ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields:

                * **missing_keys** is a list of str containing the missing keys

                * **unexpected_keys** is a list of str containing the unexpected keys

        """

        missing_keys = []

        unexpected_keys = []

        error_msgs = []

        # copy state_dict so _load_from_state_dict can modify it

        metadata = getattr(state_dict, '_metadata', None)

        state_dict = state_dict.copy()

        if metadata is not None:

            state_dict._metadata = metadata

        def load(module, prefix=''):

            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})

            module._load_from_state_dict(

                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)

            for name, child in module._modules.items():

                if child is not None:

                    load(child, prefix + name + '.')

        load(self)

        load = None  # break load->load reference cycle

        if strict:

            if len(unexpected_keys) > 0:

                error_msgs.insert(

                    0, 'Unexpected key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in unexpected_keys)))

            if len(missing_keys) > 0:

                error_msgs.insert(

                    0, 'Missing key(s) in state_dict: {}. '.format(

                        ', '.join('"{}"'.format(k) for k in missing_keys)))

        if len(error_msgs) > 0:

            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

                               self.__class__.__name__, "\n\t".join(error_msgs)))

        return _IncompatibleKeys(missing_keys, unexpected_keys)

modules

def modules(
    self
) -> Iterator[ForwardRef('Module')]

Returns an iterator over all modules in the network.

Yields:

Type Description
Module a module in the network
Note:
Duplicate modules are returned only once. In the following
example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.modules()):
        print(idx, '->', m)

0 -> Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
)
1 -> Linear(in_features=2, out_features=2, bias=True)
View Source
    def modules(self) -> Iterator['Module']:

        r"""Returns an iterator over all modules in the network.

        Yields:

            Module: a module in the network

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.modules()):

                    print(idx, '->', m)

            0 -> Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            )

            1 -> Linear(in_features=2, out_features=2, bias=True)

        """

        for name, module in self.named_modules():

            yield module

named_buffers

def named_buffers(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module buffers, yielding both the

name of the buffer as well as the buffer itself.

Parameters:

Name Type Description Default
prefix str prefix to prepend to all buffer names. None
recurse bool if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module. None

Yields:

(string, torch.Tensor): Tuple containing the name and buffer

Example::

>>> for name, buf in self.named_buffers():
>>>    if name in ['running_var']:
>>>        print(buf.size())
View Source
    def named_buffers(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module buffers, yielding both the

        name of the buffer as well as the buffer itself.

        Args:

            prefix (str): prefix to prepend to all buffer names.

            recurse (bool): if True, then yields buffers of this module

                and all submodules. Otherwise, yields only buffers that

                are direct members of this module.

        Yields:

            (string, torch.Tensor): Tuple containing the name and buffer

        Example::

            >>> for name, buf in self.named_buffers():

            >>>    if name in ['running_var']:

            >>>        print(buf.size())

        """

        gen = self._named_members(

            lambda module: module._buffers.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

named_children

def named_children(
    self
) -> Iterator[Tuple[str, ForwardRef('Module')]]

Returns an iterator over immediate children modules, yielding both

the name of the module as well as the module itself.

Yields:

(string, Module): Tuple containing a name and child module

Example::

>>> for name, module in model.named_children():
>>>     if name in ['conv4', 'conv5']:
>>>         print(module)
View Source
    def named_children(self) -> Iterator[Tuple[str, 'Module']]:

        r"""Returns an iterator over immediate children modules, yielding both

        the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple containing a name and child module

        Example::

            >>> for name, module in model.named_children():

            >>>     if name in ['conv4', 'conv5']:

            >>>         print(module)

        """

        memo = set()

        for name, module in self._modules.items():

            if module is not None and module not in memo:

                memo.add(module)

                yield name, module

named_modules

def named_modules(
    self,
    memo: Union[Set[ForwardRef('Module')], NoneType] = None,
    prefix: str = ''
)

Returns an iterator over all modules in the network, yielding

both the name of the module as well as the module itself.

Yields:

(string, Module): Tuple of name and module

Note: Duplicate modules are returned only once. In the following example, l will be returned only once.

Example::

>>> l = nn.Linear(2, 2)
>>> net = nn.Sequential(l, l)
>>> for idx, m in enumerate(net.named_modules()):
        print(idx, '->', m)

0 -> ('', Sequential(
  (0): Linear(in_features=2, out_features=2, bias=True)
  (1): Linear(in_features=2, out_features=2, bias=True)
))
1 -> ('0', Linear(in_features=2, out_features=2, bias=True))
View Source
    def named_modules(self, memo: Optional[Set['Module']] = None, prefix: str = ''):

        r"""Returns an iterator over all modules in the network, yielding

        both the name of the module as well as the module itself.

        Yields:

            (string, Module): Tuple of name and module

        Note:

            Duplicate modules are returned only once. In the following

            example, ``l`` will be returned only once.

        Example::

            >>> l = nn.Linear(2, 2)

            >>> net = nn.Sequential(l, l)

            >>> for idx, m in enumerate(net.named_modules()):

                    print(idx, '->', m)

            0 -> ('', Sequential(

              (0): Linear(in_features=2, out_features=2, bias=True)

              (1): Linear(in_features=2, out_features=2, bias=True)

            ))

            1 -> ('0', Linear(in_features=2, out_features=2, bias=True))

        """

        if memo is None:

            memo = set()

        if self not in memo:

            memo.add(self)

            yield prefix, self

            for name, module in self._modules.items():

                if module is None:

                    continue

                submodule_prefix = prefix + ('.' if prefix else '') + name

                for m in module.named_modules(memo, submodule_prefix):

                    yield m

named_parameters

def named_parameters(
    self,
    prefix: str = '',
    recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]

Returns an iterator over module parameters, yielding both the

name of the parameter as well as the parameter itself.

Parameters:

Name Type Description Default
prefix str prefix to prepend to all parameter names. None
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

(string, Parameter): Tuple containing the name and parameter

Example::

>>> for name, param in self.named_parameters():
>>>    if name in ['bias']:
>>>        print(param.size())
View Source
    def named_parameters(self, prefix: str = '', recurse: bool = True) -> Iterator[Tuple[str, Tensor]]:

        r"""Returns an iterator over module parameters, yielding both the

        name of the parameter as well as the parameter itself.

        Args:

            prefix (str): prefix to prepend to all parameter names.

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            (string, Parameter): Tuple containing the name and parameter

        Example::

            >>> for name, param in self.named_parameters():

            >>>    if name in ['bias']:

            >>>        print(param.size())

        """

        gen = self._named_members(

            lambda module: module._parameters.items(),

            prefix=prefix, recurse=recurse)

        for elem in gen:

            yield elem

parameters

def parameters(
    self,
    recurse: bool = True
) -> Iterator[torch.nn.parameter.Parameter]

Returns an iterator over module parameters.

This is typically passed to an optimizer.

Parameters:

Name Type Description Default
recurse bool if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module. None

Yields:

Type Description
Parameter module parameter
Example::
>>> for param in model.parameters():
>>>     print(type(param), param.size())
<class 'torch.Tensor'> (20L,)
<class 'torch.Tensor'> (20L, 1L, 5L, 5L)
View Source
    def parameters(self, recurse: bool = True) -> Iterator[Parameter]:

        r"""Returns an iterator over module parameters.

        This is typically passed to an optimizer.

        Args:

            recurse (bool): if True, then yields parameters of this module

                and all submodules. Otherwise, yields only parameters that

                are direct members of this module.

        Yields:

            Parameter: module parameter

        Example::

            >>> for param in model.parameters():

            >>>     print(type(param), param.size())

            <class 'torch.Tensor'> (20L,)

            <class 'torch.Tensor'> (20L, 1L, 5L, 5L)

        """

        for name, param in self.named_parameters(recurse=recurse):

            yield param

register_backward_hook

def register_backward_hook(
    self,
    hook: Callable[[ForwardRef('Module'), Union[Tuple[torch.Tensor, ...], torch.Tensor], Union[Tuple[torch.Tensor, ...], torch.Tensor]], Union[NoneType, torch.Tensor]]
) -> torch.utils.hooks.RemovableHandle

Registers a backward hook on the module.

.. warning ::

The current implementation will not have the presented behavior
for complex :class:`Module` that perform many operations.
In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only
contain the gradients for a subset of the inputs and outputs.
For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`
directly on a specific input or output to get the required gradients.

The hook will be called every time the gradients with respect to module inputs are computed. The hook should have the following signature::

hook(module, grad_input, grad_output) -> Tensor or None

The :attr:grad_input and :attr:grad_output may be tuples if the module has multiple inputs or outputs. The hook should not modify its arguments, but it can optionally return a new gradient with respect to input that will be used in place of :attr:grad_input in subsequent computations. :attr:grad_input will only correspond to the inputs given as positional arguments.

Returns:

:class:torch.utils.hooks.RemovableHandle:
a handle that can be used to remove the added hook by calling
handle.remove()
View Source
    def register_backward_hook(

        self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, Tensor]]

    ) -> RemovableHandle:

        r"""Registers a backward hook on the module.

        .. warning ::

            The current implementation will not have the presented behavior

            for complex :class:`Module` that perform many operations.

            In some failure cases, :attr:`grad_input` and :attr:`grad_output` will only

            contain the gradients for a subset of the inputs and outputs.

            For such :class:`Module`, you should use :func:`torch.Tensor.register_hook`

            directly on a specific input or output to get the required gradients.

        The hook will be called every time the gradients with respect to module

        inputs are computed. The hook should have the following signature::

            hook(module, grad_input, grad_output) -> Tensor or None

        The :attr:`grad_input` and :attr:`grad_output` may be tuples if the

        module has multiple inputs or outputs. The hook should not modify its

        arguments, but it can optionally return a new gradient with respect to

        input that will be used in place of :attr:`grad_input` in subsequent

        computations. :attr:`grad_input` will only correspond to the inputs given

        as positional arguments.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._backward_hooks)

        self._backward_hooks[handle.id] = hook

        return handle
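
A minimal sketch of attaching and later removing a backward hook. The hook below only inspects gradients and returns None, so it does not alter them; the layer and tensor sizes are illustrative, not part of this module.

    import torch
    import torch.nn as nn

    def log_grad_norms(module, grad_input, grad_output):
        # grad_output holds the gradients w.r.t. the module outputs
        print(module.__class__.__name__,
              [g.norm().item() for g in grad_output if g is not None])
        return None  # returning None leaves the gradients unchanged

    layer = nn.Linear(4, 3)
    handle = layer.register_backward_hook(log_grad_norms)

    out = layer(torch.randn(2, 4))
    out.sum().backward()  # triggers the hook

    handle.remove()  # detach the hook once it is no longer needed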

register_buffer

def register_buffer(
    self,
    name: str,
    tensor: torch.Tensor,
    persistent: bool = True
) -> None

Adds a buffer to the module.

This is typically used to register a buffer that should not be considered a model parameter. For example, BatchNorm's `running_mean` is not a parameter, but is part of the module's state. Buffers are persistent by default and will be saved alongside parameters. This behavior can be changed by setting `persistent` to `False`. The only difference between a persistent buffer and a non-persistent buffer is that the latter will not be a part of this module's `state_dict`.

Buffers can be accessed as attributes using given names.

Args:

    name (string): name of the buffer. The buffer can be accessed from this module using the given name
    tensor (Tensor): buffer to be registered.
    persistent (bool): whether the buffer is part of this module's `state_dict`.

Example::

>>> self.register_buffer('running_mean', torch.zeros(num_features))
View Source
    def register_buffer(self, name: str, tensor: Tensor, persistent: bool = True) -> None:

        r"""Adds a buffer to the module.

        This is typically used to register a buffer that should not to be

        considered a model parameter. For example, BatchNorm's ``running_mean``

        is not a parameter, but is part of the module's state. Buffers, by

        default, are persistent and will be saved alongside parameters. This

        behavior can be changed by setting :attr:`persistent` to ``False``. The

        only difference between a persistent buffer and a non-persistent buffer

        is that the latter will not be a part of this module's

        :attr:`state_dict`.

        Buffers can be accessed as attributes using given names.

        Args:

            name (string): name of the buffer. The buffer can be accessed

                from this module using the given name

            tensor (Tensor): buffer to be registered.

            persistent (bool): whether the buffer is part of this module's

                :attr:`state_dict`.

        Example::

            >>> self.register_buffer('running_mean', torch.zeros(num_features))

        """

        if persistent is False and isinstance(self, torch.jit.ScriptModule):

            raise RuntimeError("ScriptModule does not support non-persistent buffers")

        if '_buffers' not in self.__dict__:

            raise AttributeError(

                "cannot assign buffer before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("buffer name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("buffer name can't contain \".\"")

        elif name == '':

            raise KeyError("buffer name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._buffers:

            raise KeyError("attribute '{}' already exists".format(name))

        elif tensor is not None and not isinstance(tensor, torch.Tensor):

            raise TypeError("cannot assign '{}' object to buffer '{}' "

                            "(torch Tensor or None required)"

                            .format(torch.typename(tensor), name))

        else:

            self._buffers[name] = tensor

            if persistent:

                self._non_persistent_buffers_set.discard(name)

            else:

                self._non_persistent_buffers_set.add(name)
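
A minimal sketch (names are illustrative) of a module that keeps a non-trainable running statistic as a buffer: the buffer travels with `state_dict` and `.to(device)`, but it is never yielded by `parameters()` and receives no gradients.

    import torch
    import torch.nn as nn

    class RunningMean(nn.Module):
        def __init__(self, num_features: int):
            super().__init__()
            # persistent buffer: saved in state_dict, moved by .to()/.cuda()
            self.register_buffer("running_mean", torch.zeros(num_features))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self.training:
                # update the statistic in place; no gradient is tracked for it
                self.running_mean.mul_(0.9).add_(0.1 * x.mean(dim=0))
            return x - self.running_mean

    m = RunningMean(8)
    print("running_mean" in m.state_dict())  # True
    print(list(m.parameters()))              # [] -- buffers are not parameters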

register_forward_hook

def register_forward_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward hook on the module.

The hook will be called every time after `forward` has computed an output. It should have the following signature::

hook(module, input, output) -> None or modified output

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the `forward`. The hook can modify the output. It can modify the input inplace but it will not have effect on forward since this is called after `forward` is called.

Returns:

| Type | Description |
| --- | --- |
| `torch.utils.hooks.RemovableHandle` | a handle that can be used to remove the added hook by calling `handle.remove()` |

View Source
    def register_forward_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward hook on the module.

        The hook will be called every time after :func:`forward` has computed an output.

        It should have the following signature::

            hook(module, input, output) -> None or modified output

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the output. It can modify the input inplace but

        it will not have effect on forward since this is called after

        :func:`forward` is called.

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_hooks)

        self._forward_hooks[handle.id] = hook

        return handle
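
A minimal sketch of a common debugging pattern: a forward hook that records intermediate output shapes. The layers and input sizes are illustrative only.

    import torch
    import torch.nn as nn

    shapes = {}

    def record_shape(module, inputs, output):
        # called after forward(); returning None keeps the output unchanged
        shapes[module.__class__.__name__] = tuple(output.shape)

    net = nn.Sequential(nn.Linear(16, 8), nn.ReLU(), nn.Linear(8, 4))
    handles = [m.register_forward_hook(record_shape) for m in net]

    net(torch.randn(2, 16))
    print(shapes)  # e.g. {'Linear': (2, 4), 'ReLU': (2, 8)}

    for h in handles:
        h.remove()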

register_forward_pre_hook

def register_forward_pre_hook(
    self,
    hook: Callable[..., NoneType]
) -> torch.utils.hooks.RemovableHandle

Registers a forward pre-hook on the module.

The hook will be called every time before `forward` is invoked. It should have the following signature::

hook(module, input) -> None or modified input

The input contains only the positional arguments given to the module. Keyword arguments won't be passed to the hooks and only to the `forward`. The hook can modify the input. The user can either return a tuple or a single modified value from the hook. The value will be wrapped into a tuple if a single value is returned (unless that value is already a tuple).

Returns:

| Type | Description |
| --- | --- |
| `torch.utils.hooks.RemovableHandle` | a handle that can be used to remove the added hook by calling `handle.remove()` |
View Source
    def register_forward_pre_hook(self, hook: Callable[..., None]) -> RemovableHandle:

        r"""Registers a forward pre-hook on the module.

        The hook will be called every time before :func:`forward` is invoked.

        It should have the following signature::

            hook(module, input) -> None or modified input

        The input contains only the positional arguments given to the module.

        Keyword arguments won't be passed to the hooks and only to the ``forward``.

        The hook can modify the input. User can either return a tuple or a

        single modified value in the hook. We will wrap the value into a tuple

        if a single value is returned(unless that value is already a tuple).

        Returns:

            :class:`torch.utils.hooks.RemovableHandle`:

                a handle that can be used to remove the added hook by calling

                ``handle.remove()``

        """

        handle = hooks.RemovableHandle(self._forward_pre_hooks)

        self._forward_pre_hooks[handle.id] = hook

        return handle
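
A minimal sketch of a pre-hook that adjusts the positional inputs before `forward` runs; returning a tuple replaces the original arguments. The dtype cast shown is only an illustration.

    import torch
    import torch.nn as nn

    def cast_to_float(module, inputs):
        # inputs is the tuple of positional arguments passed to forward()
        return tuple(x.float() if torch.is_tensor(x) else x for x in inputs)

    layer = nn.Linear(4, 2)
    handle = layer.register_forward_pre_hook(cast_to_float)

    out = layer(torch.randn(3, 4, dtype=torch.float64))  # cast to float32 by the hook
    print(out.dtype)  # torch.float32

    handle.remove()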

register_parameter

def register_parameter(
    self,
    name: str,
    param: torch.nn.parameter.Parameter
) -> None

Adds a parameter to the module.

The parameter can be accessed as an attribute using given name.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| name | string | name of the parameter. The parameter can be accessed from this module using the given name | None |
| param | Parameter | parameter to be added to the module. | None |
View Source
    def register_parameter(self, name: str, param: Parameter) -> None:

        r"""Adds a parameter to the module.

        The parameter can be accessed as an attribute using given name.

        Args:

            name (string): name of the parameter. The parameter can be accessed

                from this module using the given name

            param (Parameter): parameter to be added to the module.

        """

        if '_parameters' not in self.__dict__:

            raise AttributeError(

                "cannot assign parameter before Module.__init__() call")

        elif not isinstance(name, torch._six.string_classes):

            raise TypeError("parameter name should be a string. "

                            "Got {}".format(torch.typename(name)))

        elif '.' in name:

            raise KeyError("parameter name can't contain \".\"")

        elif name == '':

            raise KeyError("parameter name can't be empty string \"\"")

        elif hasattr(self, name) and name not in self._parameters:

            raise KeyError("attribute '{}' already exists".format(name))

        if param is None:

            self._parameters[name] = None

        elif not isinstance(param, Parameter):

            raise TypeError("cannot assign '{}' object to parameter '{}' "

                            "(torch.nn.Parameter or None required)"

                            .format(torch.typename(param), name))

        elif param.grad_fn:

            raise ValueError(

                "Cannot assign non-leaf Tensor to parameter '{0}'. Model "

                "parameters must be created explicitly. To express '{0}' "

                "as a function of another Tensor, compute the value in "

                "the forward() method.".format(name))

        else:

            self._parameters[name] = param
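
A minimal sketch (names are illustrative) of registering a learnable parameter explicitly; this is equivalent to assigning an `nn.Parameter` attribute in `__init__`.

    import torch
    import torch.nn as nn

    class ScaledIdentity(nn.Module):
        def __init__(self):
            super().__init__()
            # explicit registration; the parameter is tracked by the module
            self.register_parameter("scale", nn.Parameter(torch.ones(1)))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return self.scale * x

    m = ScaledIdentity()
    print([name for name, _ in m.named_parameters()])  # ['scale']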

requires_grad_

def requires_grad_(
    self: ~T,
    requires_grad: bool = True
) -> ~T

Change if autograd should record operations on parameters in this module.

This method sets the parameters' `requires_grad` attributes in-place.

This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training).

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| requires_grad | bool | whether autograd should record operations on parameters in this module. Default: True. | None |

Returns:

| Type | Description |
| --- | --- |
| Module | self |
View Source
    def requires_grad_(self: T, requires_grad: bool = True) -> T:

        r"""Change if autograd should record operations on parameters in this

        module.

        This method sets the parameters' :attr:`requires_grad` attributes

        in-place.

        This method is helpful for freezing part of the module for finetuning

        or training parts of a model individually (e.g., GAN training).

        Args:

            requires_grad (bool): whether autograd should record operations on

                                  parameters in this module. Default: ``True``.

        Returns:

            Module: self

        """

        for p in self.parameters():

            p.requires_grad_(requires_grad)

        return self
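
A minimal sketch of the freezing use case mentioned above, e.g. keeping a pretrained feature extractor fixed while only a new head is trained. The module names and class count are placeholders, not this repository's API.

    import torch.nn as nn
    import torchvision

    feature_extractor = torchvision.models.resnet34(pretrained=True)
    feature_extractor.requires_grad_(False)   # freeze: no gradients recorded

    head = nn.Linear(1000, 7)                 # only this part will be trained
    trainable = [p for p in head.parameters() if p.requires_grad]
    print(len(trainable))  # 2 (weight and bias)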

share_memory

def share_memory(
    self: ~T
) -> ~T
View Source
    def share_memory(self: T) -> T:

        return self._apply(lambda t: t.share_memory_())

state_dict

def state_dict(
    self,
    destination=None,
    prefix='',
    keep_vars=False
)

Returns a dictionary containing a whole state of the module.

Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names.

Returns:

| Type | Description |
| --- | --- |
| dict | a dictionary containing a whole state of the module |

Example::

>>> module.state_dict().keys()
['bias', 'weight']
View Source
    def state_dict(self, destination=None, prefix='', keep_vars=False):

        r"""Returns a dictionary containing a whole state of the module.

        Both parameters and persistent buffers (e.g. running averages) are

        included. Keys are corresponding parameter and buffer names.

        Returns:

            dict:

                a dictionary containing a whole state of the module

        Example::

            >>> module.state_dict().keys()

            ['bias', 'weight']

        """

        if destination is None:

            destination = OrderedDict()

            destination._metadata = OrderedDict()

        destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version)

        self._save_to_state_dict(destination, prefix, keep_vars)

        for name, module in self._modules.items():

            if module is not None:

                module.state_dict(destination, prefix + name + '.', keep_vars=keep_vars)

        for hook in self._state_dict_hooks.values():

            hook_result = hook(self, destination, prefix, local_metadata)

            if hook_result is not None:

                destination = hook_result

        return destination
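
A minimal sketch of the usual round trip: serialize the state dict to disk and load it back into a freshly constructed module of the same architecture. The file name and module are illustrative.

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    torch.save(model.state_dict(), "checkpoint.pth")  # only tensors are saved, no code

    restored = nn.Linear(4, 2)
    restored.load_state_dict(torch.load("checkpoint.pth"))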

to

def to(
    self,
    *args,
    **kwargs
)

Moves and/or casts the parameters and buffers.

This can be called as

    to(device=None, dtype=None, non_blocking=False)
    to(dtype, non_blocking=False)
    to(tensor, non_blocking=False)
    to(memory_format=torch.channels_last)

Its signature is similar to `torch.Tensor.to`, but only accepts floating point desired `dtype`s. In addition, this method will only cast the floating point parameters and buffers to `dtype` (if given). The integral parameters and buffers will be moved to `device`, if that is given, but with dtypes unchanged. When `non_blocking` is set, it tries to convert/move asynchronously with respect to the host if possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.

See below for examples.

Note: This method modifies the module in-place.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| device | `torch.device` | the desired device of the parameters and buffers in this module | None |
| dtype | `torch.dtype` | the desired floating point type of the floating point parameters and buffers in this module | None |
| tensor | `torch.Tensor` | Tensor whose dtype and device are the desired dtype and device for all parameters and buffers in this module | None |
| memory_format | `torch.memory_format` | the desired memory format for 4D parameters and buffers in this module (keyword only argument) | None |

Returns:

| Type | Description |
| --- | --- |
| Module | self |
Example::
>>> linear = nn.Linear(2, 2)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]])
>>> linear.to(torch.double)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1913, -0.3420],
        [-0.5113, -0.2325]], dtype=torch.float64)
>>> gpu1 = torch.device("cuda:1")
>>> linear.to(gpu1, dtype=torch.half, non_blocking=True)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')
>>> cpu = torch.device("cpu")
>>> linear.to(cpu)
Linear(in_features=2, out_features=2, bias=True)
>>> linear.weight
Parameter containing:
tensor([[ 0.1914, -0.3420],
        [-0.5112, -0.2324]], dtype=torch.float16)
View Source
    def to(self, *args, **kwargs):

        r"""Moves and/or casts the parameters and buffers.

        This can be called as

        .. function:: to(device=None, dtype=None, non_blocking=False)

        .. function:: to(dtype, non_blocking=False)

        .. function:: to(tensor, non_blocking=False)

        .. function:: to(memory_format=torch.channels_last)

        Its signature is similar to :meth:`torch.Tensor.to`, but only accepts

        floating point desired :attr:`dtype` s. In addition, this method will

        only cast the floating point parameters and buffers to :attr:`dtype`

        (if given). The integral parameters and buffers will be moved

        :attr:`device`, if that is given, but with dtypes unchanged. When

        :attr:`non_blocking` is set, it tries to convert/move asynchronously

        with respect to the host if possible, e.g., moving CPU Tensors with

        pinned memory to CUDA devices.

        See below for examples.

        .. note::

            This method modifies the module in-place.

        Args:

            device (:class:`torch.device`): the desired device of the parameters

                and buffers in this module

            dtype (:class:`torch.dtype`): the desired floating point type of

                the floating point parameters and buffers in this module

            tensor (torch.Tensor): Tensor whose dtype and device are the desired

                dtype and device for all parameters and buffers in this module

            memory_format (:class:`torch.memory_format`): the desired memory

                format for 4D parameters and buffers in this module (keyword

                only argument)

        Returns:

            Module: self

        Example::

            >>> linear = nn.Linear(2, 2)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]])

            >>> linear.to(torch.double)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1913, -0.3420],

                    [-0.5113, -0.2325]], dtype=torch.float64)

            >>> gpu1 = torch.device("cuda:1")

            >>> linear.to(gpu1, dtype=torch.half, non_blocking=True)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16, device='cuda:1')

            >>> cpu = torch.device("cpu")

            >>> linear.to(cpu)

            Linear(in_features=2, out_features=2, bias=True)

            >>> linear.weight

            Parameter containing:

            tensor([[ 0.1914, -0.3420],

                    [-0.5112, -0.2324]], dtype=torch.float16)

        """

        device, dtype, non_blocking, convert_to_format = torch._C._nn._parse_to(*args, **kwargs)

        if dtype is not None:

            if not dtype.is_floating_point:

                raise TypeError('nn.Module.to only accepts floating point '

                                'dtypes, but got desired dtype={}'.format(dtype))

        def convert(t):

            if convert_to_format is not None and t.dim() == 4:

                return t.to(device, dtype if t.is_floating_point() else None, non_blocking, memory_format=convert_to_format)

            return t.to(device, dtype if t.is_floating_point() else None, non_blocking)

        return self._apply(convert)
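
A minimal sketch of the common pattern: move the module and the input batch to the same device before the forward pass. The guard makes the snippet run on CPU-only machines as well; the sizes are illustrative.

    import torch
    import torch.nn as nn

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = nn.Linear(8, 3).to(device)     # parameters and buffers are moved in-place
    batch = torch.randn(16, 8).to(device)  # tensors are not moved in-place; .to returns a copy

    out = model(batch)
    print(out.device)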

train

def train(
    self: ~T,
    mode: bool = True
) -> ~T

Sets the module in training mode.

This has any effect only on certain modules. See the documentation of particular modules for details of their behaviors in training/evaluation mode, if they are affected, e.g. `Dropout`, `BatchNorm`, etc.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| mode | bool | whether to set training mode (True) or evaluation mode (False). Default: True. | None |

Returns:

| Type | Description |
| --- | --- |
| Module | self |
View Source
    def train(self: T, mode: bool = True) -> T:

        r"""Sets the module in training mode.

        This has any effect only on certain modules. See documentations of

        particular modules for details of their behaviors in training/evaluation

        mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,

        etc.

        Args:

            mode (bool): whether to set training mode (``True``) or evaluation

                         mode (``False``). Default: ``True``.

        Returns:

            Module: self

        """

        self.training = mode

        for module in self.children():

            module.train(mode)

        return self
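
A minimal sketch of why the flag matters: `Dropout` behaves differently in training and evaluation mode. The sizes are illustrative.

    import torch
    import torch.nn as nn

    net = nn.Sequential(nn.Linear(10, 10), nn.Dropout(p=0.5))
    x = torch.ones(1, 10)

    net.train()              # dropout active: some activations are zeroed at random
    y_train = net(x)

    net.eval()               # dropout disabled: deterministic output
    y_eval = net(x)
    print(torch.equal(net(x), y_eval))  # True in eval mode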

type

def type(
    self: ~T,
    dst_type: Union[torch.dtype, str]
) -> ~T

Casts all parameters and buffers to `dst_type`.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| dst_type | type or string | the desired type | None |

Returns:

| Type | Description |
| --- | --- |
| Module | self |
View Source
    def type(self: T, dst_type: Union[dtype, str]) -> T:

        r"""Casts all parameters and buffers to :attr:`dst_type`.

        Arguments:

            dst_type (type or string): the desired type

        Returns:

            Module: self

        """

        return self._apply(lambda t: t.type(dst_type))

zero_grad

def zero_grad(
    self
) -> None

Sets gradients of all model parameters to zero.

View Source
    def zero_grad(self) -> None:

        r"""Sets gradients of all model parameters to zero."""

        if getattr(self, '_is_replica', False):

            warnings.warn(

                "Calling .zero_grad() from a module created with nn.DataParallel() has no effect. "

                "The parameters are copied (in a differentiable manner) from the original module. "

                "This means they are not leaf nodes in autograd and so don't accumulate gradients. "

                "If you need gradients in your forward method, consider using autograd.grad instead.")

        for p in self.parameters():

            if p.grad is not None:

                p.grad.detach_()

                p.grad.zero_()
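
A minimal sketch of where gradient clearing typically sits in a training step, since gradients accumulate across backward passes. The model, data, and optimizer below are placeholders.

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    for _ in range(3):
        optimizer.zero_grad()          # same effect as model.zero_grad() here
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()                # gradients accumulate into .grad
        optimizer.step()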