# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from collections import OrderedDict

import torch
import torch.distributed as dist
import torch.nn as nn


class BasePose(nn.Module, metaclass=ABCMeta):
    """Base class for pose detectors.

    All pose detectors should subclass it, and every subclass should
    override:

        ``forward_train``, supporting forward computation during training.
        ``forward_test``, supporting forward computation during testing.

    Args:
        backbone (dict): Backbone modules to extract features.
        head (dict): Head modules to produce the output.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
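
    Example (an illustrative sketch only; ``ToyPose``, its single conv
    layer and the ``heatmap_loss`` key are made-up names for demonstration,
    not mmpose APIs)::

        >>> import torch
        >>> import torch.nn as nn
        >>> class ToyPose(BasePose):
        ...
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.net = nn.Conv2d(3, 17, kernel_size=1)
        ...
        ...     def forward(self, img, img_metas=None, return_loss=True,
        ...                 **kwargs):
        ...         if return_loss:
        ...             return self.forward_train(img, img_metas, **kwargs)
        ...         return self.forward_test(img, img_metas, **kwargs)
        ...
        ...     def forward_train(self, img, img_metas, **kwargs):
        ...         # keys containing 'loss' are summed by ``_parse_losses``
        ...         return dict(heatmap_loss=self.net(img).mean())
        ...
        ...     def forward_test(self, img, img_metas, **kwargs):
        ...         return self.net(img)
        ...
        ...     def show_result(self, **kwargs):
        ...         pass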
""" | |

    @abstractmethod
    def forward_train(self, img, img_metas, **kwargs):
        """Defines the computation performed at training."""

    @abstractmethod
    def forward_test(self, img, img_metas, **kwargs):
        """Defines the computation performed at testing."""

    @abstractmethod
    def forward(self, img, img_metas, return_loss=True, **kwargs):
        """Forward function."""

    @staticmethod
    def _parse_losses(losses):
"""Parse the raw outputs (losses) of the network. | |
Args: | |
losses (dict): Raw output of the network, which usually contain | |
losses and other necessary information. | |
Returns: | |
tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \ | |
which may be a weighted sum of all losses, log_vars \ | |
contains all the variables to be sent to the logger. | |
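
        Example (illustrative; the key names are arbitrary)::

            >>> import torch
            >>> losses = dict(heatmap_loss=torch.tensor(0.5),
            ...               acc_pose=torch.tensor(0.25))
            >>> loss, log_vars = BasePose._parse_losses(losses)
            >>> float(loss)  # only keys containing 'loss' are summed
            0.5
            >>> log_vars['acc_pose']  # logged, but not back-propagated
            0.25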
""" | |
        log_vars = OrderedDict()
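        # Collapse each raw entry (a tensor, a float, or a list of tensors)
        # into a single value suitable for logging.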
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, float):
                log_vars[loss_name] = loss_value
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor, a float, or a list of '
                    'tensors')
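
        # The total loss is the sum of every entry whose key contains
        # 'loss'; other entries (e.g. accuracy metrics) are logged but not
        # back-propagated.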
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)
        log_vars['loss'] = loss

        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if not isinstance(loss_value, float):
                if dist.is_available() and dist.is_initialized():
                    loss_value = loss_value.data.clone()
                    dist.all_reduce(loss_value.div_(dist.get_world_size()))
                log_vars[loss_name] = loss_value.item()
            else:
                log_vars[loss_name] = loss_value

        return loss, log_vars

    def train_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during training.

        This method defines an iteration step during training, except for
        back propagation and optimizer updating, which are done in an
        optimizer hook. Note that in some complicated cases or models (e.g.
        GANs), the whole process, including back propagation and optimizer
        updating, is also defined in this method.

        Args:
            data_batch (dict): The output of the dataloader.
            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer
                of the runner is passed to ``train_step()``. This argument
                is unused and reserved.

        Returns:
            dict: It should contain at least 3 keys: ``loss``, ``log_vars``
                and ``num_samples``.

                ``loss`` is a tensor for back propagation, which can be a
                weighted sum of multiple losses.
                ``log_vars`` contains all the variables to be sent to the
                logger.
                ``num_samples`` indicates the batch size (when the model is
                DDP, it means the batch size on each GPU), which is used for
                averaging the logs.
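
        Example (illustrative, reusing the hypothetical ``ToyPose`` sketched
        in the class docstring)::

            >>> out = ToyPose().train_step(
            ...     dict(img=torch.randn(2, 3, 8, 8), img_metas=[{}] * 2),
            ...     optimizer=None)
            >>> sorted(out)
            ['log_vars', 'loss', 'num_samples']
            >>> out['num_samples']
            2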
""" | |
        losses = self.forward(**data_batch)
        loss, log_vars = self._parse_losses(losses)
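
        # ``num_samples`` is inferred from the length of the first entry in
        # the data batch, i.e. the per-GPU batch size used to average logs.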
        outputs = dict(
            loss=loss,
            log_vars=log_vars,
            num_samples=len(next(iter(data_batch.values()))))

        return outputs

    def val_step(self, data_batch, optimizer, **kwargs):
        """The iteration step during validation.

        This method shares the same signature as :func:`train_step`, but it
        is used during validation epochs. Note that the evaluation after
        training epochs is not implemented with this method, but with an
        evaluation hook.
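
        Example (illustrative, reusing the hypothetical ``ToyPose`` sketched
        in the class docstring)::

            >>> out = ToyPose().val_step(
            ...     dict(img=torch.randn(2, 3, 8, 8), img_metas=[{}] * 2),
            ...     optimizer=None)
            >>> out['results'].shape
            torch.Size([2, 17, 8, 8])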
""" | |
        results = self.forward(return_loss=False, **data_batch)
        outputs = dict(results=results)

        return outputs

    @abstractmethod
    def show_result(self, **kwargs):
        """Visualize the results."""
        raise NotImplementedError