Python source code examples: torch.typename()
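
The examples below show how torch.typename() is used in practice. The function returns a readable type name: for tensors it is the tensor type string, for other objects it is the (qualified) class name. A minimal sketch (exact strings may vary slightly across PyTorch versions):

import torch
import torch.nn as nn

x = torch.zeros(2, 3)
print(torch.typename(x))                 # e.g. 'torch.FloatTensor'
print(torch.typename([1, 2]))            # 'list'
print(torch.typename(nn.Linear(3, 2)))   # e.g. 'torch.nn.modules.linear.Linear'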

Example 1
def __init__(self, params, defaults):
        self.defaults = defaults

        if isinstance(params, Variable) or torch.is_tensor(params):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Variables or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group) 
Example 2
def _short_str(tensor):
    # unwrap variable to tensor
    if not torch.is_tensor(tensor):
        # (1) unpack variable
        if hasattr(tensor, 'data'):
            tensor = getattr(tensor, 'data')
        # (2) handle include_lengths
        elif isinstance(tensor, tuple):
            return str(tuple(_short_str(t) for t in tensor))
        # (3) fallback to default str
        else:
            return str(tensor)

    # copied from torch _tensor_str
    size_str = 'x'.join(str(size) for size in tensor.size())
    device_str = '' if not tensor.is_cuda else \
        ' (GPU {})'.format(tensor.get_device())
    strt = '[{} of size {}{}]'.format(torch.typename(tensor),
                                      size_str, device_str)
    return strt 
Example 3
def __init__(self, named_params, defaults):
        self.defaults = defaults

        if isinstance(named_params, torch.Tensor):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Tensors or dicts, but got " +
                            torch.typename(named_params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(named_params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group) 
Example 4
def print_size(self, input, output):
        print(torch.typename(self).split('.')[-1], ' output size:',output.data.size()) 
Example 5
def __setattr__(self, name, value):
        """Redefine __setattr__ so that any submodules created
           inside the Module object are registered with _modules
           OrderedDict.
        """

        def remove_from(*dicts):
            for d in dicts:
                if name in d:
                    del d[name]

        modules = self.__dict__.get("_modules")
        if isinstance(value, Module):
            if modules is None:
                raise AttributeError(
                    "cannot assign module before Module.__init__() call"
                )
            remove_from(self.__dict__, self._parameters, self._buffers)
            modules[name] = value
        elif modules is not None and name in modules:
            if value is not None:
                raise TypeError(
                    "cannot assign '{}' as child module '{}' "
                    "(torch.nn.Module or None expected)".format(
                        torch.typename(value), name
                    )
                )
            modules[name] = value
        else:
            object.__setattr__(self, name, value) 
Example 6
def print_tensor_dict(params):
    kmax = max(len(key) for key in params.keys())
    for i, (key, v) in enumerate(params.items()):
        print(str(i).ljust(5), key.ljust(kmax + 3), str(tuple(v.shape)).ljust(23), torch.typename(v), v.requires_grad) 
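A hypothetical usage sketch for the helper above, assuming the dict comes from a module's named_parameters():

import torch.nn as nn

net = nn.Linear(3, 2)                            # any small module will do
print_tensor_dict(dict(net.named_parameters()))
# prints index, parameter name, shape, type name, and requires_grad for each entry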
Example 7
def vector_to_parameters(vec, parameters, grad=True):
    """Convert one vector to the parameters or gradients of the parameters
    Arguments:
        vec (torch.Tensor): a single vector representing the parameters of a model.
        parameters (Iterable[Variable]): an iterator of Variables that are the
            parameters of a model.
        grad (bool): True for assigning de-vectorized `vec` to gradients
    """
    # Ensure vec is a tensor (this variant only accepts torch.cuda.FloatTensor)
    if not isinstance(vec, torch.cuda.FloatTensor):
        raise TypeError('expected torch.Tensor, but got: {}'
                        .format(torch.typename(vec)))
    # Flag for the device where the parameter is located
    param_device = None

    # Pointer for slicing the vector for each parameter
    pointer = 0
    if grad:
        for param in parameters:
            # Ensure the parameters are located in the same device
            param_device = _check_param_device(param, param_device)
            # The length of the parameter
            num_param = torch.prod(torch.LongTensor(list(param.size())))
            param.grad.data = vec[pointer:pointer + num_param].view(
                param.size())
            # Increment the pointer
            pointer += num_param
    else:
        for param in parameters:
            # Ensure the parameters are located in the same device
            param_device = _check_param_device(param, param_device)
            # The length of the parameter
            num_param = torch.prod(torch.LongTensor(list(param.size())))
            param.data = vec[pointer:pointer + num_param].view(
                param.size())
            # Increment the pointer
            pointer += num_param 
Example 8
def __init__(self, model, n_samples=20):
        super(BayesNN, self).__init__()
        if not isinstance(model, nn.Module):
            raise TypeError("model {} is not a Module subclass".format(
                torch.typename(model)))

        self.n_samples = n_samples

        # w_i ~ StudentT(w_i | mu=0, lambda=shape/rate, nu=2*shape)
        # for efficiency, represent StudentT params using Gamma params
        self.w_prior_shape = 1.
        self.w_prior_rate = 0.05
        
        # noise variance 1e-6: beta ~ Gamma(beta | shape, rate)
        self.beta_prior_shape = 2.
        self.beta_prior_rate = 1.e-6

        # replicate `n_samples` instances with the same network as `model`
        instances = []
        for i in range(n_samples):
            new_instance = copy.deepcopy(model)
            # initialize each model instance with its default initialization
            # instead of the prior
            new_instance.reset_parameters()
            print('Reset parameters in model instance {}'.format(i))
            instances.append(new_instance)
        self.nnets = nn.ModuleList(instances)
        del instances

        # log precision (Gamma) of Gaussian noise
        log_beta = Gamma(self.beta_prior_shape, 
                         self.beta_prior_rate).sample((self.n_samples,)).log()
        for i in range(n_samples):
            self.nnets[i].log_beta = Parameter(log_beta[i])

        print('Total number of parameters: {}'.format(self._num_parameters())) 
Example 9
def __str__(self):
        if not self.__dict__:
            return 'Empty {} instance'.format(torch.typename(self))

        fields_to_index = filter(lambda field: field is not None, self.fields)
        var_strs = '\n'.join(['\t[.' + name + ']' + ":" + _short_str(getattr(self, name))
                              for name in fields_to_index if hasattr(self, name)])

        data_str = (' from {}'.format(self.dataset.name.upper())
                    if hasattr(self.dataset, 'name')
                    and isinstance(self.dataset.name, str) else '')

        strt = '[{} of size {}{}]\n{}'.format(torch.typename(self),
                                              self.batch_size, data_str, var_strs)
        return '\n' + strt 
Example 10
def permute(x, perm):
    """Permutes the last three dimensions of the input Tensor or Array.

    Args:
        x (Tensor or Array): Input to be permuted.
        perm (tuple or list): Permutation.

    Note:
        If the input has less than three dimensions a copy is returned.
    """
    if is_tensor(x):
        if x.dim() < 3:
            return x.data.clone()
        else:     
            s = tuple(range(0, x.dim()))
            permutation = s[:-3] + tuple(s[-3:][i] for i in perm)
        return x.permute(*permutation).contiguous()
    elif is_array(x):
        if x.ndim < 3:
            return x.copy()
        else:
            s = tuple(range(0,x.ndim))
            permutation = s[:-3] + tuple(s[-3:][i] for i in perm)
        # Copying to get rid of negative strides
        return np.transpose(x, permutation).copy()
    else:
        raise TypeError('Unknown type {0} encountered.'.format(torch.typename(x))) 
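A minimal sketch of the tensor branch above (assuming is_tensor/is_array resolve to torch.is_tensor and a NumPy ndarray check, as the snippet implies): only the last three dimensions are reordered, leading dimensions stay in place.

import torch

x = torch.zeros(2, 3, 4, 5)       # e.g. a batch in NCHW layout
y = permute(x, (1, 2, 0))         # reorder the trailing C, H, W dimensions
print(y.shape)                    # torch.Size([2, 4, 5, 3])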
Example 11
def channel_flip(x, dim=-3):
    """Reverses the channel dimension.
    
    Args:
        x (Tensor or Array): Input to have its channels flipped.
        dim (int, optional): Channels dimension (default -3).

    Note:
        If the input has less than three dimensions a copy is returned.
    """

    if is_tensor(x):
        dim = x.dim() + dim if dim < 0 else dim
        if x.dim() < 3:
            return x.data.clone()
        return x[tuple(slice(None, None) if i != dim
                 else torch.arange(x.size(i)-1, -1, -1).long()
                 for i in range(x.dim()))]
    elif is_array(x):
        dim = x.ndim + dim if dim < 0 else dim
        if x.ndim < 3:
            return x.copy()
        return np.ascontiguousarray(np.flip(x,dim))
    else:
        raise TypeError('Unknown type {0} encountered.'.format(torch.typename(x)))

# Default is dimension -3 (e.g. for bchw) 
Example 12
def _get_tensor(x):
    x = x[0] if torch.typename(x) in ['tuple', 'list'] else x
    return x 
Example 13
def _register(net, hook, modules=None, match_names=None, do_forward=True):
    modules = process_none(modules)
    match_names = process_none(match_names)
    for mod_name, mod in net.named_modules():
        name_match = any([torch.typename(mod).find(x) >= 0 for x in match_names])
        instance_match = any([isinstance(mod, x) for x in modules])
        if instance_match or name_match:
            if do_forward:
                mod.register_forward_hook(hook(mod_name))
            else:
                mod.register_backward_hook(hook(mod_name))
    return net 
Example 14
def __init__(self, params, lr=required, gravity=required, truncate_freq=1, weight_decay=0):
        defaults = dict(lr=lr, gravity=gravity, truncate_freq=truncate_freq, weight_decay=weight_decay)
        super(TruncateSGD, self).__init__(params, defaults)

        if not isinstance(truncate_freq, int) or truncate_freq <= 0:
            raise ValueError('truncate_freq should be integer and greater than 0',
                             'while type(truncate_freq) =', torch.typename(truncate_freq),
                             'truncate_freq =', truncate_freq) 
Example 15
def updatePadding(net, nn_padding):
    typename = torch.typename(net)
    # print(typename)
    if typename.find('Sequential') >= 0 or typename.find('Bottleneck') >= 0:
        modules_keys = list(net._modules.keys())
        for i in reversed(range(len(modules_keys))):
            subnet = net._modules[modules_keys[i]]
            out = updatePadding(subnet, nn_padding)
            if out != -1:
                p = out
                in_c, out_c, k, s, _, d, g, b = \
                    subnet.in_channels, subnet.out_channels, \
                    subnet.kernel_size[0], subnet.stride[0], \
                    subnet.padding[0], subnet.dilation[0], \
                    subnet.groups, subnet.bias,
                conv_temple = nn.Conv2d(in_c, out_c, k, stride=s, padding=0,
                                        dilation=d, groups=g, bias=b)
                conv_temple.weight = subnet.weight
                conv_temple.bias = subnet.bias
                if p > 1:
                    net._modules[modules_keys[i]] = nn.Sequential(SymmetricPad2d(p), conv_temple)
                else:
                    net._modules[modules_keys[i]] = nn.Sequential(nn_padding(p), conv_temple)
    else:
        if typename.find('torch.nn.modules.conv.Conv2d') >= 0:
            k_sz, p_sz = net.kernel_size[0], net.padding[0]
            if ((k_sz == 3) or (k_sz == 7)) and p_sz != 0:
                return p_sz
    return -1 
Example 16
def extra_repr(self):
        child_lines = []
        for k, p in self._buffers.items():
            size_str = "x".join(str(size) for size in p.size())
            device_str = "" if not p.is_cuda else " (GPU {})".format(p.get_device())
            parastr = "Buffer containing: [{} of size {}{}]".format(
                torch.typename(p), size_str, device_str
            )
            child_lines.append("  (" + k + "): " + parastr)
        tmpstr = "\n".join(child_lines)
        return tmpstr 
Example 17
def vector_to_parameter_list(vec, parameters):
    """
    Convert the vector `vec` to a parameter-list format matching `parameters`.

    This function is the inverse of `parameters_to_vector` from the
    pytorch module `torch.nn.utils.convert_parameters`.
    Contrary to `vector_to_parameters`, which replaces the value
    of the parameters, this function leaves the parameters unchanged and
    returns a list of parameter views of the vector.

    ```
    from torch.nn.utils import parameters_to_vector

    vector_view = parameters_to_vector(parameters)
    param_list_view = vector_to_parameter_list(vec, parameters)

    for a, b in zip(parameters, param_list_view):
        assert torch.allclose(a, b)
    ```

    Parameters:
    -----------
        vec: Tensor
            a single vector representing the parameters of a model
        parameters: (Iterable[Tensor])
            an iterator of Tensors that are of the desired shapes.
    """
    # Ensure vec of type Tensor
    if not isinstance(vec, torch.Tensor):
        raise TypeError(
            "expected torch.Tensor, but got: {}".format(torch.typename(vec))
        )
    params_new = []
    # Pointer for slicing the vector for each parameter
    pointer = 0
    for param in parameters:
        # The length of the parameter
        num_param = param.numel()
        # Slice the vector, reshape it
        param_new = vec[pointer : pointer + num_param].view_as(param).data
        params_new.append(param_new)
        # Increment the pointer
        pointer += num_param

    return params_new 
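A minimal sketch of how the helper above slices a flat vector back into parameter-shaped views:

import torch

params = [torch.zeros(2, 3), torch.zeros(4)]    # target shapes (2*3 + 4 = 10 elements)
vec = torch.arange(10.0)
views = vector_to_parameter_list(vec, params)
print([tuple(v.shape) for v in views])          # [(2, 3), (4,)]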
Example 18
def add_param_group(self, param_group):
        """Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Arguments:
            param_group (dict): Specifies what Variables should be optimized along with group
            specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group['params']
        if isinstance(params, Variable):
            param_group['params'] = [params]
        else:
            param_group['params'] = list(params)

        for param in param_group['params']:
            if not isinstance(param, Variable):
                raise TypeError("optimizer can only optimize Variables, "
                                "but one of the params is " + torch.typename(param))
            if not param.requires_grad:
                raise ValueError("optimizing a parameter that doesn't require gradients")
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Variable")

        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError("parameter group didn't specify a value of required optimization parameter " +
                                 name)
            else:
                param_group.setdefault(name, default)

        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group) 
Example 19
def parameters(net, modules=None, match_names=None, param_names=None, tag='', save_path='.', histogram=True, bins=100):
    """Visualizes a network's parameters on an image grid or histogram.

    Args:
        net (nn.Module): The network whose parameters are to be visualized.
        modules (list or tuple, optional): List of class definitions for the
            modules where the hook is attached e.g. nn.Conv2d  (default None).
        match_names (list or tuple, optional): List of strings. If any modules
            contain one of the strings then the hook is attached (default None).
        param_names (list or tuple, optional): List of strings. If any
            parameters of the module contain one of the strings then they are
            visualized (default None).
        tag (str, optional): String tag to attach to saved images (default '').
        save_path (str, optional): Path to save visualisation results 
            (default '.').
        histogram (bool, optional): If True then the visualization is a
            histogram, otherwise it's an image grid (default True).
        bins (int, optional): Number of bins for histogram, if `histogram` is
            True (default 100).

    Note:
        * If modules or match_names are not provided then no parameters will be
          visualized.
        * If param_names are not provided then no parameters will be visualized.
    """
    save_path = process(save_path, True)
    modules = process_none(modules)
    match_names = process_none(match_names)
    for module_name, mod in net.named_modules():
        name_match = any([torch.typename(mod).find(x) >= 0 for x in match_names])
        instance_match = any([isinstance(mod, x) for x in modules])
        if instance_match or name_match:
            params = {x: _get_tensor(getattr(mod, x)) for x in param_names}
            for tensor_name, data in params.items():
                title = "{0}-{1}-{2}".format(tag, module_name, tensor_name)
                if data is None:
                    continue
                if histogram:
                    img = torch2cv(data)
                    df = pd.DataFrame(img.reshape(img.size))
                    fig, ax = plt.subplots()
                    df.hist(bins=bins, ax=ax)
                    fig.savefig(os.path.join(save_path, '{0}.png'.format(title)))
                    plt.close(fig)
                else:
                    if data.dim() > 1:
                        img = torch2cv(make_grid(data, color=False))
                        to_save = (map_range(img)*255).astype(int)
                        cv2.imwrite(os.path.join(save_path, '{0}.png'.format(title)), to_save) 
Example 20
def add_param_group(self, param_group):
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Arguments:
            param_group (dict): Specifies what Tensors should be optimized along with group
            specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group['params']
        if isinstance(params, torch.Tensor):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError('optimizer parameters need to be organized in ordered collections, but '
                            'the ordering of tensors in sets will change between runs. Please use a list instead.')
        else:
            param_group['params'] = list(params)

        for name, param in param_group['params']:
            if not isinstance(param, torch.Tensor):
                raise TypeError("optimizer can only optimize Tensors, "
                                "but one of the params is " + torch.typename(param))
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Tensor")

        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError("parameter group didn't specify a value of required optimization parameter " +
                                 name)
            else:
                param_group.setdefault(name, default)

        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group) 
Example 21
def add_param_group(self, param_group):
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

            This can be useful when fine tuning a pre-trained network as frozen layers can be made
            trainable and added to the :class:`Optimizer` as training progresses.

            Arguments:
                param_group (dict): Specifies what Tensors should be optimized along with group
                specific optimization options.
            """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group["params"]
        if isinstance(params, torch.Tensor):
            param_group["params"] = [params]
        elif isinstance(params, set):
            raise TypeError(
                "optimizer parameters need to be organized in ordered collections, but "
                "the ordering of tensors in sets will change between runs. Please use a list instead."
            )
        else:
            param_group["params"] = list(params)

        for param in param_group["params"]:
            if not isinstance(param, torch.Tensor):
                raise TypeError(
                    "optimizer can only optimize Tensors, "
                    "but one of the params is " + torch.typename(param)
                )
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Tensor")

        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError(
                    "parameter group didn't specify a value of required optimization parameter "
                    + name
                )
            else:
                param_group.setdefault(name, default)

        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group["params"]))

        if not param_set.isdisjoint(set(param_group["params"])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group)