Python source code examples: torch.sparse()
Example 1
def normalize_sparse_tensor(adj, fill_value=1):
    """Normalize sparse tensor. Need to import torch_scatter
    """
    edge_index = adj._indices()
    edge_weight = adj._values()
    num_nodes = adj.size(0)
    edge_index, edge_weight = add_self_loops(
        edge_index, edge_weight, fill_value, num_nodes)

    row, col = edge_index
    from torch_scatter import scatter_add
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    deg_inv_sqrt = deg.pow(-0.5)
    deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
    values = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]

    shape = adj.shape
    return torch.sparse.FloatTensor(edge_index, values, shape)
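A minimal usage sketch (my addition, not part of the original snippet): it builds a small sparse adjacency matrix, normalizes it with the helper above, and propagates node features with torch.sparse.mm. It assumes normalize_sparse_tensor and its add_self_loops / torch_scatter dependencies are importable in the current module.

# Hypothetical usage sketch; add_self_loops and torch_scatter are assumed available.
import torch

indices = torch.tensor([[0, 1, 1, 2],
                        [1, 0, 2, 1]])              # undirected 3-node path graph
values = torch.ones(indices.size(1))
adj = torch.sparse.FloatTensor(indices, values, torch.Size([3, 3]))

adj_norm = normalize_sparse_tensor(adj)             # D^-1/2 (A + I) D^-1/2
x = torch.randn(3, 16)                              # node features
out = torch.sparse.mm(adj_norm, x)                  # one propagation step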
Example 2
def degree_normalize_sparse_tensor(adj, fill_value=1):
    """degree_normalize_sparse_tensor.
    """
    edge_index = adj._indices()
    edge_weight = adj._values()
    num_nodes = adj.size(0)
    edge_index, edge_weight = add_self_loops(
        edge_index, edge_weight, fill_value, num_nodes)

    row, col = edge_index
    from torch_scatter import scatter_add
    deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
    deg_inv_sqrt = deg.pow(-1)
    deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
    values = deg_inv_sqrt[row] * edge_weight
    shape = adj.shape
    return torch.sparse.FloatTensor(edge_index, values, shape)
Example 3
def degree_normalize_adj_tensor(adj, sparse=True):
    """degree_normalize_adj_tensor.
    """
    device = torch.device("cuda" if adj.is_cuda else "cpu")
    if sparse:
        # return degree_normalize_sparse_tensor(adj)
        adj = to_scipy(adj)
        mx = degree_normalize_adj(adj)
        return sparse_mx_to_torch_sparse_tensor(mx).to(device)
    else:
        mx = adj + torch.eye(adj.shape[0]).to(device)
        rowsum = mx.sum(1)
        r_inv = rowsum.pow(-1).flatten()
        r_inv[torch.isinf(r_inv)] = 0.
        r_mat_inv = torch.diag(r_inv)
        mx = r_mat_inv @ mx
        return mx
Example 4
def generate_w(output_dim, w_distrib='uniform', w_sparsity=None, mean=0.0, std=1.0, seed=None, dtype=torch.float32):
    """
    Generate the W matrix
    :param output_dim: size of the square matrix (output_dim x output_dim)
    :param w_sparsity: sparsity of the generated matrix
    :return: the generated weight matrix
    """
    # Manual seed
    if seed is not None:
        torch.manual_seed(seed)
        np.random.seed(seed)
    # end if

    # Distribution
    if w_distrib == 'uniform':
        w = ESNCell.generate_uniform_matrix(size=(output_dim, output_dim), sparsity=w_sparsity, input_set=[-1.0, 1.0])
        w = torch.from_numpy(w.astype(np.float32))
    else:
        w = ESNCell.generate_gaussian_matrix(size=(output_dim, output_dim), sparsity=w_sparsity, mean=mean, std=std, dtype=dtype)
    # end if

    return w
# end generate_w
Example 5
def preprocess(adj, features, labels, preprocess_adj=False, preprocess_feature=False, sparse=False, device='cpu'):
    """Convert adj, features, labels from array or sparse matrix to
    torch Tensor, and normalize the input data.

    Parameters
    ----------
    adj : scipy.sparse.csr_matrix
        the adjacency matrix.
    features : scipy.sparse.csr_matrix
        node features
    labels : numpy.array
        node labels
    preprocess_adj : bool
        whether to normalize the adjacency matrix
    preprocess_feature : bool
        whether to normalize the feature matrix
    sparse : bool
        whether to return sparse tensors
    device : str
        'cpu' or 'cuda'
    """
    if preprocess_adj:
        adj = normalize_adj(adj)

    if preprocess_feature:
        features = normalize_feature(features)

    labels = torch.LongTensor(labels)
    if sparse:
        adj = sparse_mx_to_torch_sparse_tensor(adj)
        features = sparse_mx_to_torch_sparse_tensor(features)
    else:
        features = torch.FloatTensor(np.array(features.todense()))
        adj = torch.FloatTensor(adj.todense())
    return adj.to(device), features.to(device), labels.to(device)
Example 6
def to_tensor(adj, features, labels=None, device='cpu'):
    """Convert adj, features, labels from array or sparse matrix to
    torch Tensor.

    Parameters
    ----------
    adj : scipy.sparse.csr_matrix
        the adjacency matrix.
    features : scipy.sparse.csr_matrix
        node features
    labels : numpy.array
        node labels
    device : str
        'cpu' or 'cuda'
    """
    if sp.issparse(adj):
        adj = sparse_mx_to_torch_sparse_tensor(adj)
    else:
        adj = torch.FloatTensor(adj)
    if sp.issparse(features):
        features = sparse_mx_to_torch_sparse_tensor(features)
    else:
        features = torch.FloatTensor(np.array(features))

    if labels is None:
        return adj.to(device), features.to(device)
    else:
        labels = torch.LongTensor(labels)
        return adj.to(device), features.to(device), labels.to(device)
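A small illustrative call of to_tensor (my addition, not from the original source), assuming numpy, scipy.sparse and the sparse_mx_to_torch_sparse_tensor helper from Example 10 are available.

# Hypothetical usage sketch for to_tensor.
import numpy as np
import scipy.sparse as sp

adj = sp.random(100, 100, density=0.05, format='csr')
features = sp.random(100, 16, density=0.2, format='csr')
labels = np.random.randint(0, 7, size=100)

adj_t, x_t, y_t = to_tensor(adj, features, labels, device='cpu')
print(adj_t.is_sparse, x_t.is_sparse, y_t.dtype)    # True True torch.int64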
Example 7
def normalize_feature(mx):
    """Row-normalize sparse matrix

    Parameters
    ----------
    mx : scipy.sparse.csr_matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse.lil_matrix
        normalized matrix
    """
    if type(mx) is not sp.lil.lil_matrix:
        mx = mx.tolil()
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    return mx
Example 8
def normalize_adj(mx):
    """Symmetrically normalize sparse adjacency matrix,
    A' = (D + I)^-1/2 * (A + I) * (D + I)^-1/2

    Parameters
    ----------
    mx : scipy.sparse.csr_matrix
        matrix to be normalized

    Returns
    -------
    scipy.sparse.lil_matrix
        normalized matrix
    """
    if type(mx) is not sp.lil.lil_matrix:
        mx = mx.tolil()
    if mx[0, 0] == 0:
        mx = mx + sp.eye(mx.shape[0])
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1 / 2).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    mx = r_mat_inv.dot(mx)
    mx = mx.dot(r_mat_inv)
    return mx
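A quick illustrative check of the symmetric normalization above (my addition, not from the source), using only numpy and scipy:

# Hypothetical check of normalize_adj on a 3-node path graph.
import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.array([[0., 1., 0.],
                            [1., 0., 1.],
                            [0., 1., 0.]]))
A_norm = normalize_adj(A)    # adds self-loops, then (D + I)^-1/2 (A + I) (D + I)^-1/2
print(A_norm.toarray())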
Example 9
def degree_normalize_adj(mx):
    """Row-normalize sparse matrix"""
    mx = mx.tolil()
    if mx[0, 0] == 0:
        mx = mx + sp.eye(mx.shape[0])
    rowsum = np.array(mx.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.
    r_mat_inv = sp.diags(r_inv)
    # mx = mx.dot(r_mat_inv)
    mx = r_mat_inv.dot(mx)
    return mx
Example 10
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)
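An illustrative round trip through sparse_mx_to_torch_sparse_tensor (my addition, not from the source):

# Hypothetical usage: scipy CSR matrix -> torch sparse tensor -> dense tensor.
import numpy as np
import scipy.sparse as sp
import torch

mx = sp.eye(4, format='csr') * 2.0
t = sparse_mx_to_torch_sparse_tensor(mx)
print(t.is_sparse, t.shape)      # True torch.Size([4, 4])
print(t.to_dense())              # dense 4x4 with 2.0 on the diagonal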
Example 11
def to_scipy(tensor):
    """Convert a dense/sparse tensor to scipy matrix"""
    if is_sparse_tensor(tensor):
        values = tensor._values()
        indices = tensor._indices()
        return sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=tensor.shape)
    else:
        indices = tensor.nonzero().t()
        values = tensor[indices[0], indices[1]]
        return sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=tensor.shape)
Example 12
def backward(ctx, grad_output, grad_indices):
    tensor_type = type(grad_output).__name__
    if grad_output.is_cuda:
        SparseTensor = getattr(torch.cuda.sparse, tensor_type)
    else:
        SparseTensor = getattr(torch.sparse, tensor_type)

    grad_input = grad_output
    indices = ctx._indices
    indices = indices.view(1, -1)
    grad_weight = SparseTensor(indices, grad_output, ctx._weight_size).to_dense()
    return grad_input, grad_weight
Example 13
def sparse_eye(size):
    """
    Returns the identity matrix as a sparse matrix
    """
    indices = torch.arange(0, size).long().unsqueeze(0).expand(2, size)
    values = torch.tensor(1.0).expand(size)
    cls = getattr(torch.sparse, values.type().split(".")[-1])
    return cls(indices, values, torch.Size([size, size]))
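A short check of sparse_eye (my addition, not from the source), assuming torch is imported and the helper above is in scope:

# Hypothetical check: the sparse identity behaves like a dense identity.
import torch

I5 = sparse_eye(5)
print(I5.to_dense())                                # 5x5 identity matrix
v = torch.randn(5, 3)
print(torch.allclose(torch.sparse.mm(I5, v), v))    # True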
Example 14
def sparse_repeat(sparse, *repeat_sizes):
    """
    Repeat a sparse tensor along the given dimensions (a sparse analogue of Tensor.repeat).
    """
    if len(repeat_sizes) == 1 and isinstance(repeat_sizes[0], tuple):
        repeat_sizes = repeat_sizes[0]

    if len(repeat_sizes) > len(sparse.shape):
        num_new_dims = len(repeat_sizes) - len(sparse.shape)
        new_indices = sparse._indices()
        new_indices = torch.cat(
            [
                torch.zeros(num_new_dims, new_indices.size(1), dtype=new_indices.dtype, device=new_indices.device),
                new_indices,
            ],
            0,
        )
        sparse = torch.sparse_coo_tensor(
            new_indices,
            sparse._values(),
            torch.Size((*[1 for _ in range(num_new_dims)], *sparse.shape)),
            dtype=sparse.dtype,
            device=sparse.device,
        )

    for i, repeat_size in enumerate(repeat_sizes):
        if repeat_size > 1:
            new_indices = sparse._indices().repeat(1, repeat_size)
            adding_factor = torch.arange(0, repeat_size, dtype=new_indices.dtype, device=new_indices.device).unsqueeze_(
                1
            )
            new_indices[i].view(repeat_size, -1).add_(adding_factor)
            sparse = torch.sparse_coo_tensor(
                new_indices,
                sparse._values().repeat(repeat_size),
                torch.Size((*sparse.shape[:i], repeat_size * sparse.size(i), *sparse.shape[i + 1 :])),
                dtype=sparse.dtype,
                device=sparse.device,
            )

    return sparse
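An illustrative use of sparse_repeat (my addition, not from the source), tiling a 2x2 sparse matrix into a batch of three copies:

# Hypothetical usage; torch only.
import torch

base = torch.sparse_coo_tensor(torch.tensor([[0, 1], [0, 1]]),
                               torch.tensor([1.0, 2.0]),
                               (2, 2))
batched = sparse_repeat(base, 3, 1, 1)
print(batched.shape)             # torch.Size([3, 2, 2])
print(batched.to_dense()[1])     # the same 2x2 diagonal in every batch slice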
Example 15
def _setupX(self, sparse=0):
    """
    Initializes an X tensor of features for prediction
    :param sparse: 0 if dense tensor, 1 if sparse
    :return: Null
    """
    feature_table = self.dataengine.get_table_to_dataframe(
        "Feature_clean", self.dataset).collect()
    if sparse:
        coordinates = torch.LongTensor()
        values = torch.FloatTensor([])
        for factor in feature_table:
            coordinate = torch.LongTensor([[int(factor.vid) - 1],
                                           [int(factor.feature) - 1],
                                           [int(factor.assigned_val) - 1]])
            coordinates = torch.cat((coordinates, coordinate), 1)
            value = factor['count']
            values = torch.cat((values, torch.FloatTensor([value])), 0)
        self.X = torch.sparse.FloatTensor(coordinates, values,
                                          torch.Size([self.N, self.M, self.L]))
    else:
        self.X = torch.zeros(self.N, self.M, self.L)
        for factor in feature_table:
            self.X[factor.vid - 1, factor.feature - 1,
                   factor.assigned_val - 1] = factor['count']
    return
Example 16
def setuptrainingX(self, sparse=0):
    """
    Initializes an X tensor of features for training
    :param sparse: 0 if dense tensor, 1 if sparse
    :return: X tensor of features
    """
    dataframe_offset = self.dataengine.get_table_to_dataframe(
        "Dimensions_dk", self.dataset)
    list = dataframe_offset.collect()
    dimension_dict = {}
    for dimension in list:
        dimension_dict[dimension['dimension']] = dimension['length']
    # X tensor dimensions (N * M * L)
    self.testM = dimension_dict['M']
    self.testN = dimension_dict['N']
    self.testL = dimension_dict['L']
    feature_table = self.dataengine.get_table_to_dataframe(
        "Feature_dk", self.dataset).collect()
    if sparse:
        coordinates = torch.LongTensor()
        values = torch.FloatTensor([])
        for factor in feature_table:
            coordinate = torch.LongTensor([[int(factor.vid) - 1],
                                           [int(factor.feature) - 1],
                                           [int(factor.assigned_val) - 1]])
            coordinates = torch.cat((coordinates, coordinate), 1)
            value = factor['count']
            values = torch.cat((values, torch.FloatTensor([value])), 0)
        X = torch.sparse.FloatTensor(coordinates, values,
                                     torch.Size([self.testN, self.testM, self.testL]))
    else:
        X = torch.zeros(self.testN, self.testM, self.testL)
        for factor in feature_table:
            X[factor.vid - 1, factor.feature - 1,
              factor.assigned_val - 1] = factor['count']
    return X
Example 17
def to_sparse(m):
    """
    To sparse matrix
    :param m: dense matrix to convert
    :return: the matrix as a torch sparse tensor
    """
    # Rows, columns and values
    rows = torch.LongTensor()
    columns = torch.LongTensor()
    values = torch.FloatTensor()

    # For each row
    for i in range(m.shape[0]):
        # For each column
        for j in range(m.shape[1]):
            if m[i, j] != 0.0:
                rows = torch.cat((rows, torch.LongTensor([i])), dim=0)
                columns = torch.cat((columns, torch.LongTensor([j])), dim=0)
                values = torch.cat((values, torch.FloatTensor([m[i, j]])), dim=0)
            # end if
        # end for
    # end for

    # Indices
    indices = torch.cat((rows.unsqueeze(0), columns.unsqueeze(0)), dim=0)

    # To sparse
    return torch.sparse.FloatTensor(indices, values)
# end to_sparse
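An illustrative call of to_sparse (my addition, not from the source). Note that the element-wise Python loop makes it suitable only for small matrices, and that the result's shape is inferred from the largest indices because no explicit size is passed to torch.sparse.FloatTensor.

# Hypothetical usage with a small numpy matrix.
import numpy as np

m = np.array([[0.0, 3.0],
              [5.0, 0.0]])
sp_m = to_sparse(m)
print(sp_m._indices())    # tensor([[0, 1], [1, 0]])
print(sp_m._values())     # tensor([3., 5.])
print(sp_m.to_dense())    # tensor([[0., 3.], [5., 0.]])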
Example 18
def extra_repr(self):
    return (super(MaskedLinear, self).extra_repr() +
            ', exclusive={exclusive}'.format(**self.__dict__))

# TODO: reduce unused weights, maybe when torch.sparse is stable
Example 19
def forward(self, x, to_dense=False):
    d = super().forward_as_dict(x)

    f8_3 = F.elu(self.f8_3(d['conv4']))
    f8_4 = F.elu(self.f8_4(d['conv5']))
    f8_5 = F.elu(self.f8_5(d['conv6']))
    x = F.elu(self.f9(torch.cat([f8_3, f8_4, f8_5], dim=1)))

    if x.size(2) == self.predefined_featuresize and x.size(3) == self.predefined_featuresize:
        ind_from = self.ind_from
        ind_to = self.ind_to
    else:
        ind_from, ind_to = pyutils.get_indices_of_pairs(5, (x.size(2), x.size(3)))
        ind_from = torch.from_numpy(ind_from); ind_to = torch.from_numpy(ind_to)

    x = x.view(x.size(0), x.size(1), -1)

    ff = torch.index_select(x, dim=2, index=ind_from.cuda(non_blocking=True))
    ft = torch.index_select(x, dim=2, index=ind_to.cuda(non_blocking=True))

    ff = torch.unsqueeze(ff, dim=2)
    ft = ft.view(ft.size(0), ft.size(1), -1, ff.size(3))

    aff = torch.exp(-torch.mean(torch.abs(ft - ff), dim=1))

    if to_dense:
        aff = aff.view(-1).cpu()

        ind_from_exp = torch.unsqueeze(ind_from, dim=0).expand(ft.size(2), -1).contiguous().view(-1)
        indices = torch.stack([ind_from_exp, ind_to])
        indices_tp = torch.stack([ind_to, ind_from_exp])

        area = x.size(2)
        indices_id = torch.stack([torch.arange(0, area).long(), torch.arange(0, area).long()])

        aff_mat = sparse.FloatTensor(torch.cat([indices, indices_id, indices_tp], dim=1),
                                     torch.cat([aff, torch.ones([area]), aff])).to_dense().cuda()
        return aff_mat
    else:
        return aff
Example 20
def forward(self, x, to_dense=False):
    d = super().forward_as_dict(x)

    f8_3 = F.elu(self.gn8_3(self.f8_3(d['conv4'])))
    f8_4 = F.elu(self.gn8_4(self.f8_4(d['conv5'])))
    f8_5 = F.elu(self.gn8_5(self.f8_5(d['conv5fc'])))
    x = torch.cat([f8_3, f8_4, f8_5], dim=1)
    x = F.elu(self.f9(x))

    if x.size(2) == self.predefined_featuresize and x.size(3) == self.predefined_featuresize:
        ind_from = self.ind_from
        ind_to = self.ind_to
    else:
        ind_from, ind_to = pyutils.get_indices_of_pairs(5, (x.size(2), x.size(3)))
        ind_from = torch.from_numpy(ind_from); ind_to = torch.from_numpy(ind_to)

    x = x.view(x.size(0), x.size(1), -1)

    ff = torch.index_select(x, dim=2, index=ind_from.cuda(non_blocking=True))
    ft = torch.index_select(x, dim=2, index=ind_to.cuda(non_blocking=True))

    ff = torch.unsqueeze(ff, dim=2)
    ft = ft.view(ft.size(0), ft.size(1), -1, ff.size(3))

    aff = torch.exp(-torch.mean(torch.abs(ft - ff), dim=1))

    if to_dense:
        aff = aff.view(-1).cpu()

        ind_from_exp = torch.unsqueeze(ind_from, dim=0).expand(ft.size(2), -1).contiguous().view(-1)
        indices = torch.stack([ind_from_exp, ind_to])
        indices_tp = torch.stack([ind_to, ind_from_exp])

        area = x.size(2)
        indices_id = torch.stack([torch.arange(0, area).long(), torch.arange(0, area).long()])

        aff_mat = sparse.FloatTensor(torch.cat([indices, indices_id, indices_tp], dim=1),
                                     torch.cat([aff, torch.ones([area]), aff])).to_dense().cuda()
        return aff_mat
    else:
        return aff
Example 21
def make_sparse_from_indices_and_values(interp_indices, interp_values, num_rows):
    """
    This produces a sparse tensor with a fixed number of non-zero entries in each column.

    Args:
        - interp_indices - Tensor (batch_size) x num_cols x n_nonzero_entries
            A matrix which has the indices of the nonzero_entries for each column
        - interp_values - Tensor (batch_size) x num_cols x n_nonzero_entries
            The corresponding values
        - num_rows - the number of rows in the result matrix

    Returns:
        - SparseTensor - (batch_size) x num_cols x num_rows
    """
    if not torch.is_tensor(interp_indices):
        raise RuntimeError("interp_indices and interp_values should be tensors")

    # Is it batch mode?
    batch_shape = interp_values.shape[:-2]
    n_target_points, n_coefficients = interp_values.shape[-2:]

    # Index tensor
    batch_tensors = []
    for i, batch_size in enumerate(batch_shape):
        batch_tensor = torch.arange(0, batch_size, dtype=torch.long, device=interp_values.device)
        batch_tensor = (
            batch_tensor.unsqueeze_(1)
            .repeat(batch_shape[:i].numel(), batch_shape[i + 1 :].numel() * n_target_points * n_coefficients)
            .view(-1)
        )
        batch_tensors.append(batch_tensor)

    row_tensor = torch.arange(0, n_target_points, dtype=torch.long, device=interp_values.device)
    row_tensor = row_tensor.unsqueeze_(1).repeat(batch_shape.numel(), n_coefficients).view(-1)
    index_tensor = torch.stack([*batch_tensors, interp_indices.reshape(-1), row_tensor], 0)

    # Value tensor
    value_tensor = interp_values.reshape(-1)
    nonzero_indices = value_tensor.nonzero()
    if nonzero_indices.storage():
        nonzero_indices.squeeze_()
        index_tensor = index_tensor.index_select(1, nonzero_indices)
        value_tensor = value_tensor.index_select(0, nonzero_indices)
    else:
        index_tensor = index_tensor.resize_(interp_indices.dim(), 1).zero_()
        value_tensor = value_tensor.resize_(1).zero_()

    # Make the sparse tensor
    type_name = value_tensor.type().split(".")[-1]  # e.g. FloatTensor
    interp_size = torch.Size((*batch_shape, num_rows, n_target_points))
    if index_tensor.is_cuda:
        cls = getattr(torch.cuda.sparse, type_name)
    else:
        cls = getattr(torch.sparse, type_name)
    res = cls(index_tensor, value_tensor, interp_size)

    # Wrap things as a variable, if necessary
    return res
Example 22
def bdsmm(sparse, dense):
    """
    Batch dense-sparse matrix multiply
    """
    # Make the batch sparse matrix into a block-diagonal matrix
    if sparse.ndimension() > 2:
        # Expand the tensors to account for broadcasting
        output_shape = _matmul_broadcast_shape(sparse.shape, dense.shape)
        expanded_sparse_shape = output_shape[:-2] + sparse.shape[-2:]
        unsqueezed_sparse_shape = [1 for _ in range(len(output_shape) - sparse.dim())] + list(sparse.shape)
        repeat_sizes = tuple(
            output_size // sparse_size
            for output_size, sparse_size in zip(expanded_sparse_shape, unsqueezed_sparse_shape)
        )
        sparse = sparse_repeat(sparse, *repeat_sizes)
        dense = dense.expand(*output_shape[:-2], dense.size(-2), dense.size(-1))

        # Figure out how much needs to be added to the row/column indices
        # to create a block-diagonal matrix
        *batch_shape, num_rows, num_cols = sparse.shape
        batch_size = torch.Size(batch_shape).numel()
        batch_multiplication_factor = torch.tensor(
            [torch.Size(batch_shape[i + 1 :]).numel() for i in range(len(batch_shape))],
            dtype=torch.long,
            device=sparse.device,
        )
        if batch_multiplication_factor.is_cuda:
            batch_assignment = (sparse._indices()[:-2].float().t() @ batch_multiplication_factor.float()).long()
        else:
            batch_assignment = sparse._indices()[:-2].t() @ batch_multiplication_factor

        # Create block-diagonal sparse tensor
        indices = sparse._indices()[-2:].clone()
        indices[0].add_(batch_assignment, alpha=num_rows)
        indices[1].add_(batch_assignment, alpha=num_cols)
        sparse_2d = torch.sparse_coo_tensor(
            indices,
            sparse._values(),
            torch.Size((batch_size * num_rows, batch_size * num_cols)),
            dtype=sparse._values().dtype,
            device=sparse._values().device,
        )

        dense_2d = dense.reshape(batch_size * num_cols, -1)
        res = torch.dsmm(sparse_2d, dense_2d)
        res = res.view(*batch_shape, num_rows, -1)
        return res

    elif dense.dim() > 2:
        *batch_shape, num_rows, num_cols = dense.size()
        batch_size = torch.Size(batch_shape).numel()
        dense = dense.view(batch_size, num_rows, num_cols)
        res = torch.dsmm(sparse, dense.transpose(0, 1).reshape(-1, batch_size * num_cols))
        res = res.view(-1, batch_size, num_cols)
        res = res.transpose(0, 1).reshape(*batch_shape, -1, num_cols)
        return res

    else:
        return torch.dsmm(sparse, dense)
Example 23
def sparse_getitem(sparse, idxs):
    """
    Index a 1d or 2d sparse tensor with integers and slices.
    """
    if not isinstance(idxs, tuple):
        idxs = (idxs,)

    if not sparse.ndimension() <= 2:
        raise RuntimeError("Must be a 1d or 2d sparse tensor")

    if len(idxs) > sparse.ndimension():
        raise RuntimeError("Invalid index for %d-order tensor" % sparse.ndimension())

    indices = sparse._indices()
    values = sparse._values()
    size = list(sparse.size())

    for i, idx in list(enumerate(idxs))[::-1]:
        if isinstance(idx, int):
            del size[i]
            mask = indices[i].eq(idx)
            if torch.any(mask):
                new_indices = torch.zeros(
                    indices.size(0) - 1, torch.sum(mask), dtype=indices.dtype, device=indices.device
                )
                for j in range(indices.size(0)):
                    if i > j:
                        new_indices[j].copy_(indices[j][mask])
                    elif i < j:
                        new_indices[j - 1].copy_(indices[j][mask])
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0) - 1, 1).zero_()
                values.resize_(1).zero_()

            if not len(size):
                return sum(values)

        elif isinstance(idx, slice):
            start, stop, step = idx.indices(size[i])
            size = list(size[:i]) + [stop - start] + list(size[i + 1 :])
            if step != 1:
                raise RuntimeError("Slicing with step is not supported")
            mask = indices[i].lt(stop) & indices[i].ge(start)
            if torch.any(mask):
                new_indices = torch.zeros(indices.size(0), torch.sum(mask), dtype=indices.dtype, device=indices.device)
                for j in range(indices.size(0)):
                    new_indices[j].copy_(indices[j][mask])
                new_indices[i].sub_(start)
                indices = new_indices
                values = values[mask]
            else:
                indices.resize_(indices.size(0), 1).zero_()
                values.resize_(1).zero_()

        else:
            raise RuntimeError("Unknown index type")

    return torch.sparse_coo_tensor(indices, values, torch.Size(size), dtype=values.dtype, device=values.device)
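An illustrative use of sparse_getitem (my addition, not from the source); the helper reads the raw _indices()/_values() of the input, so an ordinary COO tensor works directly:

# Hypothetical usage on a 2x3 sparse matrix.
import torch

mat = torch.sparse_coo_tensor(torch.tensor([[0, 1, 1], [1, 0, 2]]),
                              torch.tensor([1.0, 2.0, 3.0]),
                              (2, 3))
row1 = sparse_getitem(mat, 1)                   # select row 1 -> 1d sparse tensor
print(row1.to_dense())                          # tensor([2., 0., 3.])
sub = sparse_getitem(mat, (0, slice(0, 2)))     # row 0, columns 0..1
print(sub.to_dense())                           # tensor([0., 1.])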
Example 24
def left_t_interp(interp_indices, interp_values, rhs, output_dim):
    """
    """
    from .. import dsmm

    is_vector = rhs.ndimension() == 1
    if is_vector:
        rhs = rhs.unsqueeze(-1)

    # Multiply the rhs by the interp_values
    # This multiplication here will give us the ability to perform backprop
    values = rhs.unsqueeze(-2) * interp_values.unsqueeze(-1)

    # Define a bunch of sizes
    num_data, num_interp = interp_values.shape[-2:]
    num_cols = rhs.size(-1)
    interp_shape = torch.Size((*interp_indices.shape[:-2], output_dim, num_data))
    output_shape = _matmul_broadcast_shape(interp_shape, rhs.shape)
    batch_shape = output_shape[:-2]
    batch_size = batch_shape.numel()

    # Using interp_indices, create a sparse matrix that will sum up the values
    interp_indices = interp_indices.expand(*batch_shape, *interp_indices.shape[-2:]).contiguous()
    batch_indices = torch.arange(0, batch_size, dtype=torch.long, device=values.device).unsqueeze_(1)
    batch_indices = batch_indices.repeat(1, num_data * num_interp)
    column_indices = torch.arange(0, num_data * num_interp, dtype=torch.long, device=values.device).unsqueeze_(1)
    column_indices = column_indices.repeat(batch_size, 1)
    summing_matrix_indices = torch.stack([batch_indices.view(-1), interp_indices.view(-1), column_indices.view(-1)], 0)
    summing_matrix_values = torch.ones(
        batch_size * num_data * num_interp, dtype=interp_values.dtype, device=interp_values.device
    )
    size = torch.Size((batch_size, output_dim, num_data * num_interp))
    type_name = summing_matrix_values.type().split(".")[-1]  # e.g. FloatTensor
    if interp_values.is_cuda:
        cls = getattr(torch.cuda.sparse, type_name)
    else:
        cls = getattr(torch.sparse, type_name)
    summing_matrix = cls(summing_matrix_indices, summing_matrix_values, size)

    # Sum up the values appropriately by performing sparse matrix multiplication
    values = values.reshape(batch_size, num_data * num_interp, num_cols)
    res = dsmm(summing_matrix, values)

    res = res.view(*batch_shape, *res.shape[-2:])
    if is_vector:
        res = res.squeeze(-1)
    return res
Example 25
def BilateralRefSmoothnessLoss(self, pred_R, targets, att, num_features):
    # pred_R = pred_R.cpu()
    total_loss = Variable(torch.cuda.FloatTensor(1))
    total_loss[0] = 0

    N = pred_R.size(2) * pred_R.size(3)
    Z = (pred_R.size(1) * N)

    # grad_input = torch.FloatTensor(pred_R.size())
    # grad_input = grad_input.zero_()

    for i in range(pred_R.size(0)):  # for each image
        B_mat = targets[att + 'B_list'][i]  # still list of blur sparse matrices
        S_mat = Variable(targets[att + 'S'][i].cuda(), requires_grad=False)  # Splat and Slicing matrix
        n_vec = Variable(targets[att + 'N'][i].cuda(), requires_grad=False)  # bi-stochastic vector, which is a diagonal matrix

        p = pred_R[i, :, :, :].view(pred_R.size(1), -1).t()  # N x 3
        # p'p
        # p_norm = torch.mm(p.t(), p)
        # p_norm_sum = torch.trace(p_norm)
        p_norm_sum = torch.sum(torch.mul(p, p))

        # S * N * p
        Snp = torch.mul(n_vec.repeat(1, pred_R.size(1)), p)
        sp_mm = Sparse()
        Snp = sp_mm(Snp, S_mat)

        Snp_1 = Snp.clone()
        Snp_2 = Snp.clone()

        # blur
        for f in range(num_features + 1):
            B_var1 = Variable(B_mat[f].cuda(), requires_grad=False)
            sp_mm1 = Sparse()
            Snp_1 = sp_mm1(Snp_1, B_var1)

            B_var2 = Variable(B_mat[num_features - f].cuda(), requires_grad=False)
            sp_mm2 = Sparse()
            Snp_2 = sp_mm2(Snp_2, B_var2)

        Snp_12 = Snp_1 + Snp_2
        pAp = torch.sum(torch.mul(Snp, Snp_12))

        total_loss = total_loss + ((p_norm_sum - pAp) / Z)

    total_loss = total_loss / pred_R.size(0)  # average over all images
    return total_loss