Python source code examples: torch.int16()
Example 1
def normalize_wav(tensor: torch.Tensor) -> torch.Tensor:
    if tensor.dtype == torch.float32:
        pass
    elif tensor.dtype == torch.int32:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 2147483647.
        tensor[tensor < 0] /= 2147483648.
    elif tensor.dtype == torch.int16:
        tensor = tensor.to(torch.float32)
        tensor[tensor > 0] /= 32767.
        tensor[tensor < 0] /= 32768.
    elif tensor.dtype == torch.uint8:
        tensor = tensor.to(torch.float32) - 128
        tensor[tensor > 0] /= 127.
        tensor[tensor < 0] /= 128.
    return tensor
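A minimal usage sketch (not part of the original example; assumes `import torch`): the int16 branch maps the full signed 16-bit range onto [-1.0, 1.0].

import torch

wav = torch.tensor([-32768, 0, 16384, 32767], dtype=torch.int16)
print(normalize_wav(wav))
# tensor([-1.0000,  0.0000,  0.5000,  1.0000]) -- positive values are divided by 32767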
Example 2
def encode_uniform(self, dmll, S, fout):
    """Encode coarsest scale, for which we assume a uniform prior."""
    write_shape(S.shape, fout)
    r = ArithmeticCoder(dmll.L)
    entropy_coding_bytes = 0
    with self.times.prefix_scope('uniform encode'):
        c_uniform = self._get_uniform_cdf(S.shape, dmll.L)
        for c in range(S.shape[1]):
            S_c = S[:, c, ...].to(torch.int16)
            encoded = r.range_encode(S_c, c_uniform, self.times)
            write_num_bytes_encoded(len(encoded), fout)
            entropy_coding_bytes += len(encoded)
            fout.write(encoded)
    return entropy_coding_bytes
Example 3
def range_decode(self, encoded_bytes, cdf, time_logger: StackTimeLogger = no_op.NoOp):
    """
    :param encoded_bytes: bytes encoded by range_encode
    :param cdf: CDF to use, either a NHWLp matrix or an instance of CDFOut
    :return: decoded matrix as np.int16, NHW
    """
    if isinstance(cdf, CDFOut):
        logit_probs_c_sm, means_c, log_scales_c, K, targets = cdf
        N, _, H, W = means_c.shape
        with time_logger.run('ac.encode'):
            decoded = torchac.decode_logistic_mixture(
                targets, means_c, log_scales_c, logit_probs_c_sm, encoded_bytes)
    else:
        N, H, W, Lp = cdf.shape
        assert Lp == self.L + 1, (Lp, self.L)
        with time_logger.run('ac.encode'):
            decoded = torchac.decode_cdf(cdf, encoded_bytes)
    return decoded.reshape(N, H, W)
Example 4
def torch_dtype_to_np_dtype(dtype):
    dtype_dict = {
        torch.bool    : np.dtype(np.bool_),  # np.bool_: the plain np.bool alias was removed in NumPy 1.24
        torch.uint8   : np.dtype(np.uint8),
        torch.int8    : np.dtype(np.int8),
        torch.int16   : np.dtype(np.int16),
        torch.short   : np.dtype(np.int16),
        torch.int32   : np.dtype(np.int32),
        torch.int     : np.dtype(np.int32),
        torch.int64   : np.dtype(np.int64),
        torch.long    : np.dtype(np.int64),
        torch.float16 : np.dtype(np.float16),
        torch.half    : np.dtype(np.float16),
        torch.float32 : np.dtype(np.float32),
        torch.float   : np.dtype(np.float32),
        torch.float64 : np.dtype(np.float64),
        torch.double  : np.dtype(np.float64),
    }
    return dtype_dict[dtype]
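A quick sanity check of the mapping (a sketch, assuming `np`/`torch` are imported as above):

import numpy as np
import torch

assert torch_dtype_to_np_dtype(torch.int16) == np.dtype(np.int16)
assert torch_dtype_to_np_dtype(torch.short) == np.dtype(np.int16)  # torch.short aliases torch.int16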
Example 5
def update_dtype(self, old_dtype):
    updated = {}
    for k, v in old_dtype.items():
        if v == np.float32:
            dt = torch.float32
        elif v == np.float64:
            dt = torch.float64
        elif v == np.float16:
            dt = torch.float16
        elif v == np.uint8:
            dt = torch.uint8
        elif v == np.int8:
            dt = torch.int8
        elif v == np.int16:
            dt = torch.int16
        elif v == np.int32:
            dt = torch.int32
        else:
            raise ValueError("Unsupported dtype {}".format(v))
        updated[k] = dt
    return updated
Example 6
def sanitize_infinity(dtype):
    """
    Returns the largest possible value for the specified dtype.

    Parameters
    ----------
    dtype : torch dtype

    Returns
    -------
    large_enough : largest possible value for the given dtype
    """
    if dtype is torch.int8:
        large_enough = (1 << 7) - 1
    elif dtype is torch.int16:
        large_enough = (1 << 15) - 1
    elif dtype is torch.int32:
        large_enough = (1 << 31) - 1
    elif dtype is torch.int64:
        large_enough = (1 << 63) - 1
    else:
        large_enough = float("inf")
    return large_enough
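For the integer branches the result agrees with `torch.iinfo`; a small check (my addition, assuming `import torch`):

import torch

assert sanitize_infinity(torch.int16) == torch.iinfo(torch.int16).max  # 32767
assert sanitize_infinity(torch.int64) == torch.iinfo(torch.int64).max
assert sanitize_infinity(torch.float32) == float("inf")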
Example 7
def test_canonical_heat_type(self):
    self.assertEqual(ht.core.types.canonical_heat_type(ht.float32), ht.float32)
    self.assertEqual(ht.core.types.canonical_heat_type("?"), ht.bool)
    self.assertEqual(ht.core.types.canonical_heat_type(int), ht.int32)
    self.assertEqual(ht.core.types.canonical_heat_type("u1"), ht.uint8)
    self.assertEqual(ht.core.types.canonical_heat_type(np.int8), ht.int8)
    self.assertEqual(ht.core.types.canonical_heat_type(torch.short), ht.int16)

    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type({})
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type(object)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type(1)
    with self.assertRaises(TypeError):
        ht.core.types.canonical_heat_type("i7")
Example 8
def pytorch_dtype_to_type(dtype):
    """Map a pytorch dtype to a myia type."""
    import torch

    _type_map = {
        torch.int8: Int[8],
        torch.int16: Int[16],
        torch.int32: Int[32],
        torch.int64: Int[64],
        torch.uint8: UInt[8],
        torch.float16: Float[16],
        torch.float32: Float[32],
        torch.float64: Float[64],
        torch.bool: Bool,
    }
    if dtype not in _type_map:
        raise TypeError(f"Unsupported dtype {dtype}")
    return _type_map[dtype]
Example 9
def _convert_dtype_value(val):
    """Converts a PyTorch numeric type id to a torch scalar type."""
    convert_torch_dtype_map = {7: "torch.float64",
                               6: "torch.float32",
                               5: "torch.float16",
                               4: "torch.int64",
                               3: "torch.int32",
                               2: "torch.int16",
                               1: "torch.int8",
                               0: "torch.uint8",
                               None: "torch.int64"}  # default is torch.int64
    if val in convert_torch_dtype_map:
        return _convert_data_type(convert_torch_dtype_map[val])
    else:
        msg = "Torch data type value %d is not handled yet." % (val)
        raise NotImplementedError(msg)
Example 10
def _create_typed_const(data, dtype):
    """Create a (scalar) constant of given value and dtype.
    dtype should be a TVM dtype."""
    if dtype == "float64":
        typed_data = _expr.const(np.float64(data), dtype=dtype)
    elif dtype == "float32":
        typed_data = _expr.const(np.float32(data), dtype=dtype)
    elif dtype == "float16":
        typed_data = _expr.const(np.float16(data), dtype=dtype)
    elif dtype == "int64":
        typed_data = _expr.const(np.int64(data), dtype=dtype)
    elif dtype == "int32":
        typed_data = _expr.const(np.int32(data), dtype=dtype)
    elif dtype == "int16":
        typed_data = _expr.const(np.int16(data), dtype=dtype)
    elif dtype == "int8":
        typed_data = _expr.const(np.int8(data), dtype=dtype)
    elif dtype == "uint8":
        typed_data = _expr.const(np.uint8(data), dtype=dtype)
    else:
        raise NotImplementedError("input_type {} is not handled yet".format(dtype))
    return typed_data
Example 11
def _generate(num_channels, compression_level, bitrate):
    org_path = 'original.wav'
    ops_path = f'{bitrate}_{compression_level}_{num_channels}ch.opus'
    # Note: ffmpeg forces sample rate 48k Hz for opus https://stackoverflow.com/a/39186779
    # 1. generate original wav
    data = torch.linspace(-32768, 32767, 32768, dtype=torch.int16).repeat([num_channels, 1]).t()
    scipy.io.wavfile.write(org_path, 48000, data.numpy())
    # 2. convert to opus
    convert_to_opus(org_path, ops_path, bitrate=bitrate, compression_level=compression_level)
Example 12
def encode_cdf(cdf, sym):
    """
    :param cdf: CDF as 1HWLp, as int16, on CPU!
    :param sym: the symbols to encode, as int16, on CPU
    :return: byte-string, encoding `sym`
    """
    if cdf.is_cuda or sym.is_cuda:
        raise ValueError('CDF and symbols must be on CPU for `encode_cdf`')
    # encode_cdf is defined in both backends, so it doesn't matter which one we use!
    return any_backend.encode_cdf(cdf, sym)
Example 13
def decode_cdf(cdf, input_string):
    """
    :param cdf: CDF as 1HWLp, as int16, on CPU
    :param input_string: byte-string, encoding some symbols `sym`.
    :return: decoded `sym`.
    """
    if cdf.is_cuda:
        raise ValueError('CDF must be on CPU for `decode_cdf`')
    # decode_cdf is defined in both backends, so it doesn't matter which one we use!
    return any_backend.decode_cdf(cdf, input_string)
Example 14
def _renorm_cast_cdf_(cdf, precision):
    Lp = cdf.shape[-1]
    finals = 1  # NHW1
    # RENORMALIZATION_FACTOR in cuda
    f = torch.tensor(2, dtype=torch.float32, device=cdf.device).pow_(precision)
    cdf = cdf.mul((f - (Lp - 1)) / finals)  # TODO
    cdf = cdf.round()
    cdf = cdf.to(dtype=torch.int16, non_blocking=True)
    r = torch.arange(Lp, dtype=torch.int16, device=cdf.device)
    cdf.add_(r)
    return cdf
Example 15
def encode_scale(self, scale, dmll, out, img, fout):
    """Encode scale `scale`."""
    l = out.P[scale]
    bn = out.bn[scale] if scale != 0 else img
    S = out.S[scale]

    # shape used for all!
    write_shape(S.shape, fout)
    overhead_bytes = 5
    overhead_bytes += 4 * S.shape[1]

    r = ArithmeticCoder(dmll.L)

    # We encode channel by channel, because that's what's needed for the RGB scale. For s > 0, this could be done
    # in parallel for all channels
    def encoder(c, C_cur):
        S_c = S[:, c, ...].to(torch.int16)
        encoded = r.range_encode(S_c, cdf=C_cur, time_logger=self.times)
        write_num_bytes_encoded(len(encoded), fout)
        fout.write(encoded)
        # yielding always bottleneck and extra_info
        return bn[:, c, ...], len(encoded)

    with self.times.prefix_scope('encode scale'):
        with self.times.run('total'):
            _, entropy_coding_bytes_per_c = \
                self.code_with_cdf(l, bn.shape, encoder, dmll)

    # --- cleanup
    out.P[scale] = None
    out.bn[scale] = None
    out.S[scale] = None
    # ---

    return sum(entropy_coding_bytes_per_c)
Example 16
def range_encode(self, data, cdf, time_logger: StackTimeLogger):
    """
    :param data: data to encode
    :param cdf: CDF to use, either a NHWLp matrix or an instance of CDFOut
    :return: data encoded to a bytes string
    """
    assert len(data.shape) == 3, data.shape

    with time_logger.run('data -> cpu'):
        data = data.to('cpu', non_blocking=True)
    assert data.dtype == torch.int16, 'Wrong dtype: {}'.format(data.dtype)

    with time_logger.run('reshape'):
        data = data.reshape(-1).contiguous()

    if isinstance(cdf, CDFOut):
        logit_probs_c_sm, means_c, log_scales_c, K, targets = cdf
        with time_logger.run('ac.encode'):
            out_bytes = torchac.encode_logistic_mixture(
                targets, means_c, log_scales_c, logit_probs_c_sm, data)
    else:
        N, H, W, Lp = cdf.shape
        assert Lp == self.L + 1, (Lp, self.L)
        with time_logger.run('ac.encode'):
            out_bytes = torchac.encode_cdf(cdf, data)
    return out_bytes
Example 17
def data_type_dict():
    return {'float16': th.float16,
            'float32': th.float32,
            'float64': th.float64,
            'uint8': th.uint8,
            'int8': th.int8,
            'int16': th.int16,
            'int32': th.int32,
            'int64': th.int64,
            'bool': th.bool}
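Usage sketch (assuming the module imports torch as `th`, as the body suggests):

import torch as th

dtypes = data_type_dict()
assert dtypes['int16'] is th.int16
assert dtypes['bool'] is th.bool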
Example 18
def toOutput(bitDepth):
    quant = 1 << bitDepth
    if bitDepth <= 8:
        dtype = torch.uint8
    elif bitDepth <= 15:
        dtype = torch.int16
    else:
        dtype = torch.int32
    def f(image):
        image = image.detach() * quant
        image.clamp_(0, quant - 1)
        return image.to(dtype=dtype, device=deviceCPU).numpy()
    return f
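A usage sketch; `deviceCPU` is a module-level constant in the source, assumed here to be `torch.device('cpu')`:

import torch

deviceCPU = torch.device('cpu')
to_int16 = toOutput(15)           # bit depth 15 selects torch.int16
arr = to_int16(torch.rand(2, 2))  # numpy array of int16 values in [0, 32767]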
Example 19
def is_int_tensor(tensor):
    """Checks if the input tensor is a Torch tensor of an int type."""
    return _is_type_tensor(
        tensor, [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
    )
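`_is_type_tensor` is a private helper that is not shown; a minimal sketch of what it plausibly does (an assumption, not the library's actual code):

import torch

def _is_type_tensor(tensor, dtypes):
    # Hypothetical helper: True for a Torch tensor whose dtype is in the list.
    return torch.is_tensor(tensor) and tensor.dtype in dtypes

assert is_int_tensor(torch.zeros(3, dtype=torch.int16))
assert not is_int_tensor(torch.zeros(3, dtype=torch.float32))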
Example 20
def test_encode_decode(self):
    """Tests tensor encoding and decoding."""
    for float in [False, True]:
        if float:
            fpe = FixedPointEncoder(precision_bits=16)
        else:
            fpe = FixedPointEncoder(precision_bits=0)
        tensor = get_test_tensor(float=float)
        decoded = fpe.decode(fpe.encode(tensor))
        self._check(
            decoded,
            tensor,
            "Encoding/decoding a %s failed." % ("float" if float else "long"),
        )

    # Make sure encoding a subclass of CrypTensor is a no-op
    crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedFirstParty)
    crypten.init()
    tensor = get_test_tensor(float=True)
    encrypted_tensor = crypten.cryptensor(tensor)
    encrypted_tensor = fpe.encode(encrypted_tensor)
    self._check(
        encrypted_tensor.get_plain_text(),
        tensor,
        "Encoding an EncryptedTensor failed.",
    )

    # Try a few other types.
    fpe = FixedPointEncoder(precision_bits=0)
    for dtype in [torch.uint8, torch.int8, torch.int16]:
        tensor = torch.zeros(5, dtype=dtype).random_()
        decoded = fpe.decode(fpe.encode(tensor)).type(dtype)
        self._check(decoded, tensor, "Encoding/decoding a %s failed." % dtype)
Example 21
def prepare_label(self):
    args = self.args
    # prepare one-hot label
    label = torch.arange(args.way, dtype=torch.int16).repeat(args.query)
    label_aux = torch.arange(args.way, dtype=torch.int8).repeat(args.shot + args.query)
    label = label.type(torch.LongTensor)
    label_aux = label_aux.type(torch.LongTensor)
    if torch.cuda.is_available():
        label = label.cuda()
        label_aux = label_aux.cuda()
    return label, label_aux
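The `arange(...).repeat(...)` pattern tiles the class indices once per query sample; a small sketch with hypothetical way=3, query=2:

import torch

label = torch.arange(3, dtype=torch.int16).repeat(2)
# tensor([0, 1, 2, 0, 1, 2], dtype=torch.int16)
label = label.type(torch.LongTensor)  # int64, as required by F.cross_entropy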
Example 22
def evaluate(self, data_loader):
    # restore model args
    args = self.args
    # evaluation mode
    self.model.eval()
    record = np.zeros((args.num_eval_episodes, 2))  # loss and acc
    label = torch.arange(args.eval_way, dtype=torch.int16).repeat(args.eval_query)
    label = label.type(torch.LongTensor)
    if torch.cuda.is_available():
        label = label.cuda()
    print('best epoch {}, best val acc={:.4f} + {:.4f}'.format(
        self.trlog['max_acc_epoch'],
        self.trlog['max_acc'],
        self.trlog['max_acc_interval']))
    with torch.no_grad():
        for i, batch in enumerate(data_loader, 1):
            if torch.cuda.is_available():
                data, _ = [_.cuda() for _ in batch]
            else:
                data = batch[0]
            logits = self.model(data)
            loss = F.cross_entropy(logits, label)
            acc = count_acc(logits, label)
            record[i - 1, 0] = loss.item()
            record[i - 1, 1] = acc
    assert i == record.shape[0]
    vl, _ = compute_confidence_interval(record[:, 0])
    va, vap = compute_confidence_interval(record[:, 1])

    # train mode
    self.model.train()
    if self.args.fix_BN:
        self.model.encoder.eval()
    return vl, va, vap
Example 23
def sample(x, size):
    # https://gist.github.com/yoavram/4134617
    i = random.sample(range(x.shape[0]), size)
    return torch.tensor(x[i], dtype=torch.int16)
    # x = np.random.permutation(x)
    # return torch.tensor(x[:size])
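Usage sketch (assuming `random`, `numpy`, and `torch` are imported): `size` rows are drawn without replacement and cast to an int16 tensor.

import random
import numpy as np
import torch

x = np.arange(10, dtype=np.int16).reshape(5, 2)
batch = sample(x, 3)  # shape (3, 2), dtype torch.int16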
Example 24
def get_all_coords(stride):
    return torch.tensor(
        np.stack([v.reshape(-1) for v in
                  np.meshgrid(
                      *[stride // 2 + np.arange(0, s, stride) for s in _shape],
                      indexing='ij')],
                 -1), dtype=torch.int16)
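`_shape` is a module-level global in the source; a sketch with a hypothetical `_shape = (4, 4)`:

import numpy as np
import torch

_shape = (4, 4)
coords = get_all_coords(2)
# coords is int16 with shape (4, 2): the stride-2 cell centers
# [[1, 1], [1, 3], [3, 1], [3, 3]]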
Example 25
def test_int16(self):
    self.assert_is_instantiable_heat_type(ht.int16, torch.int16)
    self.assert_is_instantiable_heat_type(ht.short, torch.int16)
Example 26
def test_type_promotions(self):
    self.assertEqual(ht.promote_types(ht.uint8, ht.uint8), ht.uint8)
    self.assertEqual(ht.promote_types(ht.int8, ht.uint8), ht.int16)
    self.assertEqual(ht.promote_types(ht.int32, ht.float32), ht.float32)
    self.assertEqual(ht.promote_types("f4", ht.float), ht.float32)
    self.assertEqual(ht.promote_types(ht.bool_, "?"), ht.bool)

    # exceptions
    with self.assertRaises(TypeError):
        ht.promote_types(1, "?")
    with self.assertRaises(TypeError):
        ht.promote_types(ht.float32, "hello world")
Example 27
def test_and(self):
    int16_tensor = ht.array([[1, 1], [2, 2]], dtype=ht.int16)
    int16_vector = ht.array([[3, 4]], dtype=ht.int16)
    self.assertTrue(
        ht.equal(int16_tensor & int16_vector, ht.bitwise_and(int16_tensor, int16_vector))
    )
Example 28
def test_or(self):
    int16_tensor = ht.array([[1, 1], [2, 2]], dtype=ht.int16)
    int16_vector = ht.array([[3, 4]], dtype=ht.int16)
    self.assertTrue(
        ht.equal(int16_tensor | int16_vector, ht.bitwise_or(int16_tensor, int16_vector))
    )
Example 29
def test_xor(self):
    int16_tensor = ht.array([[1, 1], [2, 2]], dtype=ht.int16)
    int16_vector = ht.array([[3, 4]], dtype=ht.int16)
    self.assertTrue(
        ht.equal(int16_tensor ^ int16_vector, ht.bitwise_xor(int16_tensor, int16_vector))
    )
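For comparison, the same three bitwise operations in plain PyTorch (my sketch, not part of the heat test suite):

import torch

a = torch.tensor([[1, 1], [2, 2]], dtype=torch.int16)
b = torch.tensor([[3, 4]], dtype=torch.int16)
print(a & b)  # tensor([[1, 0], [2, 0]], dtype=torch.int16)
print(a | b)  # tensor([[3, 5], [3, 6]], dtype=torch.int16)
print(a ^ b)  # tensor([[2, 5], [1, 6]], dtype=torch.int16)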
Example 30
def promote_types(type1, type2):
    """
    Returns the data type with the smallest size and smallest scalar kind to which both type1 and type2 may be
    intuitively cast, where intuitive casting refers to maintaining the same bit length if possible. This
    function is symmetric.

    Parameters
    ----------
    type1 : type, str, ht.dtype
        type of first operand
    type2 : type, str, ht.dtype
        type of second operand

    Returns
    -------
    out : ht.dtype
        The promoted data type.

    Examples
    --------
    >>> ht.promote_types(ht.uint8, ht.uint8)
    ht.uint8
    >>> ht.promote_types(ht.int32, ht.float32)
    ht.float32
    >>> ht.promote_types(ht.int8, ht.uint8)
    ht.int16
    >>> ht.promote_types("i8", "f4")
    ht.float64
    """
    typecode_type1 = __type_codes[canonical_heat_type(type1)]
    typecode_type2 = __type_codes[canonical_heat_type(type2)]
    return __type_promotions[typecode_type1][typecode_type2]