Python source code examples: unicodedata.combining()
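unicodedata.combining(ch) returns the canonical combining class of a single character as an integer, and 0 when the character is not a combining mark. Most of the examples below pair it with unicodedata.normalize("NFKD", ...) to strip accents: normalization decomposes a precomposed character into its base letter plus combining marks, which are then filtered out. A minimal, self-contained sketch of that pattern:

import unicodedata

print(unicodedata.combining("e"))       # 0   -- not a combining mark
print(unicodedata.combining("\u0301"))  # 230 -- COMBINING ACUTE ACCENT

decomposed = unicodedata.normalize("NFKD", "café")
stripped = "".join(c for c in decomposed if not unicodedata.combining(c))
print(stripped)  # cafe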
Example 1
def preprocess_text(inputs, remove_space=True, lower=False):
    """preprocess data by removing extra space and normalize data."""
    outputs = inputs
    if remove_space:
        outputs = " ".join(inputs.strip().split())

    if six.PY2 and isinstance(outputs, str):
        try:
            outputs = six.ensure_text(outputs, "utf-8")
        except UnicodeDecodeError:
            outputs = six.ensure_text(outputs, "latin-1")

    outputs = unicodedata.normalize("NFKD", outputs)
    outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
    if lower:
        outputs = outputs.lower()

    return outputs
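As a quick illustration (not part of the original project; it assumes six is installed and the function above is in scope), extra whitespace is collapsed and the combining marks produced by NFKD are dropped:

# Illustrative call, assuming `import six` and `import unicodedata` at module level.
print(preprocess_text("  Héllo   Wörld  ", lower=True))  # "hello world"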
Example 2
def chars(self, num, truncate=None, html=False):
    """
    Returns the text truncated to be no longer than the specified number
    of characters.

    Takes an optional argument of what should be used to notify that the
    string has been truncated, defaulting to a translatable string of an
    ellipsis (...).
    """
    length = int(num)
    text = unicodedata.normalize('NFC', self._wrapped)

    # Calculate the length to truncate to (max length - end_text length)
    truncate_len = length
    for char in self.add_truncation_text('', truncate):
        if not unicodedata.combining(char):
            truncate_len -= 1
            if truncate_len == 0:
                break

    if html:
        return self._truncate_html(length, truncate, text, truncate_len, False)
    return self._text_chars(length, truncate, text, truncate_len)
Example 3
def _text_chars(self, length, truncate, text, truncate_len):
    """
    Truncates a string after a certain number of chars.
    """
    s_len = 0
    end_index = None
    for i, char in enumerate(text):
        if unicodedata.combining(char):
            # Don't consider combining characters
            # as adding to the string length
            continue
        s_len += 1
        if end_index is None and s_len > truncate_len:
            end_index = i
        if s_len > length:
            # Return the truncated string
            return self.add_truncation_text(text[:end_index or 0],
                                            truncate)

    # Return the original string since no truncation was necessary
    return text
Example 4
def test_ipy2_gh357(self):
    """https://github.com/IronLanguages/ironpython2/issues/357"""
    import unicodedata

    if is_cli:
        self.assertEqual(unicodedata.name(u'\u4e2d'), '<CJK IDEOGRAPH, FIRST>..<CJK IDEOGRAPH, LAST>')
    else:
        self.assertEqual(unicodedata.name(u'\u4e2d'), 'CJK UNIFIED IDEOGRAPH-4E2D')

    self.assertRaises(ValueError, unicodedata.decimal, u'\u4e2d')
    self.assertEqual(unicodedata.decimal(u'\u4e2d', 0), 0)
    self.assertRaises(ValueError, unicodedata.digit, u'\u4e2d')
    self.assertEqual(unicodedata.digit(u'\u4e2d', 0), 0)
    self.assertRaises(ValueError, unicodedata.numeric, u'\u4e2d')
    self.assertEqual(unicodedata.numeric(u'\u4e2d', 0), 0)
    self.assertEqual(unicodedata.category(u'\u4e2d'), 'Lo')
    self.assertEqual(unicodedata.bidirectional(u'\u4e2d'), 'L')
    self.assertEqual(unicodedata.combining(u'\u4e2d'), 0)
    self.assertEqual(unicodedata.east_asian_width(u'\u4e2d'), 'W')
    self.assertEqual(unicodedata.mirrored(u'\u4e2d'), 0)
    self.assertEqual(unicodedata.decomposition(u'\u4e2d'), '')
Example 5
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    normalized = unicodedata.normalize('NFKD', s)
    if normalized == s:
        return s
    else:
        return ''.join([c for c in normalized if not unicodedata.combining(c)])
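A brief usage sketch (assuming the function above is in scope): strings with decomposable accents lose them, while text that is already in normal form is returned unchanged via the early-return fast path.

print(strip_accents_unicode("naïve façade"))  # "naive facade"
print(strip_accents_unicode("plain ascii"))   # "plain ascii" (fast path, returned as-is)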
Example 6
def preprocess_text(inputs, lower=False, remove_space=True, keep_accents=False):
    if remove_space:
        outputs = ' '.join(inputs.strip().split())
    else:
        outputs = inputs
    outputs = outputs.replace("``", '"').replace("''", '"')

    if six.PY2 and isinstance(outputs, str):
        outputs = outputs.decode('utf-8')

    if not keep_accents:
        outputs = unicodedata.normalize('NFKD', outputs)
        outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
    if lower:
        outputs = outputs.lower()

    return outputs
Example 7
def chars(self, num, truncate=None, html=False):
    """
    Return the text truncated to be no longer than the specified number
    of characters.

    `truncate` specifies what should be used to notify that the string has
    been truncated, defaulting to a translatable string of an ellipsis
    (...).
    """
    self._setup()
    length = int(num)
    text = unicodedata.normalize('NFC', self._wrapped)

    # Calculate the length to truncate to (max length - end_text length)
    truncate_len = length
    for char in self.add_truncation_text('', truncate):
        if not unicodedata.combining(char):
            truncate_len -= 1
            if truncate_len == 0:
                break

    if html:
        return self._truncate_html(length, truncate, text, truncate_len, False)
    return self._text_chars(length, truncate, text, truncate_len)
Example 8
def _text_chars(self, length, truncate, text, truncate_len):
    """Truncate a string after a certain number of chars."""
    s_len = 0
    end_index = None
    for i, char in enumerate(text):
        if unicodedata.combining(char):
            # Don't consider combining characters
            # as adding to the string length
            continue
        s_len += 1
        if end_index is None and s_len > truncate_len:
            end_index = i
        if s_len > length:
            # Return the truncated string
            return self.add_truncation_text(text[:end_index or 0],
                                            truncate)

    # Return the original string since no truncation was necessary
    return text
Example 9
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    Parameters
    ----------
    s : string
        The string to strip

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    normalized = unicodedata.normalize('NFKD', s)
    if normalized == s:
        return s
    else:
        return ''.join([c for c in normalized if not unicodedata.combining(c)])
Example 10
def GetLineWidth(line):
    """Determines the width of the line in column positions.

    Args:
      line: A string, which may be a Unicode string.

    Returns:
      The width of the line in column positions, accounting for Unicode
      combining characters and wide characters.
    """
    if isinstance(line, unicode):
        width = 0
        for uc in unicodedata.normalize('NFC', line):
            if unicodedata.east_asian_width(uc) in ('W', 'F'):
                width += 2
            elif not unicodedata.combining(uc):
                width += 1
        return width
    else:
        return len(line)
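The function above targets Python 2 (note the isinstance(line, unicode) check). A rough Python 3 equivalent, where every str is already Unicode, might look like the following sketch (the name get_line_width is hypothetical):

import unicodedata

def get_line_width(line: str) -> int:
    # Wide/fullwidth characters occupy two columns, combining marks zero,
    # everything else one.
    width = 0
    for ch in unicodedata.normalize('NFC', line):
        if unicodedata.east_asian_width(ch) in ('W', 'F'):
            width += 2
        elif not unicodedata.combining(ch):
            width += 1
    return width

print(get_line_width("abc"))  # 3
print(get_line_width("中文"))  # 4 (two wide characters)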
Example 11
def _combining_class(cp):
    v = unicodedata.combining(unichr(cp))
    if v == 0:
        if not unicodedata.name(unichr(cp)):
            raise ValueError("Unknown character in unicodedata")
    return v
Example 12
def check_initial_combiner(label):
    if unicodedata.category(label[0])[0] == 'M':
        raise IDNAError('Label begins with an illegal combining character')
    return True
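Usage sketch (IDNAError is the exception class defined by the idna package the snippet comes from and is assumed to be in scope): a label whose first character has general category 'M' (a combining mark) is rejected.

check_initial_combiner("example")    # True
check_initial_combiner("\u0301abc")  # raises IDNAError('Label begins with an illegal combining character')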
Example 13
def _combining_class(cp):
    return unicodedata.combining(unichr(cp))
Example 14
def _char_block_width(char):
    # Basic Latin, which is probably the most common case
    # if char in xrange(0x0021, 0x007e):
    # if char >= 0x0021 and char <= 0x007e:
    if 0x0021 <= char <= 0x007e:
        return 1
    # Chinese, Japanese, Korean (common)
    if 0x4e00 <= char <= 0x9fff:
        return 2
    # Hangul
    if 0xac00 <= char <= 0xd7af:
        return 2
    # Combining?
    if unicodedata.combining(uni_chr(char)):
        return 0
    # Hiragana and Katakana
    if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff:
        return 2
    # Full-width Latin characters
    if 0xff01 <= char <= 0xff60:
        return 2
    # CJK punctuation
    if 0x3000 <= char <= 0x303e:
        return 2
    # Backspace and delete
    if char in (0x0008, 0x007f):
        return -1
    # Other control characters
    elif char in (0x0000, 0x001f):
        return 0
    # Take a guess
    return 1
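One possible way to use the helper above (a sketch, not from the original project): sum the per-character cell widths to estimate how many terminal columns a string occupies. It assumes _char_block_width and its uni_chr compatibility shim are in scope; the helper name string_block_width is hypothetical.

def string_block_width(text):
    # Sum per-code-point cell widths; combining marks contribute 0.
    return sum(_char_block_width(ord(ch)) for ch in text)

print(string_block_width("abc"))  # 3
print(string_block_width("你好"))  # 4 (CJK characters take two cells each)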
Example 15
def preprocess_text(self, inputs):
    if self.remove_space:
        outputs = " ".join(inputs.strip().split())
    else:
        outputs = inputs
    outputs = outputs.replace("``", '"').replace("''", '"')

    if not self.keep_accents:
        outputs = unicodedata.normalize("NFKD", outputs)
        outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
    if self.do_lower_case:
        outputs = outputs.lower()

    return outputs
Example 16
def remove_diacritics(text):
    """Remove diacritics from the text.

    Args:
        text (str): Text.

    Returns:
        str: Return text without diacritics.
    """
    text = unicodedata.normalize('NFD', text)
    shaved = unicodedata.normalize('NFC', ''.join(c for c in text if not unicodedata.combining(c)))
    return unicodedata.normalize('NFC', shaved)
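Illustrative call (assuming the function above is in scope): the NFD pass exposes the combining marks so they can be dropped, and the final NFC pass recomposes whatever remains.

print(remove_diacritics("Crème brûlée"))  # "Creme brulee"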