repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url |
---|---|---|---|---|---|---|---|
neptune-ml/steppy | steppy/base.py | Step.persist_upstream_diagram | def persist_upstream_diagram(self, filepath):
"""Creates upstream steps diagram and persists it to disk as png file.
Pydot graph is created and persisted to disk as png file under the filepath directory.
Args:
filepath (str): filepath to which the png with steps visualization should
be persisted
"""
assert isinstance(filepath, str),\
'Step {} error, filepath must be str. Got {} instead'.format(self.name, type(filepath))
persist_as_png(self.upstream_structure, filepath) | python | def persist_upstream_diagram(self, filepath):
"""Creates upstream steps diagram and persists it to disk as png file.
Pydot graph is created and persisted to disk as png file under the filepath directory.
Args:
filepath (str): filepath to which the png with steps visualization should
be persisted
"""
assert isinstance(filepath, str),\
'Step {} error, filepath must be str. Got {} instead'.format(self.name, type(filepath))
persist_as_png(self.upstream_structure, filepath) | Creates upstream steps diagram and persists it to disk as png file.
Pydot graph is created and persisted to disk as png file under the filepath directory.
Args:
filepath (str): filepath to which the png with steps visualization should
be persisted | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L513-L524 |
neptune-ml/steppy | steppy/base.py | BaseTransformer.fit_transform | def fit_transform(self, *args, **kwargs):
"""Performs fit followed by transform.
This method simply combines fit and transform.
Args:
args: positional arguments (can be anything)
kwargs: keyword arguments (can be anything)
Returns:
dict: output
"""
self.fit(*args, **kwargs)
return self.transform(*args, **kwargs) | python | def fit_transform(self, *args, **kwargs):
"""Performs fit followed by transform.
This method simply combines fit and transform.
Args:
args: positional arguments (can be anything)
kwargs: keyword arguments (can be anything)
Returns:
dict: output
"""
self.fit(*args, **kwargs)
return self.transform(*args, **kwargs) | Performs fit followed by transform.
This method simply combines fit and transform.
Args:
args: positional arguments (can be anything)
kwargs: keyword arguments (can be anything)
Returns:
dict: output | https://github.com/neptune-ml/steppy/blob/856b95f1f5189e1d2ca122b891bc670adac9692b/steppy/base.py#L773-L786 |
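The fit/transform contract above is easy to mirror outside of steppy; the sketch below is a stand-alone illustration of the same composition (the `AddMean` transformer and its dict output are invented for the example, not part of steppy's API).

```python
import numpy as np

class AddMean:
    """Toy transformer following the fit -> transform -> fit_transform pattern."""

    def fit(self, X):
        self.mean_ = np.mean(X)          # learn state during fit
        return self

    def transform(self, X):
        return {'output': np.asarray(X) + self.mean_}

    def fit_transform(self, X):
        # Same composition as BaseTransformer.fit_transform above.
        self.fit(X)
        return self.transform(X)

print(AddMean().fit_transform([1, 2, 3]))  # {'output': array([3., 4., 5.])}
```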
dveselov/python-yandex-translate | yandex_translate/__init__.py | YandexTranslate.url | def url(self, endpoint):
"""
Returns full URL for specified API endpoint
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> translate.url("langs")
'https://translate.yandex.net/api/v1.5/tr.json/getLangs'
>>> translate.url("detect")
'https://translate.yandex.net/api/v1.5/tr.json/detect'
>>> translate.url("translate")
'https://translate.yandex.net/api/v1.5/tr.json/translate'
"""
return self.api_url.format(version=self.api_version,
endpoint=self.api_endpoints[endpoint]) | python | def url(self, endpoint):
"""
Returns full URL for specified API endpoint
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> translate.url("langs")
'https://translate.yandex.net/api/v1.5/tr.json/getLangs'
>>> translate.url("detect")
'https://translate.yandex.net/api/v1.5/tr.json/detect'
>>> translate.url("translate")
'https://translate.yandex.net/api/v1.5/tr.json/translate'
"""
return self.api_url.format(version=self.api_version,
endpoint=self.api_endpoints[endpoint]) | Returns full URL for specified API endpoint
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> translate.url("langs")
'https://translate.yandex.net/api/v1.5/tr.json/getLangs'
>>> translate.url("detect")
'https://translate.yandex.net/api/v1.5/tr.json/detect'
>>> translate.url("translate")
'https://translate.yandex.net/api/v1.5/tr.json/translate' | https://github.com/dveselov/python-yandex-translate/blob/f26e624f683f10b4e7bf630b40d97241d82d5b01/yandex_translate/__init__.py#L47-L59 |
dveselov/python-yandex-translate | yandex_translate/__init__.py | YandexTranslate.directions | def directions(self, proxies=None):
"""
Returns list with translate directions
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> directions = translate.directions
>>> len(directions) > 0
True
"""
try:
response = requests.get(self.url("langs"), params={"key": self.api_key}, proxies=proxies)
except requests.exceptions.ConnectionError:
raise YandexTranslateException(self.error_codes[503])
else:
response = response.json()
status_code = response.get("code", 200)
if status_code != 200:
raise YandexTranslateException(status_code)
return response.get("dirs") | python | def directions(self, proxies=None):
"""
Returns list with translate directions
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> directions = translate.directions
>>> len(directions) > 0
True
"""
try:
response = requests.get(self.url("langs"), params={"key": self.api_key}, proxies=proxies)
except requests.exceptions.ConnectionError:
raise YandexTranslateException(self.error_codes[503])
else:
response = response.json()
status_code = response.get("code", 200)
if status_code != 200:
raise YandexTranslateException(status_code)
return response.get("dirs") | Returns list with translate directions
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> directions = translate.directions
>>> len(directions) > 0
True | https://github.com/dveselov/python-yandex-translate/blob/f26e624f683f10b4e7bf630b40d97241d82d5b01/yandex_translate/__init__.py#L62-L79 |
dveselov/python-yandex-translate | yandex_translate/__init__.py | YandexTranslate.detect | def detect(self, text, proxies=None, format="plain"):
"""
Specifies language of text
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> result = translate.detect(text="Hello world!")
>>> result == "en"
True
"""
data = {
"text": text,
"format": format,
"key": self.api_key,
}
try:
response = requests.post(self.url("detect"), data=data, proxies=proxies)
except ConnectionError:
raise YandexTranslateException(self.error_codes[503])
except ValueError:
raise YandexTranslateException(response)
else:
response = response.json()
language = response.get("lang", None)
status_code = response.get("code", 200)
if status_code != 200:
raise YandexTranslateException(status_code)
elif not language:
raise YandexTranslateException(501)
return language | python | def detect(self, text, proxies=None, format="plain"):
"""
Specifies language of text
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> result = translate.detect(text="Hello world!")
>>> result == "en"
True
"""
data = {
"text": text,
"format": format,
"key": self.api_key,
}
try:
response = requests.post(self.url("detect"), data=data, proxies=proxies)
except ConnectionError:
raise YandexTranslateException(self.error_codes[503])
except ValueError:
raise YandexTranslateException(response)
else:
response = response.json()
language = response.get("lang", None)
status_code = response.get("code", 200)
if status_code != 200:
raise YandexTranslateException(status_code)
elif not language:
raise YandexTranslateException(501)
return language | Specifies language of text
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> result = translate.detect(text="Hello world!")
>>> result == "en"
True | https://github.com/dveselov/python-yandex-translate/blob/f26e624f683f10b4e7bf630b40d97241d82d5b01/yandex_translate/__init__.py#L92-L119 |
dveselov/python-yandex-translate | yandex_translate/__init__.py | YandexTranslate.translate | def translate(self, text, lang, proxies=None, format="plain"):
"""
Translates text to passed language
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> result = translate.translate(lang="ru", text="Hello, world!")
>>> result["code"] == 200
True
>>> result["lang"] == "en-ru"
True
"""
data = {
"text": text,
"format": format,
"lang": lang,
"key": self.api_key
}
try:
response = requests.post(self.url("translate"), data=data, proxies=proxies)
except ConnectionError:
raise YandexTranslateException(503)
else:
response = response.json()
status_code = response.get("code", 200)
if status_code != 200:
raise YandexTranslateException(status_code)
return response | python | def translate(self, text, lang, proxies=None, format="plain"):
"""
Translates text to passed language
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> result = translate.translate(lang="ru", text="Hello, world!")
>>> result["code"] == 200
True
>>> result["lang"] == "en-ru"
True
"""
data = {
"text": text,
"format": format,
"lang": lang,
"key": self.api_key
}
try:
response = requests.post(self.url("translate"), data=data, proxies=proxies)
except ConnectionError:
raise YandexTranslateException(503)
else:
response = response.json()
status_code = response.get("code", 200)
if status_code != 200:
raise YandexTranslateException(status_code)
return response | Translates text to passed language
>>> translate = YandexTranslate("trnsl.1.1.20130421T140201Z.323e508a33e9d84b.f1e0d9ca9bcd0a00b0ef71d82e6cf4158183d09e")
>>> result = translate.translate(lang="ru", text="Hello, world!")
>>> result["code"] == 200
True
>>> result["lang"] == "en-ru"
True | https://github.com/dveselov/python-yandex-translate/blob/f26e624f683f10b4e7bf630b40d97241d82d5b01/yandex_translate/__init__.py#L121-L146 |
lepture/otpauth | otpauth.py | generate_hotp | def generate_hotp(secret, counter=4):
"""Generate a HOTP code.
:param secret: A secret token for the authentication.
:param counter: HOTP is a counter based algorithm.
"""
# https://tools.ietf.org/html/rfc4226
msg = struct.pack('>Q', counter)
digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()
ob = digest[19]
if PY2:
ob = ord(ob)
pos = ob & 15
base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
token = base % 1000000
return token | python | def generate_hotp(secret, counter=4):
"""Generate a HOTP code.
:param secret: A secret token for the authentication.
:param counter: HOTP is a counter based algorithm.
"""
# https://tools.ietf.org/html/rfc4226
msg = struct.pack('>Q', counter)
digest = hmac.new(to_bytes(secret), msg, hashlib.sha1).digest()
ob = digest[19]
if PY2:
ob = ord(ob)
pos = ob & 15
base = struct.unpack('>I', digest[pos:pos + 4])[0] & 0x7fffffff
token = base % 1000000
return token | Generate a HOTP code.
:param secret: A secret token for the authentication.
:param counter: HOTP is a counter based algorithm. | https://github.com/lepture/otpauth/blob/49914d83d36dbcd33c9e26f65002b21ce09a6303/otpauth.py#L143-L160 |
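For a Python 3-only reading of the same dynamic-truncation steps from RFC 4226, here is a self-contained sketch; `hotp_py3` is a hypothetical name, and the byte-string secret is the RFC's own test key.

```python
import hashlib
import hmac
import struct

def hotp_py3(secret: bytes, counter: int, digits: int = 6) -> int:
    msg = struct.pack('>Q', counter)                        # 8-byte big-endian counter
    digest = hmac.new(secret, msg, hashlib.sha1).digest()   # 20-byte HMAC-SHA1
    offset = digest[19] & 0x0F                              # low nibble of the last byte
    truncated = struct.unpack('>I', digest[offset:offset + 4])[0] & 0x7FFFFFFF
    return truncated % (10 ** digits)

print(hotp_py3(b'12345678901234567890', 0))  # 755224, the first RFC 4226 test vector
```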
lepture/otpauth | otpauth.py | generate_totp | def generate_totp(secret, period=30, timestamp=None):
"""Generate a TOTP code.
A TOTP code is an extension of HOTP algorithm.
:param secret: A secret token for the authentication.
:param period: A period that a TOTP code is valid in seconds
:param timestamp: Current time stamp.
"""
if timestamp is None:
timestamp = time.time()
counter = int(timestamp) // period
return generate_hotp(secret, counter) | python | def generate_totp(secret, period=30, timestamp=None):
"""Generate a TOTP code.
A TOTP code is an extension of HOTP algorithm.
:param secret: A secret token for the authentication.
:param period: A period that a TOTP code is valid in seconds
:param timestamp: Current time stamp.
"""
if timestamp is None:
timestamp = time.time()
counter = int(timestamp) // period
return generate_hotp(secret, counter) | Generate a TOTP code.
A TOTP code is an extension of HOTP algorithm.
:param secret: A secret token for the authentication.
:param period: A period that a TOTP code is valid in seconds
:param timestamp: Current time stamp. | https://github.com/lepture/otpauth/blob/49914d83d36dbcd33c9e26f65002b21ce09a6303/otpauth.py#L163-L175 |
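In other words, a TOTP value is just the HOTP value for the counter `timestamp // period`. The check below uses the two module-level functions shown above (assumed importable from the single-file `otpauth` module) with an arbitrary fixed timestamp so the result is reproducible.

```python
from otpauth import generate_hotp, generate_totp

secret = 'a-shared-secret'           # any value accepted by to_bytes() above
ts = 1_234_567_890                   # pin the time so the example is deterministic
assert generate_totp(secret, period=30, timestamp=ts) == \
       generate_hotp(secret, counter=ts // 30)
```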
lepture/otpauth | otpauth.py | OtpAuth.valid_hotp | def valid_hotp(self, code, last=0, trials=100):
"""Valid a HOTP code.
:param code: A number that is less than 6 characters.
:param last: Guess HOTP code from last + 1 range.
:param trials: Guess HOTP code ends at last + trials + 1.
"""
if not valid_code(code):
return False
code = bytes(int(code))
for i in range(last + 1, last + trials + 1):
if compare_digest(bytes(self.hotp(counter=i)), code):
return i
return False | python | def valid_hotp(self, code, last=0, trials=100):
"""Valid a HOTP code.
:param code: A number that is less than 6 characters.
:param last: Guess HOTP code from last + 1 range.
:param trials: Guess HOTP code ends at last + trials + 1.
"""
if not valid_code(code):
return False
code = bytes(int(code))
for i in range(last + 1, last + trials + 1):
if compare_digest(bytes(self.hotp(counter=i)), code):
return i
return False | Valid a HOTP code.
:param code: A number that is less than 6 characters.
:param last: Guess HOTP code from last + 1 range.
:param trials: Guess HOTP code ends at last + trials + 1. | https://github.com/lepture/otpauth/blob/49914d83d36dbcd33c9e26f65002b21ce09a6303/otpauth.py#L67-L81 |
lepture/otpauth | otpauth.py | OtpAuth.valid_totp | def valid_totp(self, code, period=30, timestamp=None):
"""Valid a TOTP code.
:param code: A number that is less than 6 characters.
:param period: A period that a TOTP code is valid in seconds
:param timestamp: Validate TOTP at this given timestamp
"""
if not valid_code(code):
return False
return compare_digest(
bytes(self.totp(period, timestamp)),
bytes(int(code))
) | python | def valid_totp(self, code, period=30, timestamp=None):
"""Valid a TOTP code.
:param code: A number that is less than 6 characters.
:param period: A period that a TOTP code is valid in seconds
:param timestamp: Validate TOTP at this given timestamp
"""
if not valid_code(code):
return False
return compare_digest(
bytes(self.totp(period, timestamp)),
bytes(int(code))
) | Valid a TOTP code.
:param code: A number that is less than 6 characters.
:param period: A period that a TOTP code is valid in seconds
:param timestamp: Validate TOTP at this given timestamp | https://github.com/lepture/otpauth/blob/49914d83d36dbcd33c9e26f65002b21ce09a6303/otpauth.py#L83-L95 |
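A hedged round-trip sketch: it assumes `OtpAuth` is constructed with just the shared secret (the constructor is not shown in these rows), while `totp()` and `valid_totp()` are the instance methods used above.

```python
from otpauth import OtpAuth

auth = OtpAuth('my-shared-secret')   # constructor signature assumed
code = auth.totp(period=30)          # code for the current 30-second window
assert auth.valid_totp(code, period=30)
```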
lepture/otpauth | otpauth.py | OtpAuth.to_uri | def to_uri(self, type, label, issuer, counter=None):
"""Generate the otpauth protocol string.
:param type: Algorithm type, hotp or totp.
:param label: Label of the identifier.
:param issuer: The company, the organization or something else.
:param counter: Counter of the HOTP algorithm.
"""
type = type.lower()
if type not in ('hotp', 'totp'):
raise ValueError('type must be hotp or totp')
if type == 'hotp' and not counter:
raise ValueError('HOTP type authentication need counter')
# https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
'&issuer=%(issuer)s')
dct = dict(
type=type, label=label, issuer=issuer,
secret=self.encoded_secret, counter=counter
)
ret = url % dct
if type == 'hotp':
ret = '%s&counter=%s' % (ret, counter)
return ret | python | def to_uri(self, type, label, issuer, counter=None):
"""Generate the otpauth protocol string.
:param type: Algorithm type, hotp or totp.
:param label: Label of the identifier.
:param issuer: The company, the organization or something else.
:param counter: Counter of the HOTP algorithm.
"""
type = type.lower()
if type not in ('hotp', 'totp'):
raise ValueError('type must be hotp or totp')
if type == 'hotp' and not counter:
raise ValueError('HOTP type authentication need counter')
# https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
url = ('otpauth://%(type)s/%(label)s?secret=%(secret)s'
'&issuer=%(issuer)s')
dct = dict(
type=type, label=label, issuer=issuer,
secret=self.encoded_secret, counter=counter
)
ret = url % dct
if type == 'hotp':
ret = '%s&counter=%s' % (ret, counter)
return ret | Generate the otpauth protocol string.
:param type: Algorithm type, hotp or totp.
:param label: Label of the identifier.
:param issuer: The company, the organization or something else.
:param counter: Counter of the HOTP algorithm. | https://github.com/lepture/otpauth/blob/49914d83d36dbcd33c9e26f65002b21ce09a6303/otpauth.py#L105-L131 |
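Given the format string above, the generated URIs look like the following; the `OtpAuth('...')` construction is assumed, and `<BASE32SECRET>` stands in for whatever `encoded_secret` yields.

```python
from otpauth import OtpAuth

auth = OtpAuth('my-shared-secret')
print(auth.to_uri('totp', 'alice@example.com', 'Example'))
# otpauth://totp/alice@example.com?secret=<BASE32SECRET>&issuer=Example
print(auth.to_uri('hotp', 'alice@example.com', 'Example', counter=4))
# otpauth://hotp/alice@example.com?secret=<BASE32SECRET>&issuer=Example&counter=4
```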
lepture/otpauth | otpauth.py | OtpAuth.to_google | def to_google(self, type, label, issuer, counter=None):
"""Generate the otpauth protocol string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
"""
warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
return self.to_uri(type, label, issuer, counter) | python | def to_google(self, type, label, issuer, counter=None):
"""Generate the otpauth protocol string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead.
"""
warnings.warn('deprecated, use to_uri instead', DeprecationWarning)
return self.to_uri(type, label, issuer, counter) | Generate the otpauth protocol string for Google Authenticator.
.. deprecated:: 0.2.0
Use :func:`to_uri` instead. | https://github.com/lepture/otpauth/blob/49914d83d36dbcd33c9e26f65002b21ce09a6303/otpauth.py#L133-L140 |
nap/jaro-winkler-distance | pyjarowinkler/distance.py | get_jaro_distance | def get_jaro_distance(first, second, winkler=True, winkler_ajustment=True, scaling=0.1):
"""
:param first: word to calculate distance for
:param second: word to calculate distance with
:param winkler: same as winkler_ajustment
:param winkler_ajustment: add an adjustment factor to the Jaro of the distance
:param scaling: scaling factor for the Winkler adjustment
:return: Jaro distance adjusted (or not)
"""
if not first or not second:
raise JaroDistanceException("Cannot calculate distance from NoneType ({0}, {1})".format(
first.__class__.__name__,
second.__class__.__name__))
jaro = _score(first, second)
cl = min(len(_get_prefix(first, second)), 4)
if all([winkler, winkler_ajustment]): # 0.1 as scaling factor
return round((jaro + (scaling * cl * (1.0 - jaro))) * 100.0) / 100.0
return jaro | python | def get_jaro_distance(first, second, winkler=True, winkler_ajustment=True, scaling=0.1):
"""
:param first: word to calculate distance for
:param second: word to calculate distance with
:param winkler: same as winkler_ajustment
:param winkler_ajustment: add an adjustment factor to the Jaro of the distance
:param scaling: scaling factor for the Winkler adjustment
:return: Jaro distance adjusted (or not)
"""
if not first or not second:
raise JaroDistanceException("Cannot calculate distance from NoneType ({0}, {1})".format(
first.__class__.__name__,
second.__class__.__name__))
jaro = _score(first, second)
cl = min(len(_get_prefix(first, second)), 4)
if all([winkler, winkler_ajustment]): # 0.1 as scaling factor
return round((jaro + (scaling * cl * (1.0 - jaro))) * 100.0) / 100.0
return jaro | :param first: word to calculate distance for
:param second: word to calculate distance with
:param winkler: same as winkler_ajustment
:param winkler_ajustment: add an adjustment factor to the Jaro of the distance
:param scaling: scaling factor for the Winkler adjustment
:return: Jaro distance adjusted (or not) | https://github.com/nap/jaro-winkler-distance/blob/835bede1913232a8255c9e18533c430989c55bde/pyjarowinkler/distance.py#L18-L38 |
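The Winkler boost on the last line above is easiest to see with concrete numbers; this is plain arithmetic, with the Jaro score and prefix length picked purely for illustration.

```python
jaro = 0.83      # plain Jaro similarity from _score()
cl = 3           # common-prefix length, capped at 4
scaling = 0.1    # default scaling factor
adjusted = round((jaro + scaling * cl * (1.0 - jaro)) * 100.0) / 100.0
print(adjusted)  # 0.88 -- longer shared prefixes push the score toward 1.0
```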
carlospalol/money | money/exchange.py | ExchangeRates.install | def install(self, backend='money.exchange.SimpleBackend'):
"""Install an exchange rates backend using a python path string"""
# RADAR: Python2
if isinstance(backend, money.six.string_types):
path, name = backend.rsplit('.', 1)
module = importlib.import_module(path)
backend = getattr(module, name)()
elif isinstance(backend, type):
backend = backend()
if not isinstance(backend, BackendBase):
raise TypeError("backend '{}' is not a subclass of "
"money.xrates.BackendBase".format(backend))
self._backend = backend | python | def install(self, backend='money.exchange.SimpleBackend'):
"""Install an exchange rates backend using a python path string"""
# RADAR: Python2
if isinstance(backend, money.six.string_types):
path, name = backend.rsplit('.', 1)
module = importlib.import_module(path)
backend = getattr(module, name)()
elif isinstance(backend, type):
backend = backend()
if not isinstance(backend, BackendBase):
raise TypeError("backend '{}' is not a subclass of "
"money.xrates.BackendBase".format(backend))
self._backend = backend | Install an exchange rates backend using a python path string | https://github.com/carlospalol/money/blob/1e51f651f93edd62c16eb3d7aa034fec03096046/money/exchange.py#L81-L93 |
carlospalol/money | money/exchange.py | ExchangeRates.rate | def rate(self, currency):
"""Return quotation between the base and another currency"""
if not self._backend:
raise ExchangeBackendNotInstalled()
return self._backend.rate(currency) | python | def rate(self, currency):
"""Return quotation between the base and another currency"""
if not self._backend:
raise ExchangeBackendNotInstalled()
return self._backend.rate(currency) | Return quotation between the base and another currency | https://github.com/carlospalol/money/blob/1e51f651f93edd62c16eb3d7aa034fec03096046/money/exchange.py#L113-L117 |
carlospalol/money | money/exchange.py | ExchangeRates.quotation | def quotation(self, origin, target):
"""Return quotation between two currencies (origin, target)"""
if not self._backend:
raise ExchangeBackendNotInstalled()
return self._backend.quotation(origin, target) | python | def quotation(self, origin, target):
"""Return quotation between two currencies (origin, target)"""
if not self._backend:
raise ExchangeBackendNotInstalled()
return self._backend.quotation(origin, target) | Return quotation between two currencies (origin, target) | https://github.com/carlospalol/money/blob/1e51f651f93edd62c16eb3d7aa034fec03096046/money/exchange.py#L119-L123 |
anntzer/mplcursors | lib/mplcursors/_pick_info.py | _register_scatter | def _register_scatter():
"""
Patch `PathCollection` and `scatter` to register their return values.
This registration allows us to distinguish `PathCollection`s created by
`Axes.scatter`, which should use point-like picking, from others, which
should use path-like picking. The former is more common, so we store the
latter instead; this also lets us guess the type better if this module is
imported late.
"""
@functools.wraps(PathCollection.__init__)
def __init__(self, *args, **kwargs):
_nonscatter_pathcollections.add(self)
return __init__.__wrapped__(self, *args, **kwargs)
PathCollection.__init__ = __init__
@functools.wraps(Axes.scatter)
def scatter(*args, **kwargs):
paths = scatter.__wrapped__(*args, **kwargs)
with suppress(KeyError):
_nonscatter_pathcollections.remove(paths)
return paths
Axes.scatter = scatter | python | def _register_scatter():
"""
Patch `PathCollection` and `scatter` to register their return values.
This registration allows us to distinguish `PathCollection`s created by
`Axes.scatter`, which should use point-like picking, from others, which
should use path-like picking. The former is more common, so we store the
latter instead; this also lets us guess the type better if this module is
imported late.
"""
@functools.wraps(PathCollection.__init__)
def __init__(self, *args, **kwargs):
_nonscatter_pathcollections.add(self)
return __init__.__wrapped__(self, *args, **kwargs)
PathCollection.__init__ = __init__
@functools.wraps(Axes.scatter)
def scatter(*args, **kwargs):
paths = scatter.__wrapped__(*args, **kwargs)
with suppress(KeyError):
_nonscatter_pathcollections.remove(paths)
return paths
Axes.scatter = scatter | Patch `PathCollection` and `scatter` to register their return values.
This registration allows us to distinguish `PathCollection`s created by
`Axes.scatter`, which should use point-like picking, from others, which
should use path-like picking. The former is more common, so we store the
latter instead; this also lets us guess the type better if this module is
imported late. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L37-L60 |
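The same wrap-and-delegate idiom, stripped of the Matplotlib specifics: `functools.wraps` exposes the original callable as `__wrapped__`, so the patch can register return values and still defer to the unpatched behaviour. The `Toy` class here is purely illustrative.

```python
import functools

registry = set()

class Toy:
    def make(self):
        return object()

@functools.wraps(Toy.make)
def make(self):
    obj = make.__wrapped__(self)   # call the original, unpatched method
    registry.add(id(obj))          # register the return value, as above
    return obj

Toy.make = make                    # monkey-patch, mirroring PathCollection.__init__ above
assert id(Toy().make()) in registry
```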
anntzer/mplcursors | lib/mplcursors/_pick_info.py | _compute_projection_pick | def _compute_projection_pick(artist, path, xy):
"""
Project *xy* on *path* to obtain a `Selection` for *artist*.
*path* is first transformed to screen coordinates using the artist
transform, and the target of the returned `Selection` is transformed
back to data coordinates using the artist *axes* inverse transform. The
`Selection` `index` is returned as a float. This function returns ``None``
for degenerate inputs.
The caller is responsible for converting the index to the proper class if
needed.
"""
transform = artist.get_transform().frozen()
tpath = (path.cleaned(transform) if transform.is_affine
# `cleaned` only handles affine transforms.
else transform.transform_path(path).cleaned())
# `cleaned` should return a path where the first element is `MOVETO`, the
# following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.
# codes = path.codes
# assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)
# assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()
vertices = tpath.vertices[:-1]
codes = tpath.codes[:-1]
vertices[codes == tpath.CLOSEPOLY] = vertices[0]
# Unit vectors for each segment.
us = vertices[1:] - vertices[:-1]
ls = np.hypot(*us.T)
with np.errstate(invalid="ignore"):
# Results in 0/0 for repeated consecutive points.
us /= ls[:, None]
# Vectors from each vertex to the event (overwritten below).
vs = xy - vertices[:-1]
# Clipped dot products -- `einsum` cannot be done in place, `clip` can.
# `clip` can trigger invalid comparisons if there are nan points.
with np.errstate(invalid="ignore"):
dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0])
# Projections.
projs = vertices[:-1] + dot[:, None] * us
ds = np.hypot(*(xy - projs).T, out=vs[:, 1])
try:
argmin = np.nanargmin(ds)
dmin = ds[argmin]
except (ValueError, IndexError): # See above re: exceptions caught.
return
else:
target = AttrArray(
artist.axes.transData.inverted().transform_point(projs[argmin]))
target.index = (
(argmin + dot[argmin] / ls[argmin])
/ (path._interpolation_steps / tpath._interpolation_steps))
return Selection(artist, target, dmin, None, None) | python | def _compute_projection_pick(artist, path, xy):
"""
Project *xy* on *path* to obtain a `Selection` for *artist*.
*path* is first transformed to screen coordinates using the artist
transform, and the target of the returned `Selection` is transformed
back to data coordinates using the artist *axes* inverse transform. The
`Selection` `index` is returned as a float. This function returns ``None``
for degenerate inputs.
The caller is responsible for converting the index to the proper class if
needed.
"""
transform = artist.get_transform().frozen()
tpath = (path.cleaned(transform) if transform.is_affine
# `cleaned` only handles affine transforms.
else transform.transform_path(path).cleaned())
# `cleaned` should return a path where the first element is `MOVETO`, the
# following are `LINETO` or `CLOSEPOLY`, and the last one is `STOP`, i.e.
# codes = path.codes
# assert (codes[0], codes[-1]) == (path.MOVETO, path.STOP)
# assert np.in1d(codes[1:-1], [path.LINETO, path.CLOSEPOLY]).all()
vertices = tpath.vertices[:-1]
codes = tpath.codes[:-1]
vertices[codes == tpath.CLOSEPOLY] = vertices[0]
# Unit vectors for each segment.
us = vertices[1:] - vertices[:-1]
ls = np.hypot(*us.T)
with np.errstate(invalid="ignore"):
# Results in 0/0 for repeated consecutive points.
us /= ls[:, None]
# Vectors from each vertex to the event (overwritten below).
vs = xy - vertices[:-1]
# Clipped dot products -- `einsum` cannot be done in place, `clip` can.
# `clip` can trigger invalid comparisons if there are nan points.
with np.errstate(invalid="ignore"):
dot = np.clip(np.einsum("ij,ij->i", vs, us), 0, ls, out=vs[:, 0])
# Projections.
projs = vertices[:-1] + dot[:, None] * us
ds = np.hypot(*(xy - projs).T, out=vs[:, 1])
try:
argmin = np.nanargmin(ds)
dmin = ds[argmin]
except (ValueError, IndexError): # See above re: exceptions caught.
return
else:
target = AttrArray(
artist.axes.transData.inverted().transform_point(projs[argmin]))
target.index = (
(argmin + dot[argmin] / ls[argmin])
/ (path._interpolation_steps / tpath._interpolation_steps))
return Selection(artist, target, dmin, None, None) | Project *xy* on *path* to obtain a `Selection` for *artist*.
*path* is first transformed to screen coordinates using the artist
transform, and the target of the returned `Selection` is transformed
back to data coordinates using the artist *axes* inverse transform. The
`Selection` `index` is returned as a float. This function returns ``None``
for degenerate inputs.
The caller is responsible for converting the index to the proper class if
needed. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L201-L252 |
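The per-segment geometry reduces to a clipped projection; the standalone numbers below trace one segment through the same steps (unit vector, clipped dot product, projection, distance).

```python
import numpy as np

a, b = np.array([0.0, 0.0]), np.array([4.0, 0.0])   # one segment of the path
xy = np.array([1.0, 2.0])                           # cursor position in screen space
length = np.hypot(*(b - a))                         # 4.0
u = (b - a) / length                                # unit vector [1, 0]
dot = np.clip(np.dot(xy - a, u), 0, length)         # 1.0, clipped to the segment
proj = a + dot * u                                  # [1, 0]
print(proj, np.hypot(*(xy - proj)))                 # [1. 0.] 2.0
```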
anntzer/mplcursors | lib/mplcursors/_pick_info.py | _untransform | def _untransform(orig_xy, screen_xy, ax):
"""
Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.)
"""
tr_xy = ax.transData.transform(orig_xy)
return (
orig_xy
if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all()
else ax.transData.inverted().transform(screen_xy)) | python | def _untransform(orig_xy, screen_xy, ax):
"""
Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.)
"""
tr_xy = ax.transData.transform(orig_xy)
return (
orig_xy
if ((tr_xy == screen_xy) | np.isnan(tr_xy) & np.isnan(screen_xy)).all()
else ax.transData.inverted().transform(screen_xy)) | Return data coordinates to place an annotation at screen coordinates
*screen_xy* in axes *ax*.
*orig_xy* are the "original" coordinates as stored by the artist; they are
transformed to *screen_xy* by whatever transform the artist uses. If the
artist uses ``ax.transData``, just return *orig_xy*; else, apply
``ax.transData.inverse()`` to *screen_xy*. (The first case is more
accurate than always applying ``ax.transData.inverse()``.) | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L255-L270 |
anntzer/mplcursors | lib/mplcursors/_pick_info.py | _call_with_selection | def _call_with_selection(func):
"""Decorator that passes a `Selection` built from the non-kwonly args."""
wrapped_kwonly_params = [
param for param in inspect.signature(func).parameters.values()
if param.kind == param.KEYWORD_ONLY]
sel_sig = inspect.signature(Selection)
default_sel_sig = sel_sig.replace(
parameters=[param.replace(default=None) if param.default is param.empty
else param
for param in sel_sig.parameters.values()])
@functools.wraps(func)
def wrapper(*args, **kwargs):
extra_kw = {param.name: kwargs.pop(param.name)
for param in wrapped_kwonly_params if param.name in kwargs}
ba = default_sel_sig.bind(*args, **kwargs)
# apply_defaults
ba.arguments = ChainMap(
ba.arguments,
{name: param.default
for name, param in default_sel_sig.parameters.items()
if param.default is not param.empty})
sel = Selection(*ba.args, **ba.kwargs)
return func(sel, **extra_kw)
wrapper.__signature__ = Signature(
list(sel_sig.parameters.values()) + wrapped_kwonly_params)
return wrapper | python | def _call_with_selection(func):
"""Decorator that passes a `Selection` built from the non-kwonly args."""
wrapped_kwonly_params = [
param for param in inspect.signature(func).parameters.values()
if param.kind == param.KEYWORD_ONLY]
sel_sig = inspect.signature(Selection)
default_sel_sig = sel_sig.replace(
parameters=[param.replace(default=None) if param.default is param.empty
else param
for param in sel_sig.parameters.values()])
@functools.wraps(func)
def wrapper(*args, **kwargs):
extra_kw = {param.name: kwargs.pop(param.name)
for param in wrapped_kwonly_params if param.name in kwargs}
ba = default_sel_sig.bind(*args, **kwargs)
# apply_defaults
ba.arguments = ChainMap(
ba.arguments,
{name: param.default
for name, param in default_sel_sig.parameters.items()
if param.default is not param.empty})
sel = Selection(*ba.args, **ba.kwargs)
return func(sel, **extra_kw)
wrapper.__signature__ = Signature(
list(sel_sig.parameters.values()) + wrapped_kwonly_params)
return wrapper | Decorator that passes a `Selection` built from the non-kwonly args. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L481-L508 |
anntzer/mplcursors | lib/mplcursors/_pick_info.py | _set_valid_props | def _set_valid_props(artist, kwargs):
"""Set valid properties for the artist, dropping the others."""
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)})
return artist | python | def _set_valid_props(artist, kwargs):
"""Set valid properties for the artist, dropping the others."""
artist.set(**{k: kwargs[k] for k in kwargs if hasattr(artist, "set_" + k)})
return artist | Set valid properties for the artist, dropping the others. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_pick_info.py#L765-L768 |
carlospalol/money | money/money.py | Money.to | def to(self, currency):
"""Return equivalent money object in another currency"""
if currency == self._currency:
return self
rate = xrates.quotation(self._currency, currency)
if rate is None:
raise ExchangeRateNotFound(xrates.backend_name,
self._currency, currency)
amount = self._amount * rate
return self.__class__(amount, currency) | python | def to(self, currency):
"""Return equivalent money object in another currency"""
if currency == self._currency:
return self
rate = xrates.quotation(self._currency, currency)
if rate is None:
raise ExchangeRateNotFound(xrates.backend_name,
self._currency, currency)
amount = self._amount * rate
return self.__class__(amount, currency) | Return equivalent money object in another currency | https://github.com/carlospalol/money/blob/1e51f651f93edd62c16eb3d7aa034fec03096046/money/money.py#L245-L254 |
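With an exchange backend installed (for instance the `FixedRates` sketch shown after the `quotation` row above), conversion is a one-liner; the same-currency shortcut on the first two lines of the method is also easy to verify.

```python
from money import Money

price = Money('10.00', 'USD')
eur = price.to('EUR')                 # 10.00 * 0.9 -> a 9.00 EUR Money object
assert price.to('USD') is price       # same currency: the method returns self
```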
carlospalol/money | money/money.py | Money.format | def format(self, locale=LC_NUMERIC, pattern=None, currency_digits=True,
format_type='standard'):
"""
Return a locale-aware, currency-formatted string.
This method emulates babel.numbers.format_currency().
A specific locale identifier (language[_territory]) can be passed,
otherwise the system's default locale will be used. A custom
formatting pattern of the form "¤#,##0.00;(¤#,##0.00)"
(positive[;negative]) can also be passed, otherwise it will be
determined from the locale and the CLDR (Unicode Common Locale Data
Repository) included with Babel.
>>> m = Money('1234.567', 'EUR')
>>> m.format() # assuming the system's locale is 'en_US'
€1,234.57
>>> m.format('de_DE') # German formatting
1.234,57 €
>>> m.format('de', '#,##0 ¤') # German formatting (short), no cents
1.235 €
>>> m.format(pattern='#,##0.00 ¤¤¤') # Default locale, full name
1,235.57 euro
Learn more about this formatting syntax at:
http://www.unicode.org/reports/tr35/tr35-numbers.html
"""
if BABEL_AVAILABLE:
if BABEL_VERSION < StrictVersion('2.2'):
raise Exception('Babel {} is unsupported. '
'Please upgrade to 2.2 or higher.'.format(BABEL_VERSION))
return babel.numbers.format_currency(
self._amount, self._currency, format=pattern, locale=locale,
currency_digits=currency_digits, format_type=format_type)
else:
raise NotImplementedError("formatting requires Babel "
"(https://pypi.python.org/pypi/Babel)") | python | def format(self, locale=LC_NUMERIC, pattern=None, currency_digits=True,
format_type='standard'):
"""
Return a locale-aware, currency-formatted string.
This method emulates babel.numbers.format_currency().
A specific locale identifier (language[_territory]) can be passed,
otherwise the system's default locale will be used. A custom
formatting pattern of the form "¤#,##0.00;(¤#,##0.00)"
(positive[;negative]) can also be passed, otherwise it will be
determined from the locale and the CLDR (Unicode Common Locale Data
Repository) included with Babel.
>>> m = Money('1234.567', 'EUR')
>>> m.format() # assuming the system's locale is 'en_US'
€1,234.57
>>> m.format('de_DE') # German formatting
1.234,57 €
>>> m.format('de', '#,##0 ¤') # German formatting (short), no cents
1.235 €
>>> m.format(pattern='#,##0.00 ¤¤¤') # Default locale, full name
1,235.57 euro
Learn more about this formatting syntax at:
http://www.unicode.org/reports/tr35/tr35-numbers.html
"""
if BABEL_AVAILABLE:
if BABEL_VERSION < StrictVersion('2.2'):
raise Exception('Babel {} is unsupported. '
'Please upgrade to 2.2 or higher.'.format(BABEL_VERSION))
return babel.numbers.format_currency(
self._amount, self._currency, format=pattern, locale=locale,
currency_digits=currency_digits, format_type=format_type)
else:
raise NotImplementedError("formatting requires Babel "
"(https://pypi.python.org/pypi/Babel)") | Return a locale-aware, currency-formatted string.
This method emulates babel.numbers.format_currency().
A specific locale identifier (language[_territory]) can be passed,
otherwise the system's default locale will be used. A custom
formatting pattern of the form "¤#,##0.00;(¤#,##0.00)"
(positive[;negative]) can also be passed, otherwise it will be
determined from the locale and the CLDR (Unicode Common Locale Data
Repository) included with Babel.
>>> m = Money('1234.567', 'EUR')
>>> m.format() # assuming the system's locale is 'en_US'
€1,234.57
>>> m.format('de_DE') # German formatting
1.234,57 €
>>> m.format('de', '#,##0 ¤') # German formatting (short), no cents
1.235 €
>>> m.format(pattern='#,##0.00 ¤¤¤') # Default locale, full name
1,235.57 euro
Learn more about this formatting syntax at:
http://www.unicode.org/reports/tr35/tr35-numbers.html | https://github.com/carlospalol/money/blob/1e51f651f93edd62c16eb3d7aa034fec03096046/money/money.py#L256-L292 |
carlospalol/money | money/money.py | Money.loads | def loads(cls, s):
"""Parse from a string representation (repr)"""
try:
currency, amount = s.strip().split(' ')
return cls(amount, currency)
except ValueError as err:
# RADAR: Python2
money.six.raise_from(ValueError("failed to parse string "
" '{}': {}".format(s, err)), None) | python | def loads(cls, s):
"""Parse from a string representation (repr)"""
try:
currency, amount = s.strip().split(' ')
return cls(amount, currency)
except ValueError as err:
# RADAR: Python2
money.six.raise_from(ValueError("failed to parse string "
" '{}': {}".format(s, err)), None) | Parse from a string representation (repr) | https://github.com/carlospalol/money/blob/1e51f651f93edd62c16eb3d7aa034fec03096046/money/money.py#L295-L303 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | _get_rounded_intersection_area | def _get_rounded_intersection_area(bbox_1, bbox_2):
"""Compute the intersection area between two bboxes rounded to 8 digits."""
# The rounding allows sorting areas without floating point issues.
bbox = bbox_1.intersection(bbox_1, bbox_2)
return round(bbox.width * bbox.height, 8) if bbox else 0 | python | def _get_rounded_intersection_area(bbox_1, bbox_2):
"""Compute the intersection area between two bboxes rounded to 8 digits."""
# The rounding allows sorting areas without floating point issues.
bbox = bbox_1.intersection(bbox_1, bbox_2)
return round(bbox.width * bbox.height, 8) if bbox else 0 | Compute the intersection area between two bboxes rounded to 8 digits. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L66-L70 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | _iter_axes_subartists | def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts | python | def _iter_axes_subartists(ax):
r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*."""
yield from ax.collections
yield from ax.images
yield from ax.lines
yield from ax.patches
yield from ax.texts | r"""Yield all child `Artist`\s (*not* `Container`\s) of *ax*. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L73-L79 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | _is_alive | def _is_alive(artist):
"""Check whether *artist* is still present on its parent axes."""
return bool(artist
and artist.axes
and (artist.container in artist.axes.containers
if isinstance(artist, _pick_info.ContainerArtist) else
artist in _iter_axes_subartists(artist.axes))) | python | def _is_alive(artist):
"""Check whether *artist* is still present on its parent axes."""
return bool(artist
and artist.axes
and (artist.container in artist.axes.containers
if isinstance(artist, _pick_info.ContainerArtist) else
artist in _iter_axes_subartists(artist.axes))) | Check whether *artist* is still present on its parent axes. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L82-L88 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | _reassigned_axes_event | def _reassigned_axes_event(event, ax):
"""Reassign *event* to *ax*."""
event = copy.copy(event)
event.xdata, event.ydata = (
ax.transData.inverted().transform_point((event.x, event.y)))
return event | python | def _reassigned_axes_event(event, ax):
"""Reassign *event* to *ax*."""
event = copy.copy(event)
event.xdata, event.ydata = (
ax.transData.inverted().transform_point((event.x, event.y)))
return event | Reassign *event* to *ax*. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L91-L96 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | cursor | def cursor(pickables=None, **kwargs):
"""
Create a `Cursor` for a list of artists, containers, and axes.
Parameters
----------
pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
All artists and containers in the list or on any of the axes or
figures passed in the list are selectable by the constructed `Cursor`.
Defaults to all artists and containers on any of the figures that
:mod:`~matplotlib.pyplot` is tracking. Note that the latter will only
work when relying on pyplot, not when figures are directly instantiated
(e.g., when manually embedding Matplotlib in a GUI toolkit).
**kwargs
Keyword arguments are passed to the `Cursor` constructor.
"""
if pickables is None:
# Do not import pyplot ourselves to avoid forcing the backend.
plt = sys.modules.get("matplotlib.pyplot")
pickables = [
plt.figure(num) for num in plt.get_fignums()] if plt else []
elif (isinstance(pickables, Container)
or not isinstance(pickables, Iterable)):
pickables = [pickables]
def iter_unpack_figures(pickables):
for entry in pickables:
if isinstance(entry, Figure):
yield from entry.axes
else:
yield entry
def iter_unpack_axes(pickables):
for entry in pickables:
if isinstance(entry, Axes):
yield from _iter_axes_subartists(entry)
containers.extend(entry.containers)
elif isinstance(entry, Container):
containers.append(entry)
else:
yield entry
containers = []
artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
for container in containers:
contained = list(filter(None, container.get_children()))
for artist in contained:
with suppress(ValueError):
artists.remove(artist)
if contained:
artists.append(_pick_info.ContainerArtist(container))
return Cursor(artists, **kwargs) | python | def cursor(pickables=None, **kwargs):
"""
Create a `Cursor` for a list of artists, containers, and axes.
Parameters
----------
pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
All artists and containers in the list or on any of the axes or
figures passed in the list are selectable by the constructed `Cursor`.
Defaults to all artists and containers on any of the figures that
:mod:`~matplotlib.pyplot` is tracking. Note that the latter will only
work when relying on pyplot, not when figures are directly instantiated
(e.g., when manually embedding Matplotlib in a GUI toolkit).
**kwargs
Keyword arguments are passed to the `Cursor` constructor.
"""
if pickables is None:
# Do not import pyplot ourselves to avoid forcing the backend.
plt = sys.modules.get("matplotlib.pyplot")
pickables = [
plt.figure(num) for num in plt.get_fignums()] if plt else []
elif (isinstance(pickables, Container)
or not isinstance(pickables, Iterable)):
pickables = [pickables]
def iter_unpack_figures(pickables):
for entry in pickables:
if isinstance(entry, Figure):
yield from entry.axes
else:
yield entry
def iter_unpack_axes(pickables):
for entry in pickables:
if isinstance(entry, Axes):
yield from _iter_axes_subartists(entry)
containers.extend(entry.containers)
elif isinstance(entry, Container):
containers.append(entry)
else:
yield entry
containers = []
artists = list(iter_unpack_axes(iter_unpack_figures(pickables)))
for container in containers:
contained = list(filter(None, container.get_children()))
for artist in contained:
with suppress(ValueError):
artists.remove(artist)
if contained:
artists.append(_pick_info.ContainerArtist(container))
return Cursor(artists, **kwargs) | Create a `Cursor` for a list of artists, containers, and axes.
Parameters
----------
pickables : Optional[List[Union[Artist, Container, Axes, Figure]]]
All artists and containers in the list or on any of the axes or
figures passed in the list are selectable by the constructed `Cursor`.
Defaults to all artists and containers on any of the figures that
:mod:`~matplotlib.pyplot` is tracking. Note that the latter will only
work when relying on pyplot, not when figures are directly instantiated
(e.g., when manually embedding Matplotlib in a GUI toolkit).
**kwargs
Keyword arguments are passed to the `Cursor` constructor. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L546-L601 |
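A typical pyplot session, combining the default `cursor()` behaviour above with the `connect("add")` callback style documented further down; this is a usage sketch, not part of the dataset row.

```python
import matplotlib.pyplot as plt
import mplcursors

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], label="squares")

c = mplcursors.cursor()           # no argument: pick up every pyplot-tracked figure

@c.connect("add")
def on_add(sel):
    # Replace the default annotation text with the selected artist's label.
    sel.annotation.set_text(sel.artist.get_label())

plt.show()
```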
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | Cursor.selections | def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections) | python | def selections(self):
r"""The tuple of current `Selection`\s."""
for sel in self._selections:
if sel.annotation.axes is None:
raise RuntimeError("Annotation unexpectedly removed; "
"use 'cursor.remove_selection' instead")
return tuple(self._selections) | r"""The tuple of current `Selection`\s. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L259-L265 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | Cursor.add_selection | def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel | python | def add_selection(self, pi):
"""
Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed.
"""
# pi: "pick_info", i.e. an incomplete selection.
# Pre-fetch the figure and axes, as callbacks may actually unset them.
figure = pi.artist.figure
axes = pi.artist.axes
if axes.get_renderer_cache() is None:
figure.canvas.draw() # Needed by draw_artist below anyways.
renderer = pi.artist.axes.get_renderer_cache()
ann = pi.artist.axes.annotate(
_pick_info.get_ann_text(*pi), xy=pi.target,
xytext=(np.nan, np.nan),
ha=_MarkedStr("center"), va=_MarkedStr("center"),
visible=self.visible,
**self.annotation_kwargs)
ann.draggable(use_blit=not self._multiple)
extras = []
if self._highlight:
hl = self.add_highlight(*pi)
if hl:
extras.append(hl)
sel = pi._replace(annotation=ann, extras=extras)
self._selections.append(sel)
for cb in self._callbacks["add"]:
cb(sel)
# Check that `ann.axes` is still set, as callbacks may have removed the
# annotation.
if ann.axes and ann.xyann == (np.nan, np.nan):
fig_bbox = figure.get_window_extent()
ax_bbox = axes.get_window_extent()
overlaps = []
for idx, annotation_position in enumerate(
self.annotation_positions):
ann.set(**annotation_position)
# Work around matplotlib/matplotlib#7614: position update is
# missing.
ann.update_positions(renderer)
bbox = ann.get_window_extent(renderer)
overlaps.append(
(_get_rounded_intersection_area(fig_bbox, bbox),
_get_rounded_intersection_area(ax_bbox, bbox),
# Avoid needlessly jumping around by breaking ties using
# the last used position as default.
idx == self._last_auto_position))
auto_position = max(range(len(overlaps)), key=overlaps.__getitem__)
ann.set(**self.annotation_positions[auto_position])
self._last_auto_position = auto_position
else:
if isinstance(ann.get_ha(), _MarkedStr):
ann.set_ha({-1: "right", 0: "center", 1: "left"}[
np.sign(np.nan_to_num(ann.xyann[0]))])
if isinstance(ann.get_va(), _MarkedStr):
ann.set_va({-1: "top", 0: "center", 1: "bottom"}[
np.sign(np.nan_to_num(ann.xyann[1]))])
if (extras
or len(self.selections) > 1 and not self._multiple
or not figure.canvas.supports_blit):
# Either:
# - there may be more things to draw, or
# - annotation removal will make a full redraw necessary, or
# - blitting is not (yet) supported.
figure.canvas.draw_idle()
elif ann.axes:
# Fast path, only needed if the annotation has not been immediately
# removed.
figure.draw_artist(ann)
# Explicit argument needed on MacOSX backend.
figure.canvas.blit(figure.bbox)
# Removal comes after addition so that the fast blitting path works.
if not self._multiple:
for sel in self.selections[:-1]:
self.remove_selection(sel)
return sel | Create an annotation for a `Selection` and register it.
Returns a new `Selection`, that has been registered by the `Cursor`,
with the added annotation set in the :attr:`annotation` field and, if
applicable, the highlighting artist in the :attr:`extras` field.
Emits the ``"add"`` event with the new `Selection` as argument. When
the event is emitted, the position of the annotation is temporarily
set to ``(nan, nan)``; if this position is not explicitly set by a
callback, then a suitable position will be automatically computed.
Likewise, if the text alignment is not explicitly set but the position
is, then a suitable alignment will be automatically computed. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L284-L372 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | Cursor.add_highlight | def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl | python | def add_highlight(self, artist, *args, **kwargs):
"""
Create, add, and return a highlighting artist.
This method should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection.
"""
hl = _pick_info.make_highlight(
artist, *args,
**ChainMap({"highlight_kwargs": self.highlight_kwargs}, kwargs))
if hl:
artist.axes.add_artist(hl)
return hl | Create, add, and return a highlighting artist.
This method should be called with an "unpacked" `Selection`,
possibly with some fields set to None.
It is up to the caller to register the artist with the proper
`Selection` (by calling ``sel.extras.append`` on the result of this
method) in order to ensure cleanup upon deselection. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L374-L390 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | Cursor.connect | def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func | python | def connect(self, event, func=None):
"""
Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False)
"""
if event not in self._callbacks:
raise ValueError("{!r} is not a valid cursor event".format(event))
if func is None:
return partial(self.connect, event)
self._callbacks[event].append(func)
return func | Connect a callback to a `Cursor` event; return the callback.
Two events can be connected to:
- callbacks connected to the ``"add"`` event are called when a
`Selection` is added, with that selection as only argument;
- callbacks connected to the ``"remove"`` event are called when a
`Selection` is removed, with that selection as only argument.
This method can also be used as a decorator::
@cursor.connect("add")
def on_add(sel):
...
Examples of callbacks::
# Change the annotation text and alignment:
lambda sel: sel.annotation.set(
text=sel.artist.get_label(), # or use e.g. sel.target.index
ha="center", va="bottom")
# Make label non-draggable:
lambda sel: sel.draggable(False) | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L392-L424 |
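A hedged end-to-end sketch of the callback pattern documented above, assuming matplotlib and mplcursors are installed; the plot data and labels are placeholders.

import matplotlib.pyplot as plt
import mplcursors

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], label="demo curve")

cursor = mplcursors.cursor()

@cursor.connect("add")
def on_add(sel):
    # Show the hovered artist's label instead of the default coordinate text.
    sel.annotation.set(text=sel.artist.get_label(), ha="center", va="bottom")

@cursor.connect("remove")
def on_remove(sel):
    print("selection removed from", sel.artist.get_label())

plt.show()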
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | Cursor.disconnect | def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event)) | python | def disconnect(self, event, cb):
"""
Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed.
"""
try:
self._callbacks[event].remove(cb)
except KeyError:
raise ValueError("{!r} is not a valid cursor event".format(event))
except ValueError:
raise ValueError("Callback {} is not registered".format(event)) | Disconnect a previously connected callback.
If a callback is connected multiple times, only one connection is
removed. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L426-L438 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | Cursor.remove | def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self) | python | def remove(self):
"""
Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected.
"""
for disconnectors in self._disconnectors:
disconnectors()
for sel in self.selections:
self.remove_selection(sel)
for s in type(self)._keep_alive.values():
with suppress(KeyError):
s.remove(self) | Remove a cursor.
Remove all `Selection`\\s, disconnect all callbacks, and allow the
cursor to be garbage collected. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L440-L453 |
anntzer/mplcursors | lib/mplcursors/_mplcursors.py | Cursor.remove_selection | def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle() | python | def remove_selection(self, sel):
"""Remove a `Selection`."""
self._selections.remove(sel)
# <artist>.figure will be unset so we save them first.
figures = {artist.figure for artist in [sel.annotation] + sel.extras}
# ValueError is raised if the artist has already been removed.
with suppress(ValueError):
sel.annotation.remove()
for artist in sel.extras:
with suppress(ValueError):
artist.remove()
for cb in self._callbacks["remove"]:
cb(sel)
for figure in figures:
figure.canvas.draw_idle() | Remove a `Selection`. | https://github.com/anntzer/mplcursors/blob/a4bce17a978162b5a1837cc419114c910e7992f9/lib/mplcursors/_mplcursors.py#L529-L543 |
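A short sketch combining `remove_selection` and `remove` for programmatic cleanup, continuing the `cursor` object from the sketch above.

for sel in list(cursor.selections):   # copy first, since removal mutates the list
    cursor.remove_selection(sel)
cursor.remove()                        # disconnect callbacks and allow garbage collection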
mozilla/jupyter-notebook-gist | src/jupyter_notebook_gist/handlers.py | extract_code_from_args | def extract_code_from_args(args):
"""
Extracts the access code from the arguments dictionary (given back
from github)
"""
if args is None:
raise_error("Couldn't extract GitHub authentication code "
"from response")
# TODO: Is there a case where the length of the error will be < 0?
error = args.get("error_description", None)
if error is not None:
if len(error) >= 0:
raise_github_error(error)
else:
raise_error("Something went wrong")
access_code = args.get("code", None)
# access_code is supposed to be a list with 1 thing in it
if not isinstance(access_code, list) or access_code[0] is None or \
len(access_code) != 1 or len(access_code[0]) <= 0:
raise_error("Couldn't extract GitHub authentication code from "
"response")
# If we get here, everything was good - no errors
access_code = access_code[0].decode('ascii')
return access_code | python | def extract_code_from_args(args):
"""
Extracts the access code from the arguments dictionary (given back
from github)
"""
if args is None:
raise_error("Couldn't extract GitHub authentication code "
"from response")
# TODO: Is there a case where the length of the error will be < 0?
error = args.get("error_description", None)
if error is not None:
if len(error) >= 0:
raise_github_error(error)
else:
raise_error("Something went wrong")
access_code = args.get("code", None)
# access_code is supposed to be a list with 1 thing in it
if not isinstance(access_code, list) or access_code[0] is None or \
len(access_code) != 1 or len(access_code[0]) <= 0:
raise_error("Couldn't extract GitHub authentication code from "
"response")
# If we get here, everything was good - no errors
access_code = access_code[0].decode('ascii')
return access_code | Extracts the access code from the arguments dictionary (given back
from github) | https://github.com/mozilla/jupyter-notebook-gist/blob/dae052f0998fa80dff515345cd516b586eff8e43/src/jupyter_notebook_gist/handlers.py#L163-L191 |
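A hypothetical illustration of the expected input shape, a Tornado-style argument mapping where values are lists of byte strings; the code value below is a placeholder, not a real authentication code.

from jupyter_notebook_gist.handlers import extract_code_from_args

args = {"code": [b"abc123deadbeef"]}          # placeholder auth code from the OAuth redirect
print(extract_code_from_args(args))           # -> 'abc123deadbeef'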
mozilla/jupyter-notebook-gist | src/jupyter_notebook_gist/handlers.py | BaseHandler.request_access_token | def request_access_token(self, access_code):
"Request access token from GitHub"
token_response = request_session.post(
"https://github.com/login/oauth/access_token",
data={
"client_id": self.oauth_client_id,
"client_secret": self.oauth_client_secret,
"code": access_code
},
headers={"Accept": "application/json"},
)
return helper_request_access_token(token_response.json()) | python | def request_access_token(self, access_code):
"Request access token from GitHub"
token_response = request_session.post(
"https://github.com/login/oauth/access_token",
data={
"client_id": self.oauth_client_id,
"client_secret": self.oauth_client_secret,
"code": access_code
},
headers={"Accept": "application/json"},
)
return helper_request_access_token(token_response.json()) | Request access token from GitHub | https://github.com/mozilla/jupyter-notebook-gist/blob/dae052f0998fa80dff515345cd516b586eff8e43/src/jupyter_notebook_gist/handlers.py#L39-L50 |
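For reference, a successful token exchange with the `Accept: application/json` header yields a small JSON object roughly like the placeholder below; this is the value handed to `helper_request_access_token`. All values here are placeholders.

token_json = {
    "access_token": "gho_0000000000000000",   # placeholder, not a real token
    "token_type": "bearer",
    "scope": "gist",
}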
rdobson/python-hwinfo | hwinfo/util/__init__.py | combine_dicts | def combine_dicts(recs):
"""Combine a list of recs, appending values to matching keys"""
if not recs:
return None
if len(recs) == 1:
return recs.pop()
new_rec = {}
for rec in recs:
for k, v in rec.iteritems():
if k in new_rec:
new_rec[k] = "%s, %s" % (new_rec[k], v)
else:
new_rec[k] = v
return new_rec | python | def combine_dicts(recs):
"""Combine a list of recs, appending values to matching keys"""
if not recs:
return None
if len(recs) == 1:
return recs.pop()
new_rec = {}
for rec in recs:
for k, v in rec.iteritems():
if k in new_rec:
new_rec[k] = "%s, %s" % (new_rec[k], v)
else:
new_rec[k] = v
return new_rec | Combine a list of recs, appending values to matching keys | https://github.com/rdobson/python-hwinfo/blob/ba93a112dac6863396a053636ea87df027daa5de/hwinfo/util/__init__.py#L5-L20 |
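A behaviour sketch for `combine_dicts`; note the module targets Python 2 (hence `iteritems`), and the record contents below are made up for illustration.

from hwinfo.util import combine_dicts

recs = [
    {'vendor': 'Intel', 'driver': 'e1000e'},
    {'vendor': 'Intel', 'driver': 'igb'},
]
combined = combine_dicts(recs)
# combined == {'vendor': 'Intel, Intel', 'driver': 'e1000e, igb'}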
rdobson/python-hwinfo | hwinfo/tools/inspector.py | combine_recs | def combine_recs(rec_list, key):
"""Use a common key to combine a list of recs"""
final_recs = {}
for rec in rec_list:
rec_key = rec[key]
if rec_key in final_recs:
for k, v in rec.iteritems():
if k in final_recs[rec_key] and final_recs[rec_key][k] != v:
raise Exception("Mis-match for key '%s'" % k)
final_recs[rec_key][k] = v
else:
final_recs[rec_key] = rec
return final_recs.values() | python | def combine_recs(rec_list, key):
"""Use a common key to combine a list of recs"""
final_recs = {}
for rec in rec_list:
rec_key = rec[key]
if rec_key in final_recs:
for k, v in rec.iteritems():
if k in final_recs[rec_key] and final_recs[rec_key][k] != v:
raise Exception("Mis-match for key '%s'" % k)
final_recs[rec_key][k] = v
else:
final_recs[rec_key] = rec
return final_recs.values() | Use a common key to combine a list of recs | https://github.com/rdobson/python-hwinfo/blob/ba93a112dac6863396a053636ea87df027daa5de/hwinfo/tools/inspector.py#L195-L207 |
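A matching sketch for `combine_recs`: records sharing the chosen key are merged, and conflicting values for the same field raise the exception above. As before this is Python 2 code with made-up record contents.

from hwinfo.tools.inspector import combine_recs

recs = [
    {'device': 'eth0', 'mac': 'aa:bb:cc:dd:ee:ff'},
    {'device': 'eth0', 'speed': '10Gb/s'},
    {'device': 'eth1', 'mac': '11:22:33:44:55:66'},
]
merged = combine_recs(recs, 'device')
# merged holds two records: eth0 (with both 'mac' and 'speed') and eth1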
rdobson/python-hwinfo | hwinfo/tools/inspector.py | main | def main():
"""Entry Point"""
parser = ArgumentParser(prog="hwinfo")
filter_choices = ['bios', 'nic', 'storage', 'gpu', 'cpu']
parser.add_argument("-f", "--filter", choices=filter_choices, help="Query a specific class.")
parser.add_argument("-m", "--machine", default='localhost', help="Remote host address.")
parser.add_argument("-u", "--username", help="Username for remote host.")
parser.add_argument("-p", "--password", help="Password for remote host.")
parser.add_argument("-l", "--logs", help="Path to the directory with the logfiles.")
parser.add_argument("-e", "--export", action="store_true", help="Export result in JSON format.")
args = parser.parse_args()
validate_args(args)
if args.logs:
if ".tar" in args.logs:
host = HostFromTarball(args.logs)
else:
host = HostFromLogs(args.logs)
else:
host = Host(args.machine, args.username, args.password)
options = []
if args.filter:
filter_args = args.filter.split(',')
for arg in filter_args:
options.append(arg.strip())
else:
options = filter_choices
if args.export:
print export_system_info(host, options)
else:
print system_info(host, options) | python | def main():
"""Entry Point"""
parser = ArgumentParser(prog="hwinfo")
filter_choices = ['bios', 'nic', 'storage', 'gpu', 'cpu']
parser.add_argument("-f", "--filter", choices=filter_choices, help="Query a specific class.")
parser.add_argument("-m", "--machine", default='localhost', help="Remote host address.")
parser.add_argument("-u", "--username", help="Username for remote host.")
parser.add_argument("-p", "--password", help="Password for remote host.")
parser.add_argument("-l", "--logs", help="Path to the directory with the logfiles.")
parser.add_argument("-e", "--export", action="store_true", help="Export result in JSON format.")
args = parser.parse_args()
validate_args(args)
if args.logs:
if ".tar" in args.logs:
host = HostFromTarball(args.logs)
else:
host = HostFromLogs(args.logs)
else:
host = Host(args.machine, args.username, args.password)
options = []
if args.filter:
filter_args = args.filter.split(',')
for arg in filter_args:
options.append(arg.strip())
else:
options = filter_choices
if args.export:
print export_system_info(host, options)
else:
print system_info(host, options) | Entry Point | https://github.com/rdobson/python-hwinfo/blob/ba93a112dac6863396a053636ea87df027daa5de/hwinfo/tools/inspector.py#L398-L434 |
rdobson/python-hwinfo | hwinfo/tools/inspector.py | HostFromTarball._load_from_file | def _load_from_file(self, filename):
"""Find filename in tar, and load it"""
if filename in self.fdata:
return self.fdata[filename]
else:
filepath = find_in_tarball(self.tarloc, filename)
return read_from_tarball(self.tarloc, filepath) | python | def _load_from_file(self, filename):
"""Find filename in tar, and load it"""
if filename in self.fdata:
return self.fdata[filename]
else:
filepath = find_in_tarball(self.tarloc, filename)
return read_from_tarball(self.tarloc, filepath) | Find filename in tar, and load it | https://github.com/rdobson/python-hwinfo/blob/ba93a112dac6863396a053636ea87df027daa5de/hwinfo/tools/inspector.py#L272-L278 |
LuminosoInsight/langcodes | langcodes/tag_parser.py | parse_tag | def parse_tag(tag):
"""
Parse the syntax of a language tag, without looking up anything in the
registry, yet. Returns a list of (type, value) tuples indicating what
information will need to be looked up.
"""
tag = normalize_characters(tag)
if tag in EXCEPTIONS:
return [('grandfathered', tag)]
else:
# The first subtag is always either the language code, or 'x' to mark
# the entire tag as private-use. Other subtags are distinguished
# by their length and format, but the language code is distinguished
# entirely by the fact that it is required to come first.
subtags = tag.split('-')
if subtags[0] == 'x':
if len(subtags) == 1:
raise LanguageTagError("'x' is not a language tag on its own")
else:
# the entire language tag is private use, but we know that,
# whatever it is, it fills the "language" slot
return [('language', tag)]
elif len(subtags[0]) >= 2:
return [('language', subtags[0])] + parse_subtags(subtags[1:])
else:
subtag_error(subtags[0], 'a language code') | python | def parse_tag(tag):
"""
Parse the syntax of a language tag, without looking up anything in the
registry, yet. Returns a list of (type, value) tuples indicating what
information will need to be looked up.
"""
tag = normalize_characters(tag)
if tag in EXCEPTIONS:
return [('grandfathered', tag)]
else:
# The first subtag is always either the language code, or 'x' to mark
# the entire tag as private-use. Other subtags are distinguished
# by their length and format, but the language code is distinguished
# entirely by the fact that it is required to come first.
subtags = tag.split('-')
if subtags[0] == 'x':
if len(subtags) == 1:
raise LanguageTagError("'x' is not a language tag on its own")
else:
# the entire language tag is private use, but we know that,
# whatever it is, it fills the "language" slot
return [('language', tag)]
elif len(subtags[0]) >= 2:
return [('language', subtags[0])] + parse_subtags(subtags[1:])
else:
subtag_error(subtags[0], 'a language code') | Parse the syntax of a language tag, without looking up anything in the
registry, yet. Returns a list of (type, value) tuples indicating what
information will need to be looked up. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/tag_parser.py#L105-L130 |
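A small sketch of the tuples `parse_tag` produces, inferred from the parsing rules above.

from langcodes.tag_parser import parse_tag

print(parse_tag('zh-Hant-TW'))
# [('language', 'zh'), ('script', 'Hant'), ('region', 'TW')]
print(parse_tag('x-my-private-tag'))
# [('language', 'x-my-private-tag')]  -- a fully private-use tag fills the language slot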
LuminosoInsight/langcodes | langcodes/tag_parser.py | parse_subtags | def parse_subtags(subtags, expect=EXTLANG):
"""
Parse everything that comes after the language tag: scripts, regions,
variants, and assorted extensions.
"""
# We parse the parts of a language code recursively: each step of
# language code parsing handles one component of the code, recurses
# to handle the rest of the code, and adds what it found onto the
# list of things that were in the rest of the code.
#
# This could just as well have been iterative, but the loops would have
# been convoluted.
#
# So here's the base case.
if not subtags:
return []
# There's a subtag that comes next. We need to find out what it is.
#
# The primary thing that distinguishes different types of subtags is
# length, but the subtags also come in a specified order. The 'expect'
# parameter keeps track of where we are in that order. expect=REGION,
# for example, means we're expecting a region code, or anything later
# (because everything but the language is optional).
subtag = subtags[0]
tag_length = len(subtag)
# In the usual case, our goal is to recognize what kind of tag this is,
# and set it in 'tagtype' -- as an integer, so we can compare where it
# should go in order. You can see the enumerated list of tagtypes above,
# where the SUBTAG_TYPES global is defined.
tagtype = None
if tag_length == 0 or tag_length > 8:
# Unless you're inside a private use tag or something -- in which case,
# you're not in this function at the moment -- every component needs to
# be between 1 and 8 characters.
subtag_error(subtag, '1-8 characters')
elif tag_length == 1:
# A one-character subtag introduces an extension, which can itself have
# sub-subtags, so we dispatch to a different function at this point.
#
# We don't need to check anything about the order, because extensions
# necessarily come last.
return parse_extension(subtags)
elif tag_length == 2:
if subtag.isalpha():
# Two-letter alphabetic subtags are regions. These are the only
# two-character subtags after the language.
tagtype = REGION
elif tag_length == 3:
if subtag.isalpha():
# Three-letter alphabetic subtags are 'extended languages'.
# It's allowed for there to be up to three of them in a row, so we
# need another function to enforce that. Before we dispatch to that
# function, though, we need to check whether we're in the right
# place in order.
if expect <= EXTLANG:
return parse_extlang(subtags)
else:
order_error(subtag, EXTLANG, expect)
elif subtag.isdigit():
# Three-digit subtags are broad regions, such as Latin America
# (419).
tagtype = REGION
elif tag_length == 4:
if subtag.isalpha():
# Four-letter alphabetic subtags are scripts.
tagtype = SCRIPT
elif subtag[0].isdigit():
# Four-character subtags that start with a digit are variants.
tagtype = VARIANT
else:
# Tags of length 5-8 are variants.
tagtype = VARIANT
# That's the end of the big elif block for figuring out what kind of
# subtag we have based on its length. Now we should do something with that
# kind of subtag.
if tagtype is None:
# We haven't recognized a type of tag. This subtag just doesn't fit the
# standard.
subtag_error(subtag)
elif tagtype < expect:
# We got a tag type that was supposed to appear earlier in the order.
order_error(subtag, tagtype, expect)
else:
# We've recognized a subtag of a particular type. If it's a region or
# script, we expect the next subtag to be a strictly later type, because
# there can be at most one region and one script. Otherwise, we expect
# the next subtag to be the type we got or later.
if tagtype in (SCRIPT, REGION):
expect = tagtype + 1
else:
expect = tagtype
# Get the name of this subtag type instead of its integer value.
typename = SUBTAG_TYPES[tagtype]
# Some subtags are conventionally written with capitalization. Apply
# those conventions.
if tagtype == SCRIPT:
subtag = subtag.title()
elif tagtype == REGION:
subtag = subtag.upper()
# Recurse on the remaining subtags.
return [(typename, subtag)] + parse_subtags(subtags[1:], expect) | python | def parse_subtags(subtags, expect=EXTLANG):
"""
Parse everything that comes after the language tag: scripts, regions,
variants, and assorted extensions.
"""
# We parse the parts of a language code recursively: each step of
# language code parsing handles one component of the code, recurses
# to handle the rest of the code, and adds what it found onto the
# list of things that were in the rest of the code.
#
# This could just as well have been iterative, but the loops would have
# been convoluted.
#
# So here's the base case.
if not subtags:
return []
# There's a subtag that comes next. We need to find out what it is.
#
# The primary thing that distinguishes different types of subtags is
# length, but the subtags also come in a specified order. The 'expect'
# parameter keeps track of where we are in that order. expect=REGION,
# for example, means we're expecting a region code, or anything later
# (because everything but the language is optional).
subtag = subtags[0]
tag_length = len(subtag)
# In the usual case, our goal is to recognize what kind of tag this is,
# and set it in 'tagtype' -- as an integer, so we can compare where it
# should go in order. You can see the enumerated list of tagtypes above,
# where the SUBTAG_TYPES global is defined.
tagtype = None
if tag_length == 0 or tag_length > 8:
# Unless you're inside a private use tag or something -- in which case,
# you're not in this function at the moment -- every component needs to
# be between 1 and 8 characters.
subtag_error(subtag, '1-8 characters')
elif tag_length == 1:
# A one-character subtag introduces an extension, which can itself have
# sub-subtags, so we dispatch to a different function at this point.
#
# We don't need to check anything about the order, because extensions
# necessarily come last.
return parse_extension(subtags)
elif tag_length == 2:
if subtag.isalpha():
# Two-letter alphabetic subtags are regions. These are the only
# two-character subtags after the language.
tagtype = REGION
elif tag_length == 3:
if subtag.isalpha():
# Three-letter alphabetic subtags are 'extended languages'.
# It's allowed for there to be up to three of them in a row, so we
# need another function to enforce that. Before we dispatch to that
# function, though, we need to check whether we're in the right
# place in order.
if expect <= EXTLANG:
return parse_extlang(subtags)
else:
order_error(subtag, EXTLANG, expect)
elif subtag.isdigit():
# Three-digit subtags are broad regions, such as Latin America
# (419).
tagtype = REGION
elif tag_length == 4:
if subtag.isalpha():
# Four-letter alphabetic subtags are scripts.
tagtype = SCRIPT
elif subtag[0].isdigit():
# Four-character subtags that start with a digit are variants.
tagtype = VARIANT
else:
# Tags of length 5-8 are variants.
tagtype = VARIANT
# That's the end of the big elif block for figuring out what kind of
# subtag we have based on its length. Now we should do something with that
# kind of subtag.
if tagtype is None:
# We haven't recognized a type of tag. This subtag just doesn't fit the
# standard.
subtag_error(subtag)
elif tagtype < expect:
# We got a tag type that was supposed to appear earlier in the order.
order_error(subtag, tagtype, expect)
else:
# We've recognized a subtag of a particular type. If it's a region or
# script, we expect the next subtag to be a strictly later type, because
# there can be at most one region and one script. Otherwise, we expect
# the next subtag to be the type we got or later.
if tagtype in (SCRIPT, REGION):
expect = tagtype + 1
else:
expect = tagtype
# Get the name of this subtag type instead of its integer value.
typename = SUBTAG_TYPES[tagtype]
# Some subtags are conventionally written with capitalization. Apply
# those conventions.
if tagtype == SCRIPT:
subtag = subtag.title()
elif tagtype == REGION:
subtag = subtag.upper()
# Recurse on the remaining subtags.
return [(typename, subtag)] + parse_subtags(subtags[1:], expect) | Parse everything that comes after the language tag: scripts, regions,
variants, and assorted extensions. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/tag_parser.py#L133-L249 |
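The ordering enforcement above can be seen by feeding subtags out of order; a script after a region is rejected, matching the `standardize_tag` doctest further below.

from langcodes.tag_parser import parse_tag, LanguageTagError

try:
    parse_tag('es-MX-Latn')   # region first, script second: out of order
except LanguageTagError as err:
    print(err)
    # This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.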
LuminosoInsight/langcodes | langcodes/tag_parser.py | parse_extlang | def parse_extlang(subtags):
"""
Parse an 'extended language' tag, which consists of 1 to 3 three-letter
language codes.
Extended languages are used for distinguishing dialects/sublanguages
(depending on your view) of macrolanguages such as Arabic, Bahasa Malay,
and Chinese.
It's supposed to also be acceptable to just use the sublanguage as the
primary language code, and your code should know what's a macrolanguage of
what. For example, 'zh-yue' and 'yue' are the same language (Cantonese),
and differ only in whether they explicitly spell out that Cantonese is a
kind of Chinese.
"""
index = 0
parsed = []
while index < len(subtags) and len(subtags[index]) == 3 and index < 3:
parsed.append(('extlang', subtags[index]))
index += 1
return parsed + parse_subtags(subtags[index:], SCRIPT) | python | def parse_extlang(subtags):
"""
Parse an 'extended language' tag, which consists of 1 to 3 three-letter
language codes.
Extended languages are used for distinguishing dialects/sublanguages
(depending on your view) of macrolanguages such as Arabic, Bahasa Malay,
and Chinese.
It's supposed to also be acceptable to just use the sublanguage as the
primary language code, and your code should know what's a macrolanguage of
what. For example, 'zh-yue' and 'yue' are the same language (Cantonese),
and differ only in whether they explicitly spell out that Cantonese is a
kind of Chinese.
"""
index = 0
parsed = []
while index < len(subtags) and len(subtags[index]) == 3 and index < 3:
parsed.append(('extlang', subtags[index]))
index += 1
return parsed + parse_subtags(subtags[index:], SCRIPT) | Parse an 'extended language' tag, which consists of 1 to 3 three-letter
language codes.
Extended languages are used for distinguishing dialects/sublanguages
(depending on your view) of macrolanguages such as Arabic, Bahasa Malay,
and Chinese.
It's supposed to also be acceptable to just use the sublanguage as the
primary language code, and your code should know what's a macrolanguage of
what. For example, 'zh-yue' and 'yue' are the same language (Cantonese),
and differ only in whether they explicitly spell out that Cantonese is a
kind of Chinese. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/tag_parser.py#L252-L272 |
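A one-line sketch of the extlang path, a three-letter code placed right after the language.

from langcodes.tag_parser import parse_tag

print(parse_tag('zh-yue'))
# [('language', 'zh'), ('extlang', 'yue')]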
LuminosoInsight/langcodes | langcodes/tag_parser.py | parse_extension | def parse_extension(subtags):
"""
An extension tag consists of a 'singleton' -- a one-character subtag --
followed by other subtags. Extension tags are in the BCP 47 syntax, but
their meaning is outside the scope of the standard.
For example, there's the u- extension, which is used for setting Unicode
properties in some context I'm not aware of.
If the singleton is 'x', it's a private use extension, and consumes the
rest of the tag. Otherwise, it stops at the next singleton.
"""
subtag = subtags[0]
if len(subtags) == 1:
raise LanguageTagError(
"The subtag %r must be followed by something" % subtag
)
if subtag == 'x':
# Private use. Everything after this is arbitrary codes that we
# can't look up.
return [('private', '-'.join(subtags))]
else:
# Look for the next singleton, if there is one.
boundary = 1
while boundary < len(subtags) and len(subtags[boundary]) != 1:
boundary += 1
# We've parsed a complete extension subtag. Return to the main
# parse_subtags function, but expect to find nothing but more
# extensions at this point.
return ([('extension', '-'.join(subtags[:boundary]))]
+ parse_subtags(subtags[boundary:], EXTENSION)) | python | def parse_extension(subtags):
"""
An extension tag consists of a 'singleton' -- a one-character subtag --
followed by other subtags. Extension tags are in the BCP 47 syntax, but
their meaning is outside the scope of the standard.
For example, there's the u- extension, which is used for setting Unicode
properties in some context I'm not aware of.
If the singleton is 'x', it's a private use extension, and consumes the
rest of the tag. Otherwise, it stops at the next singleton.
"""
subtag = subtags[0]
if len(subtags) == 1:
raise LanguageTagError(
"The subtag %r must be followed by something" % subtag
)
if subtag == 'x':
# Private use. Everything after this is arbitrary codes that we
# can't look up.
return [('private', '-'.join(subtags))]
else:
# Look for the next singleton, if there is one.
boundary = 1
while boundary < len(subtags) and len(subtags[boundary]) != 1:
boundary += 1
# We've parsed a complete extension subtag. Return to the main
# parse_subtags function, but expect to find nothing but more
# extensions at this point.
return ([('extension', '-'.join(subtags[:boundary]))]
+ parse_subtags(subtags[boundary:], EXTENSION)) | An extension tag consists of a 'singleton' -- a one-character subtag --
followed by other subtags. Extension tags are in the BCP 47 syntax, but
their meaning is outside the scope of the standard.
For example, there's the u- extension, which is used for setting Unicode
properties in some context I'm not aware of.
If the singleton is 'x', it's a private use extension, and consumes the
rest of the tag. Otherwise, it stops at the next singleton. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/tag_parser.py#L275-L308 |
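Extension and private-use subtags, sketched with the parser above.

from langcodes.tag_parser import parse_tag

print(parse_tag('de-u-co-phonebk'))
# [('language', 'de'), ('extension', 'u-co-phonebk')]
print(parse_tag('en-x-mine'))
# [('language', 'en'), ('private', 'x-mine')]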
LuminosoInsight/langcodes | langcodes/tag_parser.py | order_error | def order_error(subtag, got, expected):
"""
Output an error indicating that tags were out of order.
"""
options = SUBTAG_TYPES[expected:]
if len(options) == 1:
expect_str = options[0]
elif len(options) == 2:
expect_str = '%s or %s' % (options[0], options[1])
else:
expect_str = '%s, or %s' % (', '.join(options[:-1]), options[-1])
got_str = SUBTAG_TYPES[got]
raise LanguageTagError("This %s subtag, %r, is out of place. "
"Expected %s." % (got_str, subtag, expect_str)) | python | def order_error(subtag, got, expected):
"""
Output an error indicating that tags were out of order.
"""
options = SUBTAG_TYPES[expected:]
if len(options) == 1:
expect_str = options[0]
elif len(options) == 2:
expect_str = '%s or %s' % (options[0], options[1])
else:
expect_str = '%s, or %s' % (', '.join(options[:-1]), options[-1])
got_str = SUBTAG_TYPES[got]
raise LanguageTagError("This %s subtag, %r, is out of place. "
"Expected %s." % (got_str, subtag, expect_str)) | Output an error indicating that tags were out of order. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/tag_parser.py#L315-L328 |
LuminosoInsight/langcodes | langcodes/__init__.py | standardize_tag | def standardize_tag(tag: {str, Language}, macro: bool=False) -> str:
"""
Standardize a language tag:
- Replace deprecated values with their updated versions (if those exist)
- Remove script tags that are redundant with the language
- If *macro* is True, use a macrolanguage to represent the most common
standardized language within that macrolanguage. For example, 'cmn'
(Mandarin) becomes 'zh' (Chinese), and 'arb' (Modern Standard Arabic)
becomes 'ar' (Arabic).
- Format the result according to the conventions of BCP 47
Macrolanguage replacement is not required by BCP 47, but it is required
by the Unicode CLDR.
>>> standardize_tag('en_US')
'en-US'
>>> standardize_tag('en-Latn')
'en'
>>> standardize_tag('en-uk')
'en-GB'
>>> standardize_tag('eng')
'en'
>>> standardize_tag('arb-Arab', macro=True)
'ar'
>>> standardize_tag('sh-QU')
'sr-Latn-EU'
>>> standardize_tag('sgn-US')
'ase'
>>> standardize_tag('zh-cmn-hans-cn')
'cmn-Hans-CN'
>>> standardize_tag('zh-cmn-hans-cn', macro=True)
'zh-Hans-CN'
>>> standardize_tag('zsm', macro=True)
'ms'
>>> standardize_tag('ja-latn-hepburn')
'ja-Latn-hepburn'
>>> standardize_tag('spa-latn-mx')
'es-MX'
If the tag can't be parsed according to BCP 47, this will raise a
LanguageTagError (a subclass of ValueError):
>>> standardize_tag('spa-mx-latn')
Traceback (most recent call last):
...
langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
"""
langdata = Language.get(tag, normalize=True)
if macro:
langdata = langdata.prefer_macrolanguage()
return langdata.simplify_script().to_tag() | python | def standardize_tag(tag: {str, Language}, macro: bool=False) -> str:
"""
Standardize a language tag:
- Replace deprecated values with their updated versions (if those exist)
- Remove script tags that are redundant with the language
- If *macro* is True, use a macrolanguage to represent the most common
standardized language within that macrolanguage. For example, 'cmn'
(Mandarin) becomes 'zh' (Chinese), and 'arb' (Modern Standard Arabic)
becomes 'ar' (Arabic).
- Format the result according to the conventions of BCP 47
Macrolanguage replacement is not required by BCP 47, but it is required
by the Unicode CLDR.
>>> standardize_tag('en_US')
'en-US'
>>> standardize_tag('en-Latn')
'en'
>>> standardize_tag('en-uk')
'en-GB'
>>> standardize_tag('eng')
'en'
>>> standardize_tag('arb-Arab', macro=True)
'ar'
>>> standardize_tag('sh-QU')
'sr-Latn-EU'
>>> standardize_tag('sgn-US')
'ase'
>>> standardize_tag('zh-cmn-hans-cn')
'cmn-Hans-CN'
>>> standardize_tag('zh-cmn-hans-cn', macro=True)
'zh-Hans-CN'
>>> standardize_tag('zsm', macro=True)
'ms'
>>> standardize_tag('ja-latn-hepburn')
'ja-Latn-hepburn'
>>> standardize_tag('spa-latn-mx')
'es-MX'
If the tag can't be parsed according to BCP 47, this will raise a
LanguageTagError (a subclass of ValueError):
>>> standardize_tag('spa-mx-latn')
Traceback (most recent call last):
...
langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
"""
langdata = Language.get(tag, normalize=True)
if macro:
langdata = langdata.prefer_macrolanguage()
return langdata.simplify_script().to_tag() | Standardize a language tag:
- Replace deprecated values with their updated versions (if those exist)
- Remove script tags that are redundant with the language
- If *macro* is True, use a macrolanguage to represent the most common
standardized language within that macrolanguage. For example, 'cmn'
(Mandarin) becomes 'zh' (Chinese), and 'arb' (Modern Standard Arabic)
becomes 'ar' (Arabic).
- Format the result according to the conventions of BCP 47
Macrolanguage replacement is not required by BCP 47, but it is required
by the Unicode CLDR.
>>> standardize_tag('en_US')
'en-US'
>>> standardize_tag('en-Latn')
'en'
>>> standardize_tag('en-uk')
'en-GB'
>>> standardize_tag('eng')
'en'
>>> standardize_tag('arb-Arab', macro=True)
'ar'
>>> standardize_tag('sh-QU')
'sr-Latn-EU'
>>> standardize_tag('sgn-US')
'ase'
>>> standardize_tag('zh-cmn-hans-cn')
'cmn-Hans-CN'
>>> standardize_tag('zh-cmn-hans-cn', macro=True)
'zh-Hans-CN'
>>> standardize_tag('zsm', macro=True)
'ms'
>>> standardize_tag('ja-latn-hepburn')
'ja-Latn-hepburn'
>>> standardize_tag('spa-latn-mx')
'es-MX'
If the tag can't be parsed according to BCP 47, this will raise a
LanguageTagError (a subclass of ValueError):
>>> standardize_tag('spa-mx-latn')
Traceback (most recent call last):
...
langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L955-L1018 |
LuminosoInsight/langcodes | langcodes/__init__.py | tag_match_score | def tag_match_score(desired: {str, Language}, supported: {str, Language}) -> int:
"""
Return a number from 0 to 100 indicating the strength of match between the
language the user desires, D, and a supported language, S. Higher numbers
are better. A reasonable cutoff for not messing with your users is to
only accept scores of 75 or more.
A score of 100 means the languages are the same, possibly after normalizing
and filling in likely values.
>>> tag_match_score('en', 'en')
100
>>> tag_match_score('en', 'en-US')
100
>>> tag_match_score('zh-Hant', 'zh-TW')
100
>>> tag_match_score('ru-Cyrl', 'ru')
100
>>> # Serbo-Croatian is a politically contentious idea, but in practice
>>> # it's considered equivalent to Serbian in Latin characters.
>>> tag_match_score('sh', 'sr-Latn')
100
A score of 92 to 97 indicates a regional difference.
>>> tag_match_score('zh-HK', 'zh-MO') # Chinese is similar in Hong Kong and Macao
97
>>> tag_match_score('en-AU', 'en-GB') # Australian English is similar to British English
96
>>> tag_match_score('en-IN', 'en-GB') # Indian English is also similar to British English
96
    >>> tag_match_score('es-PR', 'es-419') # Puerto Rican Spanish is Latin American Spanish
96
>>> tag_match_score('en-US', 'en-GB') # American and British English are somewhat different
94
>>> tag_match_score('es-MX', 'es-ES') # Mexican Spanish is different from Spanish Spanish
92
>>> # Serbian has two scripts, and people might prefer one but understand both
>>> tag_match_score('sr-Latn', 'sr-Cyrl')
95
>>> # European Portuguese is different from the most common form (Brazilian Portuguese)
>>> tag_match_score('pt', 'pt-PT')
92
A score of 86 to 90 indicates that people who use the desired language
are demographically likely to understand the supported language, even if
the languages themselves are unrelated. There are many languages that have
a one-way connection of this kind to English or French.
>>> tag_match_score('ta', 'en') # Tamil to English
86
>>> tag_match_score('mg', 'fr') # Malagasy to French
86
Sometimes it's more straightforward than that: people who use the desired
language are demographically likely to understand the supported language
because it's demographically relevant and highly related.
>>> tag_match_score('af', 'nl') # Afrikaans to Dutch
86
>>> tag_match_score('ms', 'id') # Malay to Indonesian
86
>>> tag_match_score('nn', 'nb') # Nynorsk to Norwegian Bokmål
90
>>> tag_match_score('nb', 'da') # Norwegian Bokmål to Danish
88
A score of 80 to 85 indicates a particularly contentious difference in
script, where people who understand one script can learn the other but
probably won't be happy with it. This specifically applies to Chinese.
>>> tag_match_score('zh-Hans', 'zh-Hant')
85
>>> tag_match_score('zh-CN', 'zh-HK')
85
>>> tag_match_score('zh-CN', 'zh-TW')
85
>>> tag_match_score('zh-Hant', 'zh-Hans')
81
>>> tag_match_score('zh-TW', 'zh-CN')
81
When the supported script is a different one than desired, this is usually
a major difference with score of 60 or less.
>>> tag_match_score('ja', 'ja-Latn-US-hepburn')
56
>>> # You can read the Shavian script, right?
>>> tag_match_score('en', 'en-Shaw')
56
When there is no indication the supported language will be understood, the
score will be 20 or less, to a minimum of 0.
>>> tag_match_score('es', 'fr') # Spanish and French are different.
16
>>> tag_match_score('en', 'ta') # English speakers generally do not know Tamil.
0
CLDR doesn't take into account which languages are considered part of a
common 'macrolanguage'. We have this data, so we can use it in matching.
If two languages have no other rule that would allow them to match, but
share a macrolanguage, they'll get a match score of 20 less than what
they would get if the language matched.
>>> tag_match_score('arz', 'ar') # Egyptian Arabic to Standard Arabic
80
>>> tag_match_score('arz', 'ary') # Egyptian Arabic to Moroccan Arabic
76
Here's an example that has script, region, and language differences, but
a macrolanguage in common.
Written Chinese is usually presumed to be Mandarin Chinese, but colloquial
Cantonese can be written as well. When it is, it probably has region,
script, and language differences from the usual mainland Chinese. But it is
still part of the 'Chinese' macrolanguage, so there is more similarity
than, say, comparing Mandarin to Hindi.
>>> tag_match_score('yue', 'zh')
36
Comparing Swiss German ('gsw') to standardized German ('de') shows how
these scores can be asymmetrical. Swiss German speakers will understand
German, so the score in that direction is 92. Most German speakers find
Swiss German unintelligible, and CLDR in fact assigns this a score of 16.
This seems a little bit extreme, but the asymmetry is certainly there. And
if your text is tagged as 'gsw', it must be that way for a reason.
>>> tag_match_score('gsw', 'de')
92
>>> tag_match_score('de', 'gsw')
16
"""
desired_ld = Language.get(desired)
supported_ld = Language.get(supported)
return desired_ld.match_score(supported_ld) | python | def tag_match_score(desired: {str, Language}, supported: {str, Language}) -> int:
"""
Return a number from 0 to 100 indicating the strength of match between the
language the user desires, D, and a supported language, S. Higher numbers
are better. A reasonable cutoff for not messing with your users is to
only accept scores of 75 or more.
A score of 100 means the languages are the same, possibly after normalizing
and filling in likely values.
>>> tag_match_score('en', 'en')
100
>>> tag_match_score('en', 'en-US')
100
>>> tag_match_score('zh-Hant', 'zh-TW')
100
>>> tag_match_score('ru-Cyrl', 'ru')
100
>>> # Serbo-Croatian is a politically contentious idea, but in practice
>>> # it's considered equivalent to Serbian in Latin characters.
>>> tag_match_score('sh', 'sr-Latn')
100
A score of 92 to 97 indicates a regional difference.
>>> tag_match_score('zh-HK', 'zh-MO') # Chinese is similar in Hong Kong and Macao
97
>>> tag_match_score('en-AU', 'en-GB') # Australian English is similar to British English
96
>>> tag_match_score('en-IN', 'en-GB') # Indian English is also similar to British English
96
    >>> tag_match_score('es-PR', 'es-419') # Puerto Rican Spanish is Latin American Spanish
96
>>> tag_match_score('en-US', 'en-GB') # American and British English are somewhat different
94
>>> tag_match_score('es-MX', 'es-ES') # Mexican Spanish is different from Spanish Spanish
92
>>> # Serbian has two scripts, and people might prefer one but understand both
>>> tag_match_score('sr-Latn', 'sr-Cyrl')
95
>>> # European Portuguese is different from the most common form (Brazilian Portuguese)
>>> tag_match_score('pt', 'pt-PT')
92
A score of 86 to 90 indicates that people who use the desired language
are demographically likely to understand the supported language, even if
the languages themselves are unrelated. There are many languages that have
a one-way connection of this kind to English or French.
>>> tag_match_score('ta', 'en') # Tamil to English
86
>>> tag_match_score('mg', 'fr') # Malagasy to French
86
Sometimes it's more straightforward than that: people who use the desired
language are demographically likely to understand the supported language
because it's demographically relevant and highly related.
>>> tag_match_score('af', 'nl') # Afrikaans to Dutch
86
>>> tag_match_score('ms', 'id') # Malay to Indonesian
86
>>> tag_match_score('nn', 'nb') # Nynorsk to Norwegian Bokmål
90
>>> tag_match_score('nb', 'da') # Norwegian Bokmål to Danish
88
A score of 80 to 85 indicates a particularly contentious difference in
script, where people who understand one script can learn the other but
probably won't be happy with it. This specifically applies to Chinese.
>>> tag_match_score('zh-Hans', 'zh-Hant')
85
>>> tag_match_score('zh-CN', 'zh-HK')
85
>>> tag_match_score('zh-CN', 'zh-TW')
85
>>> tag_match_score('zh-Hant', 'zh-Hans')
81
>>> tag_match_score('zh-TW', 'zh-CN')
81
When the supported script is a different one than desired, this is usually
a major difference with score of 60 or less.
>>> tag_match_score('ja', 'ja-Latn-US-hepburn')
56
>>> # You can read the Shavian script, right?
>>> tag_match_score('en', 'en-Shaw')
56
When there is no indication the supported language will be understood, the
score will be 20 or less, to a minimum of 0.
>>> tag_match_score('es', 'fr') # Spanish and French are different.
16
>>> tag_match_score('en', 'ta') # English speakers generally do not know Tamil.
0
CLDR doesn't take into account which languages are considered part of a
common 'macrolanguage'. We have this data, so we can use it in matching.
If two languages have no other rule that would allow them to match, but
share a macrolanguage, they'll get a match score of 20 less than what
they would get if the language matched.
>>> tag_match_score('arz', 'ar') # Egyptian Arabic to Standard Arabic
80
>>> tag_match_score('arz', 'ary') # Egyptian Arabic to Moroccan Arabic
76
Here's an example that has script, region, and language differences, but
a macrolanguage in common.
Written Chinese is usually presumed to be Mandarin Chinese, but colloquial
Cantonese can be written as well. When it is, it probably has region,
script, and language differences from the usual mainland Chinese. But it is
still part of the 'Chinese' macrolanguage, so there is more similarity
than, say, comparing Mandarin to Hindi.
>>> tag_match_score('yue', 'zh')
36
Comparing Swiss German ('gsw') to standardized German ('de') shows how
these scores can be asymmetrical. Swiss German speakers will understand
German, so the score in that direction is 92. Most German speakers find
Swiss German unintelligible, and CLDR in fact assigns this a score of 16.
This seems a little bit extreme, but the asymmetry is certainly there. And
if your text is tagged as 'gsw', it must be that way for a reason.
>>> tag_match_score('gsw', 'de')
92
>>> tag_match_score('de', 'gsw')
16
"""
desired_ld = Language.get(desired)
supported_ld = Language.get(supported)
return desired_ld.match_score(supported_ld) | Return a number from 0 to 100 indicating the strength of match between the
language the user desires, D, and a supported language, S. Higher numbers
are better. A reasonable cutoff for not messing with your users is to
only accept scores of 75 or more.
A score of 100 means the languages are the same, possibly after normalizing
and filling in likely values.
>>> tag_match_score('en', 'en')
100
>>> tag_match_score('en', 'en-US')
100
>>> tag_match_score('zh-Hant', 'zh-TW')
100
>>> tag_match_score('ru-Cyrl', 'ru')
100
>>> # Serbo-Croatian is a politically contentious idea, but in practice
>>> # it's considered equivalent to Serbian in Latin characters.
>>> tag_match_score('sh', 'sr-Latn')
100
A score of 92 to 97 indicates a regional difference.
>>> tag_match_score('zh-HK', 'zh-MO') # Chinese is similar in Hong Kong and Macao
97
>>> tag_match_score('en-AU', 'en-GB') # Australian English is similar to British English
96
>>> tag_match_score('en-IN', 'en-GB') # Indian English is also similar to British English
96
    >>> tag_match_score('es-PR', 'es-419') # Puerto Rican Spanish is Latin American Spanish
96
>>> tag_match_score('en-US', 'en-GB') # American and British English are somewhat different
94
>>> tag_match_score('es-MX', 'es-ES') # Mexican Spanish is different from Spanish Spanish
92
>>> # Serbian has two scripts, and people might prefer one but understand both
>>> tag_match_score('sr-Latn', 'sr-Cyrl')
95
>>> # European Portuguese is different from the most common form (Brazilian Portuguese)
>>> tag_match_score('pt', 'pt-PT')
92
A score of 86 to 90 indicates that people who use the desired language
are demographically likely to understand the supported language, even if
the languages themselves are unrelated. There are many languages that have
a one-way connection of this kind to English or French.
>>> tag_match_score('ta', 'en') # Tamil to English
86
>>> tag_match_score('mg', 'fr') # Malagasy to French
86
Sometimes it's more straightforward than that: people who use the desired
language are demographically likely to understand the supported language
because it's demographically relevant and highly related.
>>> tag_match_score('af', 'nl') # Afrikaans to Dutch
86
>>> tag_match_score('ms', 'id') # Malay to Indonesian
86
>>> tag_match_score('nn', 'nb') # Nynorsk to Norwegian Bokmål
90
>>> tag_match_score('nb', 'da') # Norwegian Bokmål to Danish
88
A score of 80 to 85 indicates a particularly contentious difference in
script, where people who understand one script can learn the other but
probably won't be happy with it. This specifically applies to Chinese.
>>> tag_match_score('zh-Hans', 'zh-Hant')
85
>>> tag_match_score('zh-CN', 'zh-HK')
85
>>> tag_match_score('zh-CN', 'zh-TW')
85
>>> tag_match_score('zh-Hant', 'zh-Hans')
81
>>> tag_match_score('zh-TW', 'zh-CN')
81
When the supported script is a different one than desired, this is usually
a major difference with score of 60 or less.
>>> tag_match_score('ja', 'ja-Latn-US-hepburn')
56
>>> # You can read the Shavian script, right?
>>> tag_match_score('en', 'en-Shaw')
56
When there is no indication the supported language will be understood, the
score will be 20 or less, to a minimum of 0.
>>> tag_match_score('es', 'fr') # Spanish and French are different.
16
>>> tag_match_score('en', 'ta') # English speakers generally do not know Tamil.
0
CLDR doesn't take into account which languages are considered part of a
common 'macrolanguage'. We have this data, so we can use it in matching.
If two languages have no other rule that would allow them to match, but
share a macrolanguage, they'll get a match score of 20 less than what
they would get if the language matched.
>>> tag_match_score('arz', 'ar') # Egyptian Arabic to Standard Arabic
80
>>> tag_match_score('arz', 'ary') # Egyptian Arabic to Moroccan Arabic
76
Here's an example that has script, region, and language differences, but
a macrolanguage in common.
Written Chinese is usually presumed to be Mandarin Chinese, but colloquial
Cantonese can be written as well. When it is, it probably has region,
script, and language differences from the usual mainland Chinese. But it is
still part of the 'Chinese' macrolanguage, so there is more similarity
than, say, comparing Mandarin to Hindi.
>>> tag_match_score('yue', 'zh')
36
Comparing Swiss German ('gsw') to standardized German ('de') shows how
these scores can be asymmetrical. Swiss German speakers will understand
German, so the score in that direction is 92. Most German speakers find
Swiss German unintelligible, and CLDR in fact assigns this a score of 16.
This seems a little bit extreme, but the asymmetry is certainly there. And
if your text is tagged as 'gsw', it must be that way for a reason.
>>> tag_match_score('gsw', 'de')
92
>>> tag_match_score('de', 'gsw')
16 | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L1021-L1159 |
LuminosoInsight/langcodes | langcodes/__init__.py | best_match | def best_match(desired_language: {str, Language}, supported_languages: list,
min_score: int=75) -> (str, int):
"""
You have software that supports any of the `supported_languages`. You want
to use `desired_language`. This function lets you choose the right language,
even if there isn't an exact match.
Returns:
- The best-matching language code, which will be one of the
`supported_languages` or 'und'
- The score of the match, from 0 to 100
`min_score` sets the minimum match score. If all languages match with a lower
score than that, the result will be 'und' with a score of 0.
When there is a tie for the best matching language, the first one in the
tie will be used.
Setting `min_score` lower will enable more things to match, at the cost
of possibly mis-handling data or upsetting users. Read the documentation
for :func:`tag_match_score` to understand what the numbers mean.
>>> best_match('fr', ['de', 'en', 'fr'])
('fr', 100)
>>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
('sr-Latn', 100)
>>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
('zh-Hans', 100)
>>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan'])
('cmn-Hans', 100)
>>> best_match('pt', ['pt-BR', 'pt-PT'])
('pt-BR', 100)
>>> best_match('en-AU', ['en-GB', 'en-US'])
('en-GB', 96)
>>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
('es-419', 96)
>>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
('es-PU', 95)
>>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY'])
('es-AR', 95)
>>> best_match('zsm', ['id', 'mhp'])
('id', 86)
>>> best_match('eu', ['el', 'en', 'es'])
('es', 90)
>>> best_match('eu', ['el', 'en', 'es'], min_score=92)
('und', 0)
"""
# Quickly return if the desired language is directly supported
if desired_language in supported_languages:
return desired_language, 100
# Reduce the desired language to a standard form that could also match
desired_language = standardize_tag(desired_language)
if desired_language in supported_languages:
return desired_language, 100
match_scores = [
(supported, tag_match_score(desired_language, supported))
for supported in supported_languages
]
match_scores = [
(supported, score) for (supported, score) in match_scores
if score >= min_score
] + [('und', 0)]
match_scores.sort(key=lambda item: -item[1])
return match_scores[0] | python | def best_match(desired_language: {str, Language}, supported_languages: list,
min_score: int=75) -> (str, int):
"""
You have software that supports any of the `supported_languages`. You want
to use `desired_language`. This function lets you choose the right language,
even if there isn't an exact match.
Returns:
- The best-matching language code, which will be one of the
`supported_languages` or 'und'
- The score of the match, from 0 to 100
`min_score` sets the minimum match score. If all languages match with a lower
score than that, the result will be 'und' with a score of 0.
When there is a tie for the best matching language, the first one in the
tie will be used.
Setting `min_score` lower will enable more things to match, at the cost
of possibly mis-handling data or upsetting users. Read the documentation
for :func:`tag_match_score` to understand what the numbers mean.
>>> best_match('fr', ['de', 'en', 'fr'])
('fr', 100)
>>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
('sr-Latn', 100)
>>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
('zh-Hans', 100)
>>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan'])
('cmn-Hans', 100)
>>> best_match('pt', ['pt-BR', 'pt-PT'])
('pt-BR', 100)
>>> best_match('en-AU', ['en-GB', 'en-US'])
('en-GB', 96)
>>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
('es-419', 96)
>>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
('es-PU', 95)
>>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY'])
('es-AR', 95)
>>> best_match('zsm', ['id', 'mhp'])
('id', 86)
>>> best_match('eu', ['el', 'en', 'es'])
('es', 90)
>>> best_match('eu', ['el', 'en', 'es'], min_score=92)
('und', 0)
"""
# Quickly return if the desired language is directly supported
if desired_language in supported_languages:
return desired_language, 100
# Reduce the desired language to a standard form that could also match
desired_language = standardize_tag(desired_language)
if desired_language in supported_languages:
return desired_language, 100
match_scores = [
(supported, tag_match_score(desired_language, supported))
for supported in supported_languages
]
match_scores = [
(supported, score) for (supported, score) in match_scores
if score >= min_score
] + [('und', 0)]
match_scores.sort(key=lambda item: -item[1])
return match_scores[0] | You have software that supports any of the `supported_languages`. You want
to use `desired_language`. This function lets you choose the right language,
even if there isn't an exact match.
Returns:
- The best-matching language code, which will be one of the
`supported_languages` or 'und'
- The score of the match, from 0 to 100
`min_score` sets the minimum match score. If all languages match with a lower
score than that, the result will be 'und' with a score of 0.
When there is a tie for the best matching language, the first one in the
tie will be used.
Setting `min_score` lower will enable more things to match, at the cost
of possibly mis-handling data or upsetting users. Read the documentation
for :func:`tag_match_score` to understand what the numbers mean.
>>> best_match('fr', ['de', 'en', 'fr'])
('fr', 100)
>>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
('sr-Latn', 100)
>>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
('zh-Hans', 100)
>>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan'])
('cmn-Hans', 100)
>>> best_match('pt', ['pt-BR', 'pt-PT'])
('pt-BR', 100)
>>> best_match('en-AU', ['en-GB', 'en-US'])
('en-GB', 96)
>>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
('es-419', 96)
>>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
('es-PU', 95)
>>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY'])
('es-AR', 95)
>>> best_match('zsm', ['id', 'mhp'])
('id', 86)
>>> best_match('eu', ['el', 'en', 'es'])
('es', 90)
>>> best_match('eu', ['el', 'en', 'es'], min_score=92)
('und', 0) | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L1162-L1229 |
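A minimal usage sketch of best_match, assuming the langcodes package is installed and exposes the function at the top level as the file path langcodes/__init__.py suggests; the expected tuples are copied from the doctests above:

from langcodes import best_match  # assumed import path, per langcodes/__init__.py

# Exact match: the desired language is directly supported
assert best_match('fr', ['de', 'en', 'fr']) == ('fr', 100)
# Raising min_score makes weak matches fall back to ('und', 0)
assert best_match('eu', ['el', 'en', 'es'], min_score=92) == ('und', 0)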
LuminosoInsight/langcodes | langcodes/__init__.py | Language.make | def make(cls, language=None, extlangs=None, script=None,
region=None, variants=None, extensions=None, private=None):
"""
Create a Language object by giving any subset of its attributes.
If this value has been created before, return the existing value.
"""
values = (language, tuple(extlangs or ()), script, region,
tuple(variants or ()), tuple(extensions or ()), private)
if values in cls._INSTANCES:
return cls._INSTANCES[values]
instance = cls(
language=language, extlangs=extlangs,
script=script, region=region, variants=variants,
extensions=extensions, private=private
)
cls._INSTANCES[values] = instance
return instance | python | def make(cls, language=None, extlangs=None, script=None,
region=None, variants=None, extensions=None, private=None):
"""
Create a Language object by giving any subset of its attributes.
If this value has been created before, return the existing value.
"""
values = (language, tuple(extlangs or ()), script, region,
tuple(variants or ()), tuple(extensions or ()), private)
if values in cls._INSTANCES:
return cls._INSTANCES[values]
instance = cls(
language=language, extlangs=extlangs,
script=script, region=region, variants=variants,
extensions=extensions, private=private
)
cls._INSTANCES[values] = instance
return instance | Create a Language object by giving any subset of its attributes.
If this value has been created before, return the existing value. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L104-L122 |
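A brief sketch of the interning behaviour documented above; the identity check follows from the _INSTANCES cache shown in the source (assuming the same import path):

from langcodes import Language  # assumed import path, per langcodes/__init__.py

a = Language.make(language='en', region='US')
b = Language.make(language='en', region='US')
assert a is b  # make() returns the previously created value for identical attributes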
LuminosoInsight/langcodes | langcodes/__init__.py | Language.get | def get(tag: {str, 'Language'}, normalize=True) -> 'Language':
"""
Create a Language object from a language tag string.
If normalize=True, non-standard or overlong tags will be replaced as
they're interpreted. This is recommended.
Here are several examples of language codes, which are also test cases.
Most language codes are straightforward, but these examples will get
pretty obscure toward the end.
>>> Language.get('en-US')
Language.make(language='en', region='US')
>>> Language.get('zh-Hant')
Language.make(language='zh', script='Hant')
>>> Language.get('und')
Language.make()
This function is idempotent, in case you already have a Language object:
>>> Language.get(Language.get('en-us'))
Language.make(language='en', region='US')
The non-code 'root' is sometimes used to represent the lack of any
language information, similar to 'und'.
>>> Language.get('root')
Language.make()
By default, getting a Language object will automatically convert
deprecated tags:
>>> Language.get('iw')
Language.make(language='he')
>>> Language.get('in')
Language.make(language='id')
One type of deprecated tag that should be replaced is for sign
languages, which used to all be coded as regional variants of a
fictitious global sign language called 'sgn'. Of course, there is no
global sign language, so sign languages now have their own language
codes.
>>> Language.get('sgn-US')
Language.make(language='ase')
>>> Language.get('sgn-US', normalize=False)
Language.make(language='sgn', region='US')
'en-gb-oed' is a tag that's grandfathered into the standard because it
has been used to mean "spell-check this with Oxford English Dictionary
spelling", but that tag has the wrong shape. We interpret this as the
new standardized tag 'en-gb-oxendict', unless asked not to normalize.
>>> Language.get('en-gb-oed')
Language.make(language='en', region='GB', variants=['oxendict'])
>>> Language.get('en-gb-oed', normalize=False)
Language.make(language='en-gb-oed')
'zh-min-nan' is another oddly-formed tag, used to represent the
Southern Min language, which includes Taiwanese as a regional form. It
now has its own language code.
>>> Language.get('zh-min-nan')
Language.make(language='nan')
There's not much we can do with the vague tag 'zh-min':
>>> Language.get('zh-min')
Language.make(language='zh-min')
Occasionally Wiktionary will use 'extlang' tags in strange ways, such
as using the tag 'und-ibe' for some unspecified Iberian language.
>>> Language.get('und-ibe')
Language.make(extlangs=['ibe'])
Here's an example of replacing multiple deprecated tags.
The language tag 'sh' (Serbo-Croatian) ended up being politically
problematic, and different standards took different steps to address
this. The IANA made it into a macrolanguage that contains 'sr', 'hr',
and 'bs'. Unicode further decided that it's a legacy tag that should
be interpreted as 'sr-Latn', which the language matching rules say
is mutually intelligible with all those languages.
We complicate the example by adding on the region tag 'QU', an old
provisional tag for the European Union, which is now standardized as
'EU'.
>>> Language.get('sh-QU')
Language.make(language='sr', script='Latn', region='EU')
"""
if isinstance(tag, Language):
if not normalize:
# shortcut: we have the tag already
return tag
# We might need to normalize this tag. Convert it back into a
# string tag, to cover all the edge cases of normalization in a
# way that we've already solved.
tag = tag.to_tag()
if (tag, normalize) in Language._PARSE_CACHE:
return Language._PARSE_CACHE[tag, normalize]
data = {}
# if the complete tag appears as something to normalize, do the
# normalization right away. Smash case when checking, because the
# case normalization that comes from parse_tag() hasn't been applied
# yet.
tag_lower = tag.lower()
if normalize and tag_lower in LANGUAGE_REPLACEMENTS:
tag = LANGUAGE_REPLACEMENTS[tag_lower]
components = parse_tag(tag)
for typ, value in components:
if typ == 'extlang' and normalize and 'language' in data:
# smash extlangs when possible
minitag = '%s-%s' % (data['language'], value)
norm = LANGUAGE_REPLACEMENTS.get(minitag.lower())
if norm is not None:
data.update(
Language.get(norm, normalize).to_dict()
)
else:
data.setdefault('extlangs', []).append(value)
elif typ in {'extlang', 'variant', 'extension'}:
data.setdefault(typ + 's', []).append(value)
elif typ == 'language':
if value == 'und':
pass
elif normalize:
replacement = LANGUAGE_REPLACEMENTS.get(value.lower())
if replacement is not None:
# parse the replacement if necessary -- this helps with
# Serbian and Moldovan
data.update(
Language.get(replacement, normalize).to_dict()
)
else:
data['language'] = value
else:
data['language'] = value
elif typ == 'region':
if normalize:
data['region'] = REGION_REPLACEMENTS.get(value.lower(), value)
else:
data['region'] = value
elif typ == 'grandfathered':
# If we got here, we got a grandfathered tag but we were asked
# not to normalize it, or the CLDR data doesn't know how to
# normalize it. The best we can do is set the entire tag as the
# language.
data['language'] = value
else:
data[typ] = value
result = Language.make(**data)
Language._PARSE_CACHE[tag, normalize] = result
return result | python | def get(tag: {str, 'Language'}, normalize=True) -> 'Language':
"""
Create a Language object from a language tag string.
If normalize=True, non-standard or overlong tags will be replaced as
they're interpreted. This is recommended.
Here are several examples of language codes, which are also test cases.
Most language codes are straightforward, but these examples will get
pretty obscure toward the end.
>>> Language.get('en-US')
Language.make(language='en', region='US')
>>> Language.get('zh-Hant')
Language.make(language='zh', script='Hant')
>>> Language.get('und')
Language.make()
This function is idempotent, in case you already have a Language object:
>>> Language.get(Language.get('en-us'))
Language.make(language='en', region='US')
The non-code 'root' is sometimes used to represent the lack of any
language information, similar to 'und'.
>>> Language.get('root')
Language.make()
By default, getting a Language object will automatically convert
deprecated tags:
>>> Language.get('iw')
Language.make(language='he')
>>> Language.get('in')
Language.make(language='id')
One type of deprecated tag that should be replaced is for sign
languages, which used to all be coded as regional variants of a
fictitious global sign language called 'sgn'. Of course, there is no
global sign language, so sign languages now have their own language
codes.
>>> Language.get('sgn-US')
Language.make(language='ase')
>>> Language.get('sgn-US', normalize=False)
Language.make(language='sgn', region='US')
'en-gb-oed' is a tag that's grandfathered into the standard because it
has been used to mean "spell-check this with Oxford English Dictionary
spelling", but that tag has the wrong shape. We interpret this as the
new standardized tag 'en-gb-oxendict', unless asked not to normalize.
>>> Language.get('en-gb-oed')
Language.make(language='en', region='GB', variants=['oxendict'])
>>> Language.get('en-gb-oed', normalize=False)
Language.make(language='en-gb-oed')
'zh-min-nan' is another oddly-formed tag, used to represent the
Southern Min language, which includes Taiwanese as a regional form. It
now has its own language code.
>>> Language.get('zh-min-nan')
Language.make(language='nan')
There's not much we can do with the vague tag 'zh-min':
>>> Language.get('zh-min')
Language.make(language='zh-min')
Occasionally Wiktionary will use 'extlang' tags in strange ways, such
as using the tag 'und-ibe' for some unspecified Iberian language.
>>> Language.get('und-ibe')
Language.make(extlangs=['ibe'])
Here's an example of replacing multiple deprecated tags.
The language tag 'sh' (Serbo-Croatian) ended up being politically
problematic, and different standards took different steps to address
this. The IANA made it into a macrolanguage that contains 'sr', 'hr',
and 'bs'. Unicode further decided that it's a legacy tag that should
be interpreted as 'sr-Latn', which the language matching rules say
is mutually intelligible with all those languages.
We complicate the example by adding on the region tag 'QU', an old
provisional tag for the European Union, which is now standardized as
'EU'.
>>> Language.get('sh-QU')
Language.make(language='sr', script='Latn', region='EU')
"""
if isinstance(tag, Language):
if not normalize:
# shortcut: we have the tag already
return tag
# We might need to normalize this tag. Convert it back into a
# string tag, to cover all the edge cases of normalization in a
# way that we've already solved.
tag = tag.to_tag()
if (tag, normalize) in Language._PARSE_CACHE:
return Language._PARSE_CACHE[tag, normalize]
data = {}
# if the complete tag appears as something to normalize, do the
# normalization right away. Smash case when checking, because the
# case normalization that comes from parse_tag() hasn't been applied
# yet.
tag_lower = tag.lower()
if normalize and tag_lower in LANGUAGE_REPLACEMENTS:
tag = LANGUAGE_REPLACEMENTS[tag_lower]
components = parse_tag(tag)
for typ, value in components:
if typ == 'extlang' and normalize and 'language' in data:
# smash extlangs when possible
minitag = '%s-%s' % (data['language'], value)
norm = LANGUAGE_REPLACEMENTS.get(minitag.lower())
if norm is not None:
data.update(
Language.get(norm, normalize).to_dict()
)
else:
data.setdefault('extlangs', []).append(value)
elif typ in {'extlang', 'variant', 'extension'}:
data.setdefault(typ + 's', []).append(value)
elif typ == 'language':
if value == 'und':
pass
elif normalize:
replacement = LANGUAGE_REPLACEMENTS.get(value.lower())
if replacement is not None:
# parse the replacement if necessary -- this helps with
# Serbian and Moldovan
data.update(
Language.get(replacement, normalize).to_dict()
)
else:
data['language'] = value
else:
data['language'] = value
elif typ == 'region':
if normalize:
data['region'] = REGION_REPLACEMENTS.get(value.lower(), value)
else:
data['region'] = value
elif typ == 'grandfathered':
# If we got here, we got a grandfathered tag but we were asked
# not to normalize it, or the CLDR data doesn't know how to
# normalize it. The best we can do is set the entire tag as the
# language.
data['language'] = value
else:
data[typ] = value
result = Language.make(**data)
Language._PARSE_CACHE[tag, normalize] = result
return result | Create a Language object from a language tag string.
If normalize=True, non-standard or overlong tags will be replaced as
they're interpreted. This is recommended.
Here are several examples of language codes, which are also test cases.
Most language codes are straightforward, but these examples will get
pretty obscure toward the end.
>>> Language.get('en-US')
Language.make(language='en', region='US')
>>> Language.get('zh-Hant')
Language.make(language='zh', script='Hant')
>>> Language.get('und')
Language.make()
This function is idempotent, in case you already have a Language object:
>>> Language.get(Language.get('en-us'))
Language.make(language='en', region='US')
The non-code 'root' is sometimes used to represent the lack of any
language information, similar to 'und'.
>>> Language.get('root')
Language.make()
By default, getting a Language object will automatically convert
deprecated tags:
>>> Language.get('iw')
Language.make(language='he')
>>> Language.get('in')
Language.make(language='id')
One type of deprecated tag that should be replaced is for sign
languages, which used to all be coded as regional variants of a
fictitious global sign language called 'sgn'. Of course, there is no
global sign language, so sign languages now have their own language
codes.
>>> Language.get('sgn-US')
Language.make(language='ase')
>>> Language.get('sgn-US', normalize=False)
Language.make(language='sgn', region='US')
'en-gb-oed' is a tag that's grandfathered into the standard because it
has been used to mean "spell-check this with Oxford English Dictionary
spelling", but that tag has the wrong shape. We interpret this as the
new standardized tag 'en-gb-oxendict', unless asked not to normalize.
>>> Language.get('en-gb-oed')
Language.make(language='en', region='GB', variants=['oxendict'])
>>> Language.get('en-gb-oed', normalize=False)
Language.make(language='en-gb-oed')
'zh-min-nan' is another oddly-formed tag, used to represent the
Southern Min language, which includes Taiwanese as a regional form. It
now has its own language code.
>>> Language.get('zh-min-nan')
Language.make(language='nan')
There's not much we can do with the vague tag 'zh-min':
>>> Language.get('zh-min')
Language.make(language='zh-min')
Occasionally Wiktionary will use 'extlang' tags in strange ways, such
as using the tag 'und-ibe' for some unspecified Iberian language.
>>> Language.get('und-ibe')
Language.make(extlangs=['ibe'])
Here's an example of replacing multiple deprecated tags.
The language tag 'sh' (Serbo-Croatian) ended up being politically
problematic, and different standards took different steps to address
this. The IANA made it into a macrolanguage that contains 'sr', 'hr',
and 'bs'. Unicode further decided that it's a legacy tag that should
be interpreted as 'sr-Latn', which the language matching rules say
is mutually intelligible with all those languages.
We complicate the example by adding on the region tag 'QU', an old
provisional tag for the European Union, which is now standardized as
'EU'.
>>> Language.get('sh-QU')
Language.make(language='sr', script='Latn', region='EU') | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L125-L290 |
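A hedged usage sketch of Language.get, with expected tag strings taken from the doctests above (str() on a Language object yields its tag, per to_tag below):

from langcodes import Language  # assumed import path, per langcodes/__init__.py

# Deprecated tags are normalized by default
assert str(Language.get('iw')) == 'he'
assert str(Language.get('sgn-US')) == 'ase'
# normalize=False keeps the tag as written
assert str(Language.get('sgn-US', normalize=False)) == 'sgn-US'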
LuminosoInsight/langcodes | langcodes/__init__.py | Language.to_tag | def to_tag(self) -> str:
"""
Convert a Language back to a standard language tag, as a string.
This is also the str() representation of a Language object.
>>> Language.make(language='en', region='GB').to_tag()
'en-GB'
>>> Language.make(language='yue', script='Hant', region='HK').to_tag()
'yue-Hant-HK'
>>> Language.make(script='Arab').to_tag()
'und-Arab'
>>> str(Language.make(region='IN'))
'und-IN'
"""
if self._str_tag is not None:
return self._str_tag
subtags = ['und']
if self.language:
subtags[0] = self.language
if self.extlangs:
for extlang in sorted(self.extlangs):
subtags.append(extlang)
if self.script:
subtags.append(self.script)
if self.region:
subtags.append(self.region)
if self.variants:
for variant in sorted(self.variants):
subtags.append(variant)
if self.extensions:
for ext in self.extensions:
subtags.append(ext)
if self.private:
subtags.append(self.private)
self._str_tag = '-'.join(subtags)
return self._str_tag | python | def to_tag(self) -> str:
"""
Convert a Language back to a standard language tag, as a string.
This is also the str() representation of a Language object.
>>> Language.make(language='en', region='GB').to_tag()
'en-GB'
>>> Language.make(language='yue', script='Hant', region='HK').to_tag()
'yue-Hant-HK'
>>> Language.make(script='Arab').to_tag()
'und-Arab'
>>> str(Language.make(region='IN'))
'und-IN'
"""
if self._str_tag is not None:
return self._str_tag
subtags = ['und']
if self.language:
subtags[0] = self.language
if self.extlangs:
for extlang in sorted(self.extlangs):
subtags.append(extlang)
if self.script:
subtags.append(self.script)
if self.region:
subtags.append(self.region)
if self.variants:
for variant in sorted(self.variants):
subtags.append(variant)
if self.extensions:
for ext in self.extensions:
subtags.append(ext)
if self.private:
subtags.append(self.private)
self._str_tag = '-'.join(subtags)
return self._str_tag | Convert a Language back to a standard language tag, as a string.
This is also the str() representation of a Language object.
>>> Language.make(language='en', region='GB').to_tag()
'en-GB'
>>> Language.make(language='yue', script='Hant', region='HK').to_tag()
'yue-Hant-HK'
>>> Language.make(script='Arab').to_tag()
'und-Arab'
>>> str(Language.make(region='IN'))
'und-IN' | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L292-L330 |
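A short sketch of tag serialization, grounded in the doctests above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

assert Language.make(language='yue', script='Hant', region='HK').to_tag() == 'yue-Hant-HK'
assert str(Language.make(region='IN')) == 'und-IN'  # str() delegates to to_tag()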
LuminosoInsight/langcodes | langcodes/__init__.py | Language.simplify_script | def simplify_script(self) -> 'Language':
"""
Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi')
"""
if self._simplified is not None:
return self._simplified
if self.language and self.script:
if DEFAULT_SCRIPTS.get(self.language) == self.script:
result = self.update_dict({'script': None})
self._simplified = result
return self._simplified
self._simplified = self
return self._simplified | python | def simplify_script(self) -> 'Language':
"""
Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi')
"""
if self._simplified is not None:
return self._simplified
if self.language and self.script:
if DEFAULT_SCRIPTS.get(self.language) == self.script:
result = self.update_dict({'script': None})
self._simplified = result
return self._simplified
self._simplified = self
return self._simplified | Remove the script from some parsed language data, if the script is
redundant with the language.
>>> Language.make(language='en', script='Latn').simplify_script()
Language.make(language='en')
>>> Language.make(language='yi', script='Latn').simplify_script()
Language.make(language='yi', script='Latn')
>>> Language.make(language='yi', script='Hebr').simplify_script()
Language.make(language='yi') | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L332-L356 |
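A sketch of simplify_script based on the doctests above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

# Latin script is redundant for English, so it is removed
assert str(Language.make(language='en', script='Latn').simplify_script()) == 'en'
# A non-default script, such as Yiddish in Latin letters, is kept
assert str(Language.make(language='yi', script='Latn').simplify_script()) == 'yi-Latn'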
LuminosoInsight/langcodes | langcodes/__init__.py | Language.assume_script | def assume_script(self) -> 'Language':
"""
Fill in the script if it's missing, and if it can be assumed from the
language subtag. This is the opposite of `simplify_script`.
>>> Language.make(language='en').assume_script()
Language.make(language='en', script='Latn')
>>> Language.make(language='yi').assume_script()
Language.make(language='yi', script='Hebr')
>>> Language.make(language='yi', script='Latn').assume_script()
Language.make(language='yi', script='Latn')
This fills in nothing when the script cannot be assumed -- such as when
the language has multiple scripts, or it has no standard orthography:
>>> Language.make(language='sr').assume_script()
Language.make(language='sr')
>>> Language.make(language='eee').assume_script()
Language.make(language='eee')
It also doesn't fill anything in when the language is unspecified.
>>> Language.make(region='US').assume_script()
Language.make(region='US')
"""
if self._assumed is not None:
return self._assumed
if self.language and not self.script:
try:
self._assumed = self.update_dict({'script': DEFAULT_SCRIPTS[self.language]})
except KeyError:
self._assumed = self
else:
self._assumed = self
return self._assumed | python | def assume_script(self) -> 'Language':
"""
Fill in the script if it's missing, and if it can be assumed from the
language subtag. This is the opposite of `simplify_script`.
>>> Language.make(language='en').assume_script()
Language.make(language='en', script='Latn')
>>> Language.make(language='yi').assume_script()
Language.make(language='yi', script='Hebr')
>>> Language.make(language='yi', script='Latn').assume_script()
Language.make(language='yi', script='Latn')
This fills in nothing when the script cannot be assumed -- such as when
the language has multiple scripts, or it has no standard orthography:
>>> Language.make(language='sr').assume_script()
Language.make(language='sr')
>>> Language.make(language='eee').assume_script()
Language.make(language='eee')
It also doesn't fill anything in when the language is unspecified.
>>> Language.make(region='US').assume_script()
Language.make(region='US')
"""
if self._assumed is not None:
return self._assumed
if self.language and not self.script:
try:
self._assumed = self.update_dict({'script': DEFAULT_SCRIPTS[self.language]})
except KeyError:
self._assumed = self
else:
self._assumed = self
return self._assumed | Fill in the script if it's missing, and if it can be assumed from the
language subtag. This is the opposite of `simplify_script`.
>>> Language.make(language='en').assume_script()
Language.make(language='en', script='Latn')
>>> Language.make(language='yi').assume_script()
Language.make(language='yi', script='Hebr')
>>> Language.make(language='yi', script='Latn').assume_script()
Language.make(language='yi', script='Latn')
This fills in nothing when the script cannot be assumed -- such as when
the language has multiple scripts, or it has no standard orthography:
>>> Language.make(language='sr').assume_script()
Language.make(language='sr')
>>> Language.make(language='eee').assume_script()
Language.make(language='eee')
It also doesn't fill anything in when the language is unspecified.
>>> Language.make(region='US').assume_script()
Language.make(region='US') | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L358-L395 |
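A sketch of assume_script, again using values from the doctests above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

assert str(Language.make(language='en').assume_script()) == 'en-Latn'
assert str(Language.make(language='yi').assume_script()) == 'yi-Hebr'
# No script is assumed for a language written in several scripts
assert str(Language.make(language='sr').assume_script()) == 'sr'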
LuminosoInsight/langcodes | langcodes/__init__.py | Language.prefer_macrolanguage | def prefer_macrolanguage(self) -> 'Language':
"""
BCP 47 doesn't specify what to do with macrolanguages and the languages
they contain. The Unicode CLDR, on the other hand, says that when a
macrolanguage has a dominant standardized language, the macrolanguage
code should be used for that language. For example, Mandarin Chinese
is 'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'.
This isn't a rule you'd want to follow in all cases -- for example, you may
want to be able to specifically say that 'ms' (the Malay macrolanguage)
contains both 'zsm' (Standard Malay) and 'id' (Indonesian). But applying
this rule helps when interoperating with the Unicode CLDR.
So, applying `prefer_macrolanguage` to a Language object will
return a new object, replacing the language with the macrolanguage if
it is the dominant language within that macrolanguage. It will leave
non-dominant languages that have macrolanguages alone.
>>> Language.get('arb').prefer_macrolanguage()
Language.make(language='ar')
>>> Language.get('cmn-Hant').prefer_macrolanguage()
Language.make(language='zh', script='Hant')
>>> Language.get('yue-Hant').prefer_macrolanguage()
Language.make(language='yue', script='Hant')
"""
if self._macrolanguage is not None:
return self._macrolanguage
language = self.language or 'und'
if language in NORMALIZED_MACROLANGUAGES:
self._macrolanguage = self.update_dict({
'language': NORMALIZED_MACROLANGUAGES[language]
})
else:
self._macrolanguage = self
return self._macrolanguage | python | def prefer_macrolanguage(self) -> 'Language':
"""
BCP 47 doesn't specify what to do with macrolanguages and the languages
they contain. The Unicode CLDR, on the other hand, says that when a
macrolanguage has a dominant standardized language, the macrolanguage
code should be used for that language. For example, Mandarin Chinese
is 'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'.
This isn't a rule you'd want to follow in all cases -- for example, you may
want to be able to specifically say that 'ms' (the Malay macrolanguage)
contains both 'zsm' (Standard Malay) and 'id' (Indonesian). But applying
this rule helps when interoperating with the Unicode CLDR.
So, applying `prefer_macrolanguage` to a Language object will
return a new object, replacing the language with the macrolanguage if
it is the dominant language within that macrolanguage. It will leave
non-dominant languages that have macrolanguages alone.
>>> Language.get('arb').prefer_macrolanguage()
Language.make(language='ar')
>>> Language.get('cmn-Hant').prefer_macrolanguage()
Language.make(language='zh', script='Hant')
>>> Language.get('yue-Hant').prefer_macrolanguage()
Language.make(language='yue', script='Hant')
"""
if self._macrolanguage is not None:
return self._macrolanguage
language = self.language or 'und'
if language in NORMALIZED_MACROLANGUAGES:
self._macrolanguage = self.update_dict({
'language': NORMALIZED_MACROLANGUAGES[language]
})
else:
self._macrolanguage = self
return self._macrolanguage | BCP 47 doesn't specify what to do with macrolanguages and the languages
they contain. The Unicode CLDR, on the other hand, says that when a
macrolanguage has a dominant standardized language, the macrolanguage
code should be used for that language. For example, Mandarin Chinese
is 'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'.
This isn't a rule you'd want to follow in all cases -- for example, you may
want to be able to specifically say that 'ms' (the Malay macrolanguage)
contains both 'zsm' (Standard Malay) and 'id' (Indonesian). But applying
this rule helps when interoperating with the Unicode CLDR.
So, applying `prefer_macrolanguage` to a Language object will
return a new object, replacing the language with the macrolanguage if
it is the dominant language within that macrolanguage. It will leave
non-dominant languages that have macrolanguages alone.
>>> Language.get('arb').prefer_macrolanguage()
Language.make(language='ar')
>>> Language.get('cmn-Hant').prefer_macrolanguage()
Language.make(language='zh', script='Hant')
>>> Language.get('yue-Hant').prefer_macrolanguage()
Language.make(language='yue', script='Hant') | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L397-L433 |
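A sketch of prefer_macrolanguage, with expected values from the doctests above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

assert str(Language.get('arb').prefer_macrolanguage()) == 'ar'
assert str(Language.get('cmn-Hant').prefer_macrolanguage()) == 'zh-Hant'
# Non-dominant members of a macrolanguage are left unchanged
assert str(Language.get('yue-Hant').prefer_macrolanguage()) == 'yue-Hant'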
LuminosoInsight/langcodes | langcodes/__init__.py | Language.broaden | def broaden(self) -> 'List[Language]':
"""
Iterate through increasingly general versions of this parsed language tag.
This isn't actually that useful for matching two arbitrary language tags
against each other, but it is useful for matching them against a known
standardized form, such as in the CLDR data.
The list of broader versions to try appears in UTR 35, section 4.3,
"Likely Subtags".
>>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden():
... print(langdata)
nn-Latn-NO-x-thingy
nn-Latn-NO
nn-NO
nn-Latn
nn
und-Latn
und
"""
if self._broader is not None:
return self._broader
self._broader = [self]
seen = set(self.to_tag())
for keyset in self.BROADER_KEYSETS:
filtered = self._filter_attributes(keyset)
tag = filtered.to_tag()
if tag not in seen:
self._broader.append(filtered)
seen.add(tag)
return self._broader | python | def broaden(self) -> 'List[Language]':
"""
Iterate through increasingly general versions of this parsed language tag.
This isn't actually that useful for matching two arbitrary language tags
against each other, but it is useful for matching them against a known
standardized form, such as in the CLDR data.
The list of broader versions to try appears in UTR 35, section 4.3,
"Likely Subtags".
>>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden():
... print(langdata)
nn-Latn-NO-x-thingy
nn-Latn-NO
nn-NO
nn-Latn
nn
und-Latn
und
"""
if self._broader is not None:
return self._broader
self._broader = [self]
seen = set(self.to_tag())
for keyset in self.BROADER_KEYSETS:
filtered = self._filter_attributes(keyset)
tag = filtered.to_tag()
if tag not in seen:
self._broader.append(filtered)
seen.add(tag)
return self._broader | Iterate through increasingly general versions of this parsed language tag.
This isn't actually that useful for matching two arbitrary language tags
against each other, but it is useful for matching them against a known
standardized form, such as in the CLDR data.
The list of broader versions to try appears in UTR 35, section 4.3,
"Likely Subtags".
>>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden():
... print(langdata)
nn-Latn-NO-x-thingy
nn-Latn-NO
nn-NO
nn-Latn
nn
und-Latn
und | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L435-L466 |
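A sketch of broaden, checking the exact fallback chain listed in the doctest above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

broadened = [str(lang) for lang in Language.get('nn-Latn-NO-x-thingy').broaden()]
assert broadened == ['nn-Latn-NO-x-thingy', 'nn-Latn-NO', 'nn-NO',
                     'nn-Latn', 'nn', 'und-Latn', 'und']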
LuminosoInsight/langcodes | langcodes/__init__.py | Language.maximize | def maximize(self) -> 'Language':
"""
The Unicode CLDR contains a "likelySubtags" data file, which can guess
reasonable values for fields that are missing from a language tag.
This is particularly useful for comparing, for example, "zh-Hant" and
"zh-TW", two common language tags that say approximately the same thing
via rather different information. (Using traditional Han characters is
not the same as being in Taiwan, but each implies that the other is
likely.)
These implications are provided in the CLDR supplemental data, and are
based on the likelihood of people using the language to transmit
information on the Internet. (This is why the overall default is English,
not Chinese.)
>>> str(Language.get('zh-Hant').maximize())
'zh-Hant-TW'
>>> str(Language.get('zh-TW').maximize())
'zh-Hant-TW'
>>> str(Language.get('ja').maximize())
'ja-Jpan-JP'
>>> str(Language.get('pt').maximize())
'pt-Latn-BR'
>>> str(Language.get('und-Arab').maximize())
'ar-Arab-EG'
>>> str(Language.get('und-CH').maximize())
'de-Latn-CH'
>>> str(Language.make().maximize()) # 'MURICA.
'en-Latn-US'
>>> str(Language.get('und-ibe').maximize())
'en-ibe-Latn-US'
"""
if self._filled is not None:
return self._filled
for broader in self.broaden():
tag = broader.to_tag()
if tag in LIKELY_SUBTAGS:
result = Language.get(LIKELY_SUBTAGS[tag], normalize=False)
result = result.update(self)
self._filled = result
return result
raise RuntimeError(
"Couldn't fill in likely values. This represents a problem with "
"the LIKELY_SUBTAGS data."
) | python | def maximize(self) -> 'Language':
"""
The Unicode CLDR contains a "likelySubtags" data file, which can guess
reasonable values for fields that are missing from a language tag.
This is particularly useful for comparing, for example, "zh-Hant" and
"zh-TW", two common language tags that say approximately the same thing
via rather different information. (Using traditional Han characters is
not the same as being in Taiwan, but each implies that the other is
likely.)
These implications are provided in the CLDR supplemental data, and are
based on the likelihood of people using the language to transmit
information on the Internet. (This is why the overall default is English,
not Chinese.)
>>> str(Language.get('zh-Hant').maximize())
'zh-Hant-TW'
>>> str(Language.get('zh-TW').maximize())
'zh-Hant-TW'
>>> str(Language.get('ja').maximize())
'ja-Jpan-JP'
>>> str(Language.get('pt').maximize())
'pt-Latn-BR'
>>> str(Language.get('und-Arab').maximize())
'ar-Arab-EG'
>>> str(Language.get('und-CH').maximize())
'de-Latn-CH'
>>> str(Language.make().maximize()) # 'MURICA.
'en-Latn-US'
>>> str(Language.get('und-ibe').maximize())
'en-ibe-Latn-US'
"""
if self._filled is not None:
return self._filled
for broader in self.broaden():
tag = broader.to_tag()
if tag in LIKELY_SUBTAGS:
result = Language.get(LIKELY_SUBTAGS[tag], normalize=False)
result = result.update(self)
self._filled = result
return result
raise RuntimeError(
"Couldn't fill in likely values. This represents a problem with "
"the LIKELY_SUBTAGS data."
) | The Unicode CLDR contains a "likelySubtags" data file, which can guess
reasonable values for fields that are missing from a language tag.
This is particularly useful for comparing, for example, "zh-Hant" and
"zh-TW", two common language tags that say approximately the same thing
via rather different information. (Using traditional Han characters is
not the same as being in Taiwan, but each implies that the other is
likely.)
These implications are provided in the CLDR supplemental data, and are
based on the likelihood of people using the language to transmit
information on the Internet. (This is why the overall default is English,
not Chinese.)
>>> str(Language.get('zh-Hant').maximize())
'zh-Hant-TW'
>>> str(Language.get('zh-TW').maximize())
'zh-Hant-TW'
>>> str(Language.get('ja').maximize())
'ja-Jpan-JP'
>>> str(Language.get('pt').maximize())
'pt-Latn-BR'
>>> str(Language.get('und-Arab').maximize())
'ar-Arab-EG'
>>> str(Language.get('und-CH').maximize())
'de-Latn-CH'
>>> str(Language.make().maximize()) # 'MURICA.
'en-Latn-US'
>>> str(Language.get('und-ibe').maximize())
'en-ibe-Latn-US' | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L477-L524 |
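A sketch of maximize, using the doctest values above to show how different tags converge to the same likely form:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

# 'zh-Hant' and 'zh-TW' fill in to the same full tag, so they become comparable
assert str(Language.get('zh-Hant').maximize()) == 'zh-Hant-TW'
assert str(Language.get('zh-TW').maximize()) == 'zh-Hant-TW'
assert str(Language.get('und-CH').maximize()) == 'de-Latn-CH'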
LuminosoInsight/langcodes | langcodes/__init__.py | Language.match_score | def match_score(self, supported: 'Language') -> int:
"""
Suppose that `self` is the language that the user desires, and
`supported` is a language that is actually supported. This method
returns a number from 0 to 100 indicating how similar the supported
language is (higher numbers are better). This is not a symmetric
relation.
The algorithm here is described (badly) in a Unicode technical report
at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
results bothersome, take it up with Unicode, unless it's particular
tweaks we implemented such as macrolanguage matching.
See :func:`tag_match_score` for a function that works on strings,
instead of requiring you to instantiate Language objects first.
Further documentation and examples appear with that function.
"""
if supported == self:
return 100
desired_complete = self.prefer_macrolanguage().maximize()
supported_complete = supported.prefer_macrolanguage().maximize()
desired_triple = (desired_complete.language, desired_complete.script, desired_complete.region)
supported_triple = (supported_complete.language, supported_complete.script, supported_complete.region)
return 100 - raw_distance(desired_triple, supported_triple) | python | def match_score(self, supported: 'Language') -> int:
"""
Suppose that `self` is the language that the user desires, and
`supported` is a language that is actually supported. This method
returns a number from 0 to 100 indicating how similar the supported
language is (higher numbers are better). This is not a symmetric
relation.
The algorithm here is described (badly) in a Unicode technical report
at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
results bothersome, take it up with Unicode, unless it's particular
tweaks we implemented such as macrolanguage matching.
See :func:`tag_match_score` for a function that works on strings,
instead of requiring you to instantiate Language objects first.
Further documentation and examples appear with that function.
"""
if supported == self:
return 100
desired_complete = self.prefer_macrolanguage().maximize()
supported_complete = supported.prefer_macrolanguage().maximize()
desired_triple = (desired_complete.language, desired_complete.script, desired_complete.region)
supported_triple = (supported_complete.language, supported_complete.script, supported_complete.region)
return 100 - raw_distance(desired_triple, supported_triple) | Suppose that `self` is the language that the user desires, and
`supported` is a language that is actually supported. This method
returns a number from 0 to 100 indicating how similar the supported
language is (higher numbers are better). This is not a symmetric
relation.
The algorithm here is described (badly) in a Unicode technical report
at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
results bothersome, take it up with Unicode, unless it's particular
tweaks we implemented such as macrolanguage matching.
See :func:`tag_match_score` for a function that works on strings,
instead of requiring you to instantiate Language objects first.
Further documentation and examples appear with that function. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L529-L555 |
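A hedged sketch of match_score; only the documented 0-100 range is asserted here, though the best_match doctest earlier reports 96 for this particular pair:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

desired = Language.get('en-AU')
supported = Language.get('en-GB')
score = desired.match_score(supported)
assert 0 <= score <= 100  # asymmetric similarity; higher means a better fit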
LuminosoInsight/langcodes | langcodes/__init__.py | Language.language_name | def language_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Give the name of the language (not the entire tag, just the language part)
in a natural language. The target language can be given as a string or
another Language object.
By default, things are named in English:
>>> Language.get('fr').language_name()
'French'
>>> Language.get('el').language_name()
'Greek'
But you can ask for language names in numerous other languages:
>>> Language.get('fr').language_name('fr')
'français'
>>> Language.get('el').language_name('fr')
'grec'
Why does everyone get Slovak and Slovenian confused? Let's ask them.
>>> Language.get('sl').language_name('sl')
'slovenščina'
>>> Language.get('sk').language_name('sk')
'slovenčina'
>>> Language.get('sl').language_name('sk')
'slovinčina'
>>> Language.get('sk').language_name('sl')
'slovaščina'
"""
return self._get_name('language', language, min_score) | python | def language_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Give the name of the language (not the entire tag, just the language part)
in a natural language. The target language can be given as a string or
another Language object.
By default, things are named in English:
>>> Language.get('fr').language_name()
'French'
>>> Language.get('el').language_name()
'Greek'
But you can ask for language names in numerous other languages:
>>> Language.get('fr').language_name('fr')
'français'
>>> Language.get('el').language_name('fr')
'grec'
Why does everyone get Slovak and Slovenian confused? Let's ask them.
>>> Language.get('sl').language_name('sl')
'slovenščina'
>>> Language.get('sk').language_name('sk')
'slovenčina'
>>> Language.get('sl').language_name('sk')
'slovinčina'
>>> Language.get('sk').language_name('sl')
'slovaščina'
"""
return self._get_name('language', language, min_score) | Give the name of the language (not the entire tag, just the language part)
in a natural language. The target language can be given as a string or
another Language object.
By default, things are named in English:
>>> Language.get('fr').language_name()
'French'
>>> Language.get('el').language_name()
'Greek'
But you can ask for language names in numerous other languages:
>>> Language.get('fr').language_name('fr')
'français'
>>> Language.get('el').language_name('fr')
'grec'
Why does everyone get Slovak and Slovenian confused? Let's ask them.
>>> Language.get('sl').language_name('sl')
'slovenščina'
>>> Language.get('sk').language_name('sk')
'slovenčina'
>>> Language.get('sl').language_name('sk')
'slovinčina'
>>> Language.get('sk').language_name('sl')
'slovaščina' | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L578-L609 |
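A sketch of language_name, with names copied from the doctests above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

assert Language.get('fr').language_name() == 'French'       # English is the default target
assert Language.get('fr').language_name('fr') == 'français'
assert Language.get('el').language_name('fr') == 'grec'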
LuminosoInsight/langcodes | langcodes/__init__.py | Language.autonym | def autonym(self, min_score: int=95) -> str:
"""
Give the name of this language *in* this language.
>>> Language.get('fr').autonym()
'français'
>>> Language.get('es').autonym()
'español'
>>> Language.get('ja').autonym()
'日本語'
This doesn't give the name of the region or script, but in some cases
the language can name itself in multiple scripts:
>>> Language.get('sr-Latn').autonym()
'srpski'
>>> Language.get('sr-Cyrl').autonym()
'српски'
>>> Language.get('pa').autonym()
'ਪੰਜਾਬੀ'
>>> Language.get('pa-Arab').autonym()
'پنجابی'
This only works for language codes that CLDR has locale data for. You
can't ask for the autonym of 'ja-Latn' and get 'nihongo'.
"""
return self.language_name(language=self, min_score=min_score) | python | def autonym(self, min_score: int=95) -> str:
"""
Give the name of this language *in* this language.
>>> Language.get('fr').autonym()
'français'
>>> Language.get('es').autonym()
'español'
>>> Language.get('ja').autonym()
'日本語'
This doesn't give the name of the region or script, but in some cases
the language can name itself in multiple scripts:
>>> Language.get('sr-Latn').autonym()
'srpski'
>>> Language.get('sr-Cyrl').autonym()
'српски'
>>> Language.get('pa').autonym()
'ਪੰਜਾਬੀ'
>>> Language.get('pa-Arab').autonym()
'پنجابی'
This only works for language codes that CLDR has locale data for. You
can't ask for the autonym of 'ja-Latn' and get 'nihongo'.
"""
return self.language_name(language=self, min_score=min_score) | Give the name of this language *in* this language.
>>> Language.get('fr').autonym()
'français'
>>> Language.get('es').autonym()
'español'
>>> Language.get('ja').autonym()
'日本語'
This doesn't give the name of the region or script, but in some cases
the language can name itself in multiple scripts:
>>> Language.get('sr-Latn').autonym()
'srpski'
>>> Language.get('sr-Cyrl').autonym()
'српски'
>>> Language.get('pa').autonym()
'ਪੰਜਾਬੀ'
>>> Language.get('pa-Arab').autonym()
'پنجابی'
This only works for language codes that CLDR has locale data for. You
can't ask for the autonym of 'ja-Latn' and get 'nihongo'. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L611-L637 |
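A sketch of autonym, using the doctest values above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

assert Language.get('ja').autonym() == '日本語'
# The same language names itself differently depending on script
assert Language.get('sr-Latn').autonym() == 'srpski'
assert Language.get('sr-Cyrl').autonym() == 'српски'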
LuminosoInsight/langcodes | langcodes/__init__.py | Language.script_name | def script_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the script part of the language tag in a natural language.
"""
return self._get_name('script', language, min_score) | python | def script_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the script part of the language tag in a natural language.
"""
return self._get_name('script', language, min_score) | Describe the script part of the language tag in a natural language. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L639-L643 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language.region_name | def region_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the region part of the language tag in a natural language.
"""
return self._get_name('region', language, min_score) | python | def region_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
"""
Describe the region part of the language tag in a natural language.
"""
return self._get_name('region', language, min_score) | Describe the region part of the language tag in a natural language. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L645-L649 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language.variant_names | def variant_names(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> list:
"""
Describe each of the variant parts of the language tag in a natural
language.
"""
names = []
for variant in self.variants:
var_names = code_to_names('variant', variant)
names.append(self._best_name(var_names, language, min_score))
return names | python | def variant_names(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> list:
"""
Describe each of the variant parts of the language tag in a natural
language.
"""
names = []
for variant in self.variants:
var_names = code_to_names('variant', variant)
names.append(self._best_name(var_names, language, min_score))
return names | Describe each of the variant parts of the language tag in a natural
language. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L651-L660 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language.describe | def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
"""
Return a dictionary that describes a given language tag in a specified
natural language.
See `language_name` and related methods for more specific versions of this.
The desired `language` will in fact be matched against the available
options using the matching technique that this module provides. We can
illustrate many aspects of this by asking for a description of Shavian
script (a script devised by author George Bernard Shaw), and where you
might find it, in various languages.
>>> from pprint import pprint
>>> shaw = Language.make(script='Shaw').maximize()
>>> pprint(shaw.describe('en'))
{'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}
>>> pprint(shaw.describe('fr'))
{'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}
>>> pprint(shaw.describe('es'))
{'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('pt'))
{'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('uk'))
{'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'}
>>> pprint(shaw.describe('arb'))
{'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'}
>>> pprint(shaw.describe('th'))
{'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'}
>>> pprint(shaw.describe('zh-Hans'))
{'language': '英语', 'region': '英国', 'script': '萧伯纳式文'}
>>> pprint(shaw.describe('zh-Hant'))
{'language': '英文', 'region': '英國', 'script': '簫柏納字符'}
>>> pprint(shaw.describe('ja'))
{'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'}
When we don't have a localization for the language, we fall back on
'und', which just shows the language codes.
>>> pprint(shaw.describe('lol'))
{'language': 'en', 'region': 'GB', 'script': 'Shaw'}
Wait, is that a real language?
>>> pprint(Language.get('lol').maximize().describe())
{'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
"""
names = {}
if self.language:
names['language'] = self.language_name(language, min_score)
if self.script:
names['script'] = self.script_name(language, min_score)
if self.region:
names['region'] = self.region_name(language, min_score)
if self.variants:
names['variants'] = self.variant_names(language, min_score)
return names | python | def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
"""
Return a dictionary that describes a given language tag in a specified
natural language.
See `language_name` and related methods for more specific versions of this.
The desired `language` will in fact be matched against the available
options using the matching technique that this module provides. We can
illustrate many aspects of this by asking for a description of Shavian
script (a script devised by author George Bernard Shaw), and where you
might find it, in various languages.
>>> from pprint import pprint
>>> shaw = Language.make(script='Shaw').maximize()
>>> pprint(shaw.describe('en'))
{'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}
>>> pprint(shaw.describe('fr'))
{'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}
>>> pprint(shaw.describe('es'))
{'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('pt'))
{'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('uk'))
{'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'}
>>> pprint(shaw.describe('arb'))
{'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'}
>>> pprint(shaw.describe('th'))
{'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'}
>>> pprint(shaw.describe('zh-Hans'))
{'language': '英语', 'region': '英国', 'script': '萧伯纳式文'}
>>> pprint(shaw.describe('zh-Hant'))
{'language': '英文', 'region': '英國', 'script': '簫柏納字符'}
>>> pprint(shaw.describe('ja'))
{'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'}
When we don't have a localization for the language, we fall back on
'und', which just shows the language codes.
>>> pprint(shaw.describe('lol'))
{'language': 'en', 'region': 'GB', 'script': 'Shaw'}
Wait, is that a real language?
>>> pprint(Language.get('lol').maximize().describe())
{'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
"""
names = {}
if self.language:
names['language'] = self.language_name(language, min_score)
if self.script:
names['script'] = self.script_name(language, min_score)
if self.region:
names['region'] = self.region_name(language, min_score)
if self.variants:
names['variants'] = self.variant_names(language, min_score)
return names | Return a dictionary that describes a given language tag in a specified
natural language.
See `language_name` and related methods for more specific versions of this.
The desired `language` will in fact be matched against the available
options using the matching technique that this module provides. We can
illustrate many aspects of this by asking for a description of Shavian
script (a script devised by author George Bernard Shaw), and where you
might find it, in various languages.
>>> from pprint import pprint
>>> shaw = Language.make(script='Shaw').maximize()
>>> pprint(shaw.describe('en'))
{'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}
>>> pprint(shaw.describe('fr'))
{'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}
>>> pprint(shaw.describe('es'))
{'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('pt'))
{'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'}
>>> pprint(shaw.describe('uk'))
{'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'}
>>> pprint(shaw.describe('arb'))
{'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'}
>>> pprint(shaw.describe('th'))
{'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'}
>>> pprint(shaw.describe('zh-Hans'))
{'language': '英语', 'region': '英国', 'script': '萧伯纳式文'}
>>> pprint(shaw.describe('zh-Hant'))
{'language': '英文', 'region': '英國', 'script': '簫柏納字符'}
>>> pprint(shaw.describe('ja'))
{'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'}
When we don't have a localization for the language, we fall back on
'und', which just shows the language codes.
>>> pprint(shaw.describe('lol'))
{'language': 'en', 'region': 'GB', 'script': 'Shaw'}
Wait, is that a real language?
>>> pprint(Language.get('lol').maximize().describe())
{'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'} | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L662-L727 |
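A sketch of describe, reproducing two of the doctest dictionaries above:

from langcodes import Language  # assumed import path, per langcodes/__init__.py

shaw = Language.make(script='Shaw').maximize()
assert shaw.describe('en') == {'language': 'English',
                               'region': 'United Kingdom',
                               'script': 'Shavian'}
# Without locale data for the target language, raw codes are returned instead of names
assert shaw.describe('lol') == {'language': 'en', 'region': 'GB', 'script': 'Shaw'}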
LuminosoInsight/langcodes | langcodes/__init__.py | Language.find_name | def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None):
"""
Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some language names resolve to more than a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak')
"""
# No matter what form of language we got, normalize it to a single
# language subtag
if isinstance(language, Language):
language = language.language
elif isinstance(language, str):
language = get(language).language
if language is None:
language = 'und'
code = name_to_code(tagtype, name, language)
if code is None:
raise LookupError("Can't find any %s named %r" % (tagtype, name))
if '-' in code:
return Language.get(code)
else:
data = {tagtype: code}
return Language.make(**data) | python | def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None):
"""
Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some language names resolve to more than just a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak')
"""
# No matter what form of language we got, normalize it to a single
# language subtag
if isinstance(language, Language):
language = language.language
elif isinstance(language, str):
language = get(language).language
if language is None:
language = 'und'
code = name_to_code(tagtype, name, language)
if code is None:
raise LookupError("Can't find any %s named %r" % (tagtype, name))
if '-' in code:
return Language.get(code)
else:
data = {tagtype: code}
return Language.make(**data) | Find the subtag of a particular `tagtype` that has the given `name`.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
In a previous version, we thought we were going to deprecate the
`language` parameter, as there weren't significant cases of conflicts
in names of things between languages. Well, we got more data, and
conflicts in names are everywhere.
Specifying the language that the name should be in is still not
required, but it will help to make sure that names can be
round-tripped.
>>> Language.find_name('language', 'francés')
Language.make(language='fr')
>>> Language.find_name('region', 'United Kingdom')
Language.make(region='GB')
>>> Language.find_name('script', 'Arabic')
Language.make(script='Arab')
>>> Language.find_name('language', 'norsk bokmål')
Language.make(language='nb')
>>> Language.find_name('language', 'norsk')
Language.make(language='no')
>>> Language.find_name('language', 'norsk', 'en')
Traceback (most recent call last):
...
LookupError: Can't find any language named 'norsk'
>>> Language.find_name('language', 'norsk', 'no')
Language.make(language='no')
>>> Language.find_name('language', 'malayo', 'en')
Language.make(language='mbp')
>>> Language.find_name('language', 'malayo', 'es')
Language.make(language='ms')
Some language names resolve to more than just a language. For example,
the name 'Brazilian Portuguese' resolves to a language and a region,
and 'Simplified Chinese' resolves to a language and a script. In these
cases, a Language object with multiple subtags will be returned.
>>> Language.find_name('language', 'Brazilian Portuguese', 'en')
Language.make(language='pt', region='BR')
>>> Language.find_name('language', 'Simplified Chinese', 'en')
Language.make(language='zh', script='Hans')
A small amount of fuzzy matching is supported: if the name can be
shortened to match a single language name, you get that language.
This allows, for example, "Hakka dialect" to match "Hakka".
>>> Language.find_name('language', 'Hakka dialect')
Language.make(language='hak') | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L730-L817 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language.to_dict | def to_dict(self):
"""
Get a dictionary of the attributes of this Language object, which
can be useful for constructing a similar object.
"""
if self._dict is not None:
return self._dict
result = {}
for key in self.ATTRIBUTES:
value = getattr(self, key)
if value:
result[key] = value
self._dict = result
return result | python | def to_dict(self):
"""
Get a dictionary of the attributes of this Language object, which
can be useful for constructing a similar object.
"""
if self._dict is not None:
return self._dict
result = {}
for key in self.ATTRIBUTES:
value = getattr(self, key)
if value:
result[key] = value
self._dict = result
return result | Get a dictionary of the attributes of this Language object, which
can be useful for constructing a similar object. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L845-L859 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language.update | def update(self, other: 'Language') -> 'Language':
"""
Update this Language with the fields of another Language.
"""
return Language.make(
language=other.language or self.language,
extlangs=other.extlangs or self.extlangs,
script=other.script or self.script,
region=other.region or self.region,
variants=other.variants or self.variants,
extensions=other.extensions or self.extensions,
private=other.private or self.private
) | python | def update(self, other: 'Language') -> 'Language':
"""
Update this Language with the fields of another Language.
"""
return Language.make(
language=other.language or self.language,
extlangs=other.extlangs or self.extlangs,
script=other.script or self.script,
region=other.region or self.region,
variants=other.variants or self.variants,
extensions=other.extensions or self.extensions,
private=other.private or self.private
) | Update this Language with the fields of another Language. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L861-L873 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language.update_dict | def update_dict(self, newdata: dict) -> 'Language':
"""
Update the attributes of this Language from a dictionary.
"""
return Language.make(
language=newdata.get('language', self.language),
extlangs=newdata.get('extlangs', self.extlangs),
script=newdata.get('script', self.script),
region=newdata.get('region', self.region),
variants=newdata.get('variants', self.variants),
extensions=newdata.get('extensions', self.extensions),
private=newdata.get('private', self.private)
) | python | def update_dict(self, newdata: dict) -> 'Language':
"""
Update the attributes of this Language from a dictionary.
"""
return Language.make(
language=newdata.get('language', self.language),
extlangs=newdata.get('extlangs', self.extlangs),
script=newdata.get('script', self.script),
region=newdata.get('region', self.region),
variants=newdata.get('variants', self.variants),
extensions=newdata.get('extensions', self.extensions),
private=newdata.get('private', self.private)
) | Update the attributes of this Language from a dictionary. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L875-L887 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language._filter_keys | def _filter_keys(d: dict, keys: set) -> dict:
"""
Select a subset of keys from a dictionary.
"""
return {key: d[key] for key in keys if key in d} | python | def _filter_keys(d: dict, keys: set) -> dict:
"""
Select a subset of keys from a dictionary.
"""
return {key: d[key] for key in keys if key in d} | Select a subset of keys from a dictionary. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L890-L894 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language._filter_attributes | def _filter_attributes(self, keyset):
"""
Return a copy of this object with a subset of its attributes set.
"""
filtered = self._filter_keys(self.to_dict(), keyset)
return Language.make(**filtered) | python | def _filter_attributes(self, keyset):
"""
Return a copy of this object with a subset of its attributes set.
"""
filtered = self._filter_keys(self.to_dict(), keyset)
return Language.make(**filtered) | Return a copy of this object with a subset of its attributes set. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L896-L901 |
LuminosoInsight/langcodes | langcodes/__init__.py | Language._searchable_form | def _searchable_form(self) -> 'Language':
"""
Convert a parsed language tag so that the information it contains is in
the best form for looking up information in the CLDR.
"""
if self._searchable is not None:
return self._searchable
self._searchable = self._filter_attributes(
{'language', 'script', 'region'}
).simplify_script().prefer_macrolanguage()
return self._searchable | python | def _searchable_form(self) -> 'Language':
"""
Convert a parsed language tag so that the information it contains is in
the best form for looking up information in the CLDR.
"""
if self._searchable is not None:
return self._searchable
self._searchable = self._filter_attributes(
{'language', 'script', 'region'}
).simplify_script().prefer_macrolanguage()
return self._searchable | Convert a parsed language tag so that the information it contains is in
the best form for looking up information in the CLDR. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/__init__.py#L903-L914 |
LuminosoInsight/langcodes | langcodes/build_data.py | resolve_name | def resolve_name(key, vals, debug=False):
"""
Given a name, and a number of possible values it could resolve to,
find the single value it should resolve to, in the following way:
- Apply the priority order
- If names with the highest priority all agree, use that name
- If there is disagreement that can be resolved by AMBIGUOUS_PREFERENCES,
use that
- Otherwise, don't resolve the name (and possibly show a debugging message
when building the data)
"""
max_priority = max([val[2] for val in vals])
val_count = Counter([val[1] for val in vals if val[2] == max_priority])
if len(val_count) == 1:
unanimous = val_count.most_common(1)
return unanimous[0][0]
for pkey in val_count:
if pkey in AMBIGUOUS_PREFERENCES:
others = set(val_count)
others.remove(pkey)
if others == others & AMBIGUOUS_PREFERENCES[pkey]:
if debug:
print("Resolved: {} -> {}".format(key, pkey))
return pkey
# In debug mode, show which languages vote for which name
if debug and max_priority >= 0:
votes = defaultdict(list)
for voter, val, prio in vals:
if prio == max_priority:
votes[val].append(voter)
print("{}:".format(key))
for val, voters in sorted(votes.items()):
print("\t{}: {}".format(val, ' '.join(voters)))
# Don't use names that remain ambiguous
return None | python | def resolve_name(key, vals, debug=False):
"""
Given a name, and a number of possible values it could resolve to,
find the single value it should resolve to, in the following way:
- Apply the priority order
- If names with the highest priority all agree, use that name
- If there is disagreement that can be resolved by AMBIGUOUS_PREFERENCES,
use that
- Otherwise, don't resolve the name (and possibly show a debugging message
when building the data)
"""
max_priority = max([val[2] for val in vals])
val_count = Counter([val[1] for val in vals if val[2] == max_priority])
if len(val_count) == 1:
unanimous = val_count.most_common(1)
return unanimous[0][0]
for pkey in val_count:
if pkey in AMBIGUOUS_PREFERENCES:
others = set(val_count)
others.remove(pkey)
if others == others & AMBIGUOUS_PREFERENCES[pkey]:
if debug:
print("Resolved: {} -> {}".format(key, pkey))
return pkey
# In debug mode, show which languages vote for which name
if debug and max_priority >= 0:
votes = defaultdict(list)
for voter, val, prio in vals:
if prio == max_priority:
votes[val].append(voter)
print("{}:".format(key))
for val, voters in sorted(votes.items()):
print("\t{}: {}".format(val, ' '.join(voters)))
# Don't use names that remain ambiguous
return None | Given a name, and a number of possible values it could resolve to,
find the single value it should resolve to, in the following way:
- Apply the priority order
- If names with the highest priority all agree, use that name
- If there is disagreement that can be resolved by AMBIGUOUS_PREFERENCES,
use that
- Otherwise, don't resolve the name (and possibly show a debugging message
when building the data) | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/build_data.py#L141-L180 |
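To make the voting scheme above concrete, here is a small self-contained sketch of the same logic with an invented AMBIGUOUS_PREFERENCES table and made-up vote tuples; each entry in `vals` is assumed to be a `(voter, value, priority)` tuple, as in the debug branch of `resolve_name`.

```python
from collections import Counter

# Invented preference table for illustration only: 'Bengali' may absorb a vote for 'Bangla'.
AMBIGUOUS_PREFERENCES = {'Bengali': {'Bangla'}}

def resolve_name_sketch(key, vals):
    # vals: list of (voter, value, priority) tuples
    max_priority = max(val[2] for val in vals)
    val_count = Counter(val[1] for val in vals if val[2] == max_priority)
    if len(val_count) == 1:                      # unanimous at the highest priority
        return val_count.most_common(1)[0][0]
    for pkey in val_count:                       # otherwise try the ambiguity preferences
        others = set(val_count) - {pkey}
        if pkey in AMBIGUOUS_PREFERENCES and others <= AMBIGUOUS_PREFERENCES[pkey]:
            return pkey
    return None                                  # still ambiguous: don't resolve

print(resolve_name_sketch('bn', [('en', 'Bengali', 0), ('en-GB', 'Bengali', 0)]))  # Bengali
print(resolve_name_sketch('bn', [('en', 'Bengali', 0), ('en-IN', 'Bangla', 0)]))   # Bengali (via preferences)
```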
LuminosoInsight/langcodes | langcodes/build_data.py | read_cldr_names | def read_cldr_names(path, language, category):
"""
Read CLDR's names for things in a particular language.
"""
filename = data_filename('{}/{}/{}.json'.format(path, language, category))
fulldata = json.load(open(filename, encoding='utf-8'))
data = fulldata['main'][language]['localeDisplayNames'][category]
return data | python | def read_cldr_names(path, language, category):
"""
Read CLDR's names for things in a particular language.
"""
filename = data_filename('{}/{}/{}.json'.format(path, language, category))
fulldata = json.load(open(filename, encoding='utf-8'))
data = fulldata['main'][language]['localeDisplayNames'][category]
return data | Read CLDR's names for things in a particular language. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/build_data.py#L192-L199 |
LuminosoInsight/langcodes | langcodes/registry_parser.py | parse_file | def parse_file(file):
"""
Take an open file containing the IANA subtag registry, and yield a
dictionary of information for each subtag it describes.
"""
lines = []
for line in file:
line = line.rstrip('\n')
if line == '%%':
# This is a separator between items. Parse the data we've
# collected and yield the result.
yield from parse_item(lines)
lines.clear()
elif line.startswith(' '):
# This is a continuation line. Concatenate it to the previous
# line, including one of the spaces.
lines[-1] += line[1:]
else:
lines.append(line)
yield from parse_item(lines) | python | def parse_file(file):
"""
Take an open file containing the IANA subtag registry, and yield a
dictionary of information for each subtag it describes.
"""
lines = []
for line in file:
line = line.rstrip('\n')
if line == '%%':
# This is a separator between items. Parse the data we've
# collected and yield the result.
yield from parse_item(lines)
lines.clear()
elif line.startswith(' '):
# This is a continuation line. Concatenate it to the previous
# line, including one of the spaces.
lines[-1] += line[1:]
else:
lines.append(line)
yield from parse_item(lines) | Take an open file containing the IANA subtag registry, and yield a
dictionary of information for each subtag it describes. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/registry_parser.py#L6-L25 |
LuminosoInsight/langcodes | langcodes/registry_parser.py | parse_item | def parse_item(lines):
"""
Given the lines that form a subtag entry (after joining wrapped lines
back together), parse the data they contain.
Returns a generator that yields once if there was any data there
(and an empty generator if this was just the header).
"""
info = {}
for line in lines:
key, value = line.split(': ', 1)
if key in LIST_KEYS:
info.setdefault(key, []).append(value)
else:
assert key not in info
info[key] = value
if 'Subtag' in info or 'Tag' in info:
yield info | python | def parse_item(lines):
"""
Given the lines that form a subtag entry (after joining wrapped lines
back together), parse the data they contain.
Returns a generator that yields once if there was any data there
(and an empty generator if this was just the header).
"""
info = {}
for line in lines:
key, value = line.split(': ', 1)
if key in LIST_KEYS:
info.setdefault(key, []).append(value)
else:
assert key not in info
info[key] = value
if 'Subtag' in info or 'Tag' in info:
yield info | Given the lines that form a subtag entry (after joining wrapped lines
back together), parse the data they contain.
Returns a generator that yields once if there was any data there
(and an empty generator if this was just the header). | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/registry_parser.py#L28-L46 |
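As a usage sketch, feeding `parse_file` a hand-written excerpt in the registry's own format (one `%%`-separated record per subtag) yields a dict per entry; the header block carries no `Subtag`, so `parse_item` skips it. The excerpt below is illustrative, not the real registry.

```python
import io
from langcodes.registry_parser import parse_file

registry_excerpt = """File-Date: 2016-02-10
%%
Type: language
Subtag: aa
Description: Afar
Added: 2005-10-16
%%
Type: language
Subtag: ab
Description: Abkhazian
Added: 2005-10-16
"""

for entry in parse_file(io.StringIO(registry_excerpt)):
    print(entry['Subtag'])
# prints 'aa' and then 'ab'; the File-Date header produces no entry
```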
LuminosoInsight/langcodes | langcodes/distance.py | _make_simple_distances | def _make_simple_distances():
"""
This is a translation of the non-wildcard rules in
http://www.unicode.org/cldr/charts/29/supplemental/language_matching.html.
It defines a few functions to make the chart easy to mindlessly transcribe,
instead of having to parse it out of idiosyncratic XML or HTML, which
actually seems harder.
"""
distances = {}
def sym(desired, supported, strength):
"Define a symmetric distance between languages."
desired_t = tuple(desired.split('-'))
supported_t = tuple(supported.split('-'))
distances[desired_t, supported_t] = strength
distances[supported_t, desired_t] = strength
def one(desired, supported, strength):
"Define a one-way distance between languages."
desired_t = tuple(desired.split('-'))
supported_t = tuple(supported.split('-'))
distances[desired_t, supported_t] = strength
def ok(desired, supported):
"Define the most common type of link: a one-way distance of 10."
one(desired, supported, 10)
sym('no', 'nb', 1)
sym('hr', 'bs', 4)
sym('sh', 'bs', 4)
sym('sr', 'bs', 4)
sym('sh', 'hr', 4)
sym('sr', 'hr', 4)
sym('sh', 'sr', 4)
sym('ssy', 'aa', 4)
one('gsw', 'de', 4)
one('lb', 'de', 4)
sym('da', 'no', 8)
sym('da', 'nb', 8)
ok('ab', 'ru')
ok('ach', 'en')
ok('af', 'nl')
ok('ak', 'en')
ok('ay', 'es')
ok('az', 'ru')
ok('az-Latn', 'ru-Cyrl')
ok('be', 'ru')
ok('bem', 'en')
ok('bh', 'hi')
ok('bn', 'en')
ok('bn-Beng', 'en-Latn')
ok('br', 'fr')
ok('ceb', 'fil')
ok('chr', 'en')
ok('ckb', 'ar')
ok('co', 'fr')
ok('crs', 'fr')
ok('cy', 'en')
ok('ee', 'en')
ok('eo', 'en')
ok('et', 'fi')
ok('eu', 'es')
ok('fo', 'da')
ok('fy', 'nl')
ok('ga', 'en')
ok('gaa', 'en')
ok('gd', 'en')
ok('gl', 'es')
ok('gn', 'es')
ok('gu', 'hi')
ok('ha', 'en')
ok('haw', 'en')
ok('ht', 'fr')
ok('hy', 'ru')
ok('hy-Armn', 'ru-Cyrl')
ok('ia', 'en')
ok('ig', 'en')
ok('is', 'en')
ok('jv', 'id')
ok('ka-Geor', 'en-Latn')
ok('ka', 'en')
ok('kg', 'fr')
ok('kk', 'ru')
ok('km', 'en')
ok('km-Khmr', 'en-Latn')
ok('kn', 'en')
ok('kn-Knda', 'en-Latn')
ok('kri', 'en')
ok('ku', 'tr')
ok('ky', 'ru')
ok('la', 'it')
ok('lg', 'en')
ok('ln', 'fr')
ok('lo', 'en')
ok('lo-Laoo', 'en-Latn')
ok('loz', 'en')
ok('lua', 'fr')
ok('mfe', 'en')
ok('mg', 'fr')
ok('mi', 'en')
ok('mk', 'bg')
ok('ml', 'en')
ok('ml-Mlym', 'en-Latn')
ok('mn', 'ru')
ok('mr', 'hi')
ok('ms', 'id')
ok('mt', 'en')
ok('my', 'en')
ok('my-Mymr', 'en-Latn')
ok('ne', 'en')
ok('ne-Deva', 'en-Latn')
sym('nn', 'nb', 10)
ok('nn', 'no')
ok('nso', 'en')
ok('ny', 'en')
ok('nyn', 'en')
ok('oc', 'fr')
ok('om', 'en')
ok('or', 'en')
ok('or-Orya', 'en-Latn')
ok('pa', 'en')
ok('pa-Guru', 'en-Latn')
ok('pcm', 'en')
ok('ps', 'en')
ok('ps-Arab', 'en-Latn')
ok('qu', 'es')
ok('rm', 'de')
ok('rn', 'en')
ok('rw', 'fr')
ok('sa', 'hi')
ok('sd', 'en')
ok('sd-Arab', 'en-Latn')
ok('si', 'en')
ok('si-Sinh', 'en-Latn')
ok('sn', 'en')
ok('so', 'en')
ok('sq', 'en')
ok('st', 'en')
ok('su', 'id')
ok('sw', 'en')
ok('ta', 'en')
ok('ta-Taml', 'en-Latn')
ok('te', 'en')
ok('te-Telu', 'en-Latn')
ok('tg', 'ru')
ok('ti', 'en')
ok('ti-Ethi', 'en-Latn')
ok('tk', 'ru')
ok('tk-Latn', 'ru-Cyrl')
ok('tlh', 'en')
ok('tn', 'en')
ok('to', 'en')
ok('tt', 'ru')
ok('tum', 'en')
ok('ug', 'zh')
ok('ur', 'en')
ok('ur-Arab', 'en-Latn')
ok('uz', 'ru')
ok('uz-Latn', 'ru-Cyrl')
ok('wo', 'fr')
ok('xh', 'en')
ok('yi', 'en')
ok('yi-Hebr', 'en-Latn')
ok('yo', 'en')
ok('zu', 'en')
sym('sr-Latn', 'sr-Cyrl', 5)
one('zh-Hans', 'zh-Hant', 15)
one('zh-Hant', 'zh-Hans', 19)
sym('zh-Hant-HK', 'zh-Hant-MO', 3)
return distances | python | def _make_simple_distances():
"""
This is a translation of the non-wildcard rules in
http://www.unicode.org/cldr/charts/29/supplemental/language_matching.html.
It defines a few functions to make the chart easy to mindlessly transcribe,
instead of having to parse it out of idiosyncratic XML or HTML, which
actually seems harder.
"""
distances = {}
def sym(desired, supported, strength):
"Define a symmetric distance between languages."
desired_t = tuple(desired.split('-'))
supported_t = tuple(supported.split('-'))
distances[desired_t, supported_t] = strength
distances[supported_t, desired_t] = strength
def one(desired, supported, strength):
"Define a one-way distance between languages."
desired_t = tuple(desired.split('-'))
supported_t = tuple(supported.split('-'))
distances[desired_t, supported_t] = strength
def ok(desired, supported):
"Define the most common type of link: a one-way distance of 10."
one(desired, supported, 10)
sym('no', 'nb', 1)
sym('hr', 'bs', 4)
sym('sh', 'bs', 4)
sym('sr', 'bs', 4)
sym('sh', 'hr', 4)
sym('sr', 'hr', 4)
sym('sh', 'sr', 4)
sym('ssy', 'aa', 4)
one('gsw', 'de', 4)
one('lb', 'de', 4)
sym('da', 'no', 8)
sym('da', 'nb', 8)
ok('ab', 'ru')
ok('ach', 'en')
ok('af', 'nl')
ok('ak', 'en')
ok('ay', 'es')
ok('az', 'ru')
ok('az-Latn', 'ru-Cyrl')
ok('be', 'ru')
ok('bem', 'en')
ok('bh', 'hi')
ok('bn', 'en')
ok('bn-Beng', 'en-Latn')
ok('br', 'fr')
ok('ceb', 'fil')
ok('chr', 'en')
ok('ckb', 'ar')
ok('co', 'fr')
ok('crs', 'fr')
ok('cy', 'en')
ok('ee', 'en')
ok('eo', 'en')
ok('et', 'fi')
ok('eu', 'es')
ok('fo', 'da')
ok('fy', 'nl')
ok('ga', 'en')
ok('gaa', 'en')
ok('gd', 'en')
ok('gl', 'es')
ok('gn', 'es')
ok('gu', 'hi')
ok('ha', 'en')
ok('haw', 'en')
ok('ht', 'fr')
ok('hy', 'ru')
ok('hy-Armn', 'ru-Cyrl')
ok('ia', 'en')
ok('ig', 'en')
ok('is', 'en')
ok('jv', 'id')
ok('ka-Geor', 'en-Latn')
ok('ka', 'en')
ok('kg', 'fr')
ok('kk', 'ru')
ok('km', 'en')
ok('km-Khmr', 'en-Latn')
ok('kn', 'en')
ok('kn-Knda', 'en-Latn')
ok('kri', 'en')
ok('ku', 'tr')
ok('ky', 'ru')
ok('la', 'it')
ok('lg', 'en')
ok('ln', 'fr')
ok('lo', 'en')
ok('lo-Laoo', 'en-Latn')
ok('loz', 'en')
ok('lua', 'fr')
ok('mfe', 'en')
ok('mg', 'fr')
ok('mi', 'en')
ok('mk', 'bg')
ok('ml', 'en')
ok('ml-Mlym', 'en-Latn')
ok('mn', 'ru')
ok('mr', 'hi')
ok('ms', 'id')
ok('mt', 'en')
ok('my', 'en')
ok('my-Mymr', 'en-Latn')
ok('ne', 'en')
ok('ne-Deva', 'en-Latn')
sym('nn', 'nb', 10)
ok('nn', 'no')
ok('nso', 'en')
ok('ny', 'en')
ok('nyn', 'en')
ok('oc', 'fr')
ok('om', 'en')
ok('or', 'en')
ok('or-Orya', 'en-Latn')
ok('pa', 'en')
ok('pa-Guru', 'en-Latn')
ok('pcm', 'en')
ok('ps', 'en')
ok('ps-Arab', 'en-Latn')
ok('qu', 'es')
ok('rm', 'de')
ok('rn', 'en')
ok('rw', 'fr')
ok('sa', 'hi')
ok('sd', 'en')
ok('sd-Arab', 'en-Latn')
ok('si', 'en')
ok('si-Sinh', 'en-Latn')
ok('sn', 'en')
ok('so', 'en')
ok('sq', 'en')
ok('st', 'en')
ok('su', 'id')
ok('sw', 'en')
ok('ta', 'en')
ok('ta-Taml', 'en-Latn')
ok('te', 'en')
ok('te-Telu', 'en-Latn')
ok('tg', 'ru')
ok('ti', 'en')
ok('ti-Ethi', 'en-Latn')
ok('tk', 'ru')
ok('tk-Latn', 'ru-Cyrl')
ok('tlh', 'en')
ok('tn', 'en')
ok('to', 'en')
ok('tt', 'ru')
ok('tum', 'en')
ok('ug', 'zh')
ok('ur', 'en')
ok('ur-Arab', 'en-Latn')
ok('uz', 'ru')
ok('uz-Latn', 'ru-Cyrl')
ok('wo', 'fr')
ok('xh', 'en')
ok('yi', 'en')
ok('yi-Hebr', 'en-Latn')
ok('yo', 'en')
ok('zu', 'en')
sym('sr-Latn', 'sr-Cyrl', 5)
one('zh-Hans', 'zh-Hant', 15)
one('zh-Hant', 'zh-Hans', 19)
sym('zh-Hant-HK', 'zh-Hant-MO', 3)
return distances | This is a translation of the non-wildcard rules in
http://www.unicode.org/cldr/charts/29/supplemental/language_matching.html.
It defines a few functions to make the chart easy to mindlessly transcribe,
instead of having to parse it out of idiosyncratic XML or HTML, which
actually seems harder. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/distance.py#L12-L182 |
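The return value is a plain dict keyed by `(desired, supported)` tuples of subtag tuples, so the transcribed rules can be inspected directly; a minimal sketch, assuming the private helper is called as-is:

```python
from langcodes.distance import _make_simple_distances

distances = _make_simple_distances()
print(distances[('no',), ('nb',)])            # 1  - added symmetrically by sym()
print(distances[('gsw',), ('de',)])           # 4  - one-way rule added by one()
print((('de',), ('gsw',)) in distances)       # False - the reverse direction was never defined
```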
LuminosoInsight/langcodes | langcodes/names.py | normalize_name | def normalize_name(name):
"""
When looking up a language-code component by name, we would rather ignore
distinctions of case and certain punctuation. "Chinese (Traditional)"
should be matched by "Chinese Traditional" and "chinese traditional".
"""
name = name.casefold()
name = name.replace("’", "'")
name = name.replace("-", " ")
name = name.replace("(", "")
name = name.replace(")", "")
name = name.replace(",", "")
return name.strip() | python | def normalize_name(name):
"""
When looking up a language-code component by name, we would rather ignore
distinctions of case and certain punctuation. "Chinese (Traditional)"
should be matched by "Chinese Traditional" and "chinese traditional".
"""
name = name.casefold()
name = name.replace("’", "'")
name = name.replace("-", " ")
name = name.replace("(", "")
name = name.replace(")", "")
name = name.replace(",", "")
return name.strip() | When looking up a language-code component by name, we would rather ignore
distinctions of case and certain punctuation. "Chinese (Traditional)"
should be matched by "Chinese Traditional" and "chinese traditional". | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/names.py#L12-L24 |
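A tiny check of the normalization, matching the docstring's own example:

```python
from langcodes.names import normalize_name

print(normalize_name("Chinese (Traditional)"))   # chinese traditional
print(normalize_name("Sranan-Tongo"))            # sranan tongo (hyphen becomes a space)
```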
LuminosoInsight/langcodes | langcodes/names.py | load_trie | def load_trie(filename):
"""
Load a BytesTrie from the marisa_trie on-disk format.
"""
trie = marisa_trie.BytesTrie()
# marisa_trie raises warnings that make no sense. Ignore them.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trie.load(filename)
return trie | python | def load_trie(filename):
"""
Load a BytesTrie from the marisa_trie on-disk format.
"""
trie = marisa_trie.BytesTrie()
# marisa_trie raises warnings that make no sense. Ignore them.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trie.load(filename)
return trie | Load a BytesTrie from the marisa_trie on-disk format. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/names.py#L27-L36 |
LuminosoInsight/langcodes | langcodes/names.py | name_to_code | def name_to_code(category, name, language: str='und'):
"""
Get a language, script, or region by its name in some language.
The language here must be a string representing a language subtag only.
The `Language.find` method can handle other representations of a language
and normalize them to this form.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
A small amount of fuzzy matching is supported: if the name can be
shortened or lengthened to match a single language name, you get that
language. This allows, for example, "Hakka Chinese" to match "Hakka".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
"""
assert '/' not in language, "Language codes cannot contain slashes"
assert '-' not in language, "This code should be reduced to a language subtag only"
trie_name = '{}/name_to_{}'.format(language, category)
if trie_name not in TRIES:
TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name)))
trie = TRIES[trie_name]
lookup = normalize_name(name)
if lookup in trie:
return get_trie_value(trie, lookup)
else:
# Is this a language plus extra junk? Maybe it has "...isch", "... language",
# or "... Chinese" attached to it, for example.
prefixes = trie.prefixes(lookup)
if prefixes and len(prefixes[-1]) >= 4:
return get_trie_value(trie, prefixes[-1])
else:
return None | python | def name_to_code(category, name, language: str='und'):
"""
Get a language, script, or region by its name in some language.
The language here must be a string representing a language subtag only.
The `Language.find` method can handle other representations of a language
and normalize them to this form.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
A small amount of fuzzy matching is supported: if the name can be
shortened or lengthened to match a single language name, you get that
language. This allows, for example, "Hakka Chinese" to match "Hakka".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language.
"""
assert '/' not in language, "Language codes cannot contain slashes"
assert '-' not in language, "This code should be reduced to a language subtag only"
trie_name = '{}/name_to_{}'.format(language, category)
if trie_name not in TRIES:
TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name)))
trie = TRIES[trie_name]
lookup = normalize_name(name)
if lookup in trie:
return get_trie_value(trie, lookup)
else:
# Is this a language plus extra junk? Maybe it has "...isch", "... language",
# or "... Chinese" attached to it, for example.
prefixes = trie.prefixes(lookup)
if prefixes and len(prefixes[-1]) >= 4:
return get_trie_value(trie, prefixes[-1])
else:
return None | Get a language, script, or region by its name in some language.
The language here must be a string representing a language subtag only.
The `Language.find` method can handle other representations of a language
and normalize them to this form.
The default language, "und", will allow matching names in any language,
so you can get the code 'fr' by looking up "French", "Français", or
"francés".
A small amount of fuzzy matching is supported: if the name can be
shortened or lengthened to match a single language name, you get that
language. This allows, for example, "Hakka Chinese" to match "Hakka".
Occasionally, names are ambiguous in a way that can be resolved by
specifying what name the language is supposed to be in. For example,
there is a language named 'Malayo' in English, but it's different from
the language named 'Malayo' in Spanish (which is Malay). Specifying the
language will look up the name in a trie that is only in that language. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/names.py#L47-L86 |
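A usage sketch of the lookups described above; it assumes the packaged trie data is installed, and the expected outputs follow the doctests shown earlier in this dump:

```python
from langcodes.names import name_to_code

print(name_to_code('language', 'French'))           # fr
print(name_to_code('language', 'francés'))          # fr  - the default 'und' trie matches names in any language
print(name_to_code('language', 'Hakka Chinese'))    # hak - resolved through the prefix-based fuzzy match
print(name_to_code('language', 'Malayo', 'es'))     # ms  - the Spanish-only trie disambiguates
```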
LuminosoInsight/langcodes | langcodes/names.py | code_to_names | def code_to_names(category, code):
"""
Given the code for a language, script, or region, get a dictionary of its
names in various languages.
"""
trie_name = '{}_to_name'.format(category)
if trie_name not in TRIES:
TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name)))
trie = TRIES[trie_name]
lookup = code.lower() + '@'
possible_keys = trie.keys(lookup)
names = {}
for key in possible_keys:
target_language = key.split('@')[1]
names[target_language] = get_trie_value(trie, key)
return names | python | def code_to_names(category, code):
"""
Given the code for a language, script, or region, get a dictionary of its
names in various languages.
"""
trie_name = '{}_to_name'.format(category)
if trie_name not in TRIES:
TRIES[trie_name] = load_trie(data_filename('trie/{}.marisa'.format(trie_name)))
trie = TRIES[trie_name]
lookup = code.lower() + '@'
possible_keys = trie.keys(lookup)
names = {}
for key in possible_keys:
target_language = key.split('@')[1]
names[target_language] = get_trie_value(trie, key)
return names | Given the code for a language, script, or region, get a dictionary of its
names in various languages. | https://github.com/LuminosoInsight/langcodes/blob/0cedf9ca257ebf7250de5d3a63ec33a7d198db58/langcodes/names.py#L89-L105 |
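And the reverse lookup; the exact set of languages returned depends on the packaged CLDR data, so the values shown are only illustrative:

```python
from langcodes.names import code_to_names

names = code_to_names('language', 'fr')
print(names.get('en'), '/', names.get('fr'))   # e.g. French / français
```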
Ezhil-Language-Foundation/open-tamil | tamil/txt2unicode/unicode2encode.py | unicode2encode | def unicode2encode(text, charmap):
'''
charmap : dictionary with the encoded character as key and the unicode character as value
'''
if isinstance(text, (list, tuple)):
unitxt = ''
for line in text:
for val,key in charmap.items():
if key in line:
line = line.replace(key, val)
# end of if val in text:
unitxt += line
# end of for line in text:
return unitxt
elif isinstance(text, str):
for val,key in charmap.items():
if key in text:
text = text.replace(key, val)
return text | python | def unicode2encode(text, charmap):
'''
charmap : dictionary with the encoded character as key and the unicode character as value
'''
if isinstance(text, (list, tuple)):
unitxt = ''
for line in text:
for val,key in charmap.items():
if key in line:
line = line.replace(key, val)
# end of if val in text:
unitxt += line
# end of for line in text:
return unitxt
elif isinstance(text, str):
for val,key in charmap.items():
if key in text:
text = text.replace(key, val)
return text | charmap : dictionary with the encoded character as key and the unicode character as value | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/txt2unicode/unicode2encode.py#L47-L65 |
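A toy sketch of the replacement loop above: the charmap keys are the legacy encoding's byte sequences and the values are the unicode strings they stand for. The mapping below is invented purely for illustration and is not a real Tamil encoding table.

```python
from tamil.txt2unicode.unicode2encode import unicode2encode

# invented mapping: pretend the legacy encoding writes 'அ' as 'A' and 'ம்' as 'm'
toy_charmap = {'A': u'அ', 'm': u'ம்'}

print(unicode2encode(u'அம்அ', toy_charmap))   # AmA
```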
Ezhil-Language-Foundation/open-tamil | tamil/txt2unicode/unicode2encode.py | unicode2auto | def unicode2auto(unicode_text, encode_text):
"""
This function converts the unicode text (first argument) into another
encoding by automatically finding that encoding, among the available
encodings, using the sample encoded text passed as the second argument.
unicode_text : unicode string that has to be converted into the other encoding.
encode_text : sample encoded string used to identify the suitable encoding.
The function tries to identify the encoding among the available encodings.
If it finds one, it converts unicode_text into the encoded string.
Author : Arulalan.T
08.08.2014
"""
_all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
# get unique word which falls under any one of available encodes from
# user passed text lines
unique_chars = _get_unique_ch(encode_text, _all_common_encodes_)
# count common encode chars
clen = len(_all_common_encodes_)
msg = "Sorry, couldn't find encode :-(\n"
msg += 'Need more words to find unique encode out side of %d ' % clen
msg += 'common compound characters'
if not unique_chars:
print(msg)
return ''
# end of if not unique_chars:
for encode_name, encode_keys in _all_unique_encodes_:
if not len(encode_keys): continue
for ch in encode_keys:
# check whether the encode char is present in the word
if ch in unique_chars:
# found encode
print(("Found encode : ", encode_name))
encode = _all_encodes_[encode_name]
return unicode2encode(unicode_text, encode)
# end of if ch in unique_chars:
# end of ifor ch in encode_keys:
else:
print(msg)
return '' | python | def unicode2auto(unicode_text, encode_text):
"""
This function converts the unicode text (first argument) into another
encoding by automatically finding that encoding, among the available
encodings, using the sample encoded text passed as the second argument.
unicode_text : unicode string that has to be converted into the other encoding.
encode_text : sample encoded string used to identify the suitable encoding.
The function tries to identify the encoding among the available encodings.
If it finds one, it converts unicode_text into the encoded string.
Author : Arulalan.T
08.08.2014
"""
_all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
# get unique word which falls under any one of available encodes from
# user passed text lines
unique_chars = _get_unique_ch(encode_text, _all_common_encodes_)
# count common encode chars
clen = len(_all_common_encodes_)
msg = "Sorry, couldn't find encode :-(\n"
msg += 'Need more words to find unique encode out side of %d ' % clen
msg += 'common compound characters'
if not unique_chars:
print(msg)
return ''
# end of if not unique_chars:
for encode_name, encode_keys in _all_unique_encodes_:
if not len(encode_keys): continue
for ch in encode_keys:
# check whether the encode char is present in the word
if ch in unique_chars:
# found encode
print(("Found encode : ", encode_name))
encode = _all_encodes_[encode_name]
return unicode2encode(unicode_text, encode)
# end of if ch in unique_chars:
# end of ifor ch in encode_keys:
else:
print(msg)
return '' | This function converts the unicode text (first argument) into another
encoding by automatically finding that encoding, among the available
encodings, using the sample encoded text passed as the second argument.
unicode_text : unicode string that has to be converted into the other encoding.
encode_text : sample encoded string used to identify the suitable encoding.
The function tries to identify the encoding among the available encodings.
If it finds one, it converts unicode_text into the encoded string.
Author : Arulalan.T
08.08.2014 | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/txt2unicode/unicode2encode.py#L151-L196 |
Ezhil-Language-Foundation/open-tamil | examples/tamilmorse/huffman.py | print_huffman_code_cwl | def print_huffman_code_cwl(code,p,v):
""" code - code dictionary with symbol -> code map, p, v is probability map """
cwl = 0.0
for k,_v in code.items():
print(u"%s -> %s"%(k,_v))
cwl += p[v.index(k)]*len(_v)
print(u"cwl = %g"%cwl)
return cwl,code.values() | python | def print_huffman_code_cwl(code,p,v):
""" code - code dictionary with symbol -> code map, p, v is probability map """
cwl = 0.0
for k,_v in code.items():
print(u"%s -> %s"%(k,_v))
cwl += p[v.index(k)]*len(_v)
print(u"cwl = %g"%cwl)
return cwl,code.values() | code - dictionary mapping symbol -> codeword; p - list of probabilities; v - list of symbols aligned with p | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/examples/tamilmorse/huffman.py#L77-L84 |
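The `cwl` accumulator above is the expected codeword length, sum(p[i] * len(code[v[i]])); a small self-contained check with a made-up prefix code (no import needed, this just mirrors the same arithmetic):

```python
code = {u'அ': '0', u'ம': '10', u'த': '11'}   # made-up prefix code
v = [u'அ', u'ம', u'த']                       # symbols, aligned with p
p = [0.5, 0.25, 0.25]                        # their probabilities

cwl = sum(p[v.index(sym)] * len(bits) for sym, bits in code.items())
print(cwl)   # 1.5 - the value print_huffman_code_cwl would report for these inputs
```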
Ezhil-Language-Foundation/open-tamil | ngram/Distance.py | edit_distance | def edit_distance(wordA,wordB):
"""" Implements Daegmar-Levenshtein edit distance algorithm:
Ref: https://en.wikipedia.org/wiki/Edit_distance
Ref: https://en.wikipedia.org/wiki/Levenshtein_distance"""
if not type(wordA) is list:
lettersA = tamil.utf8.get_letters(wordA)
else:
lettersA = wordA
if not type(wordB) is list:
lettersB = tamil.utf8.get_letters(wordB)
else:
lettersB = wordB
n_A = len(lettersA)
n_B = len(lettersB)
dist_AB = [[0 for i in range(0,n_B+1)] for i in range(0,(n_A+1))]
# Target prefix reached by insertion
for j in range(1,n_B+1):
dist_AB[0][j] = j
for i in range(1,n_A+1):
dist_AB[i][0] = i
for j in range(1,n_B+1):
for i in range(1,n_A+1):
if (lettersA[i-1] == lettersB[j-1]):
new_dist = dist_AB[i-1][j-1]
else:
new_dist = min( [dist_AB[i-1][j]+1, dist_AB[i][j-1]+1, dist_AB[i-1][j-1]+1] ) #del, ins, or sub
dist_AB[i][j] = new_dist
return dist_AB[-1][-1] | python | def edit_distance(wordA,wordB):
"""" Implements Daegmar-Levenshtein edit distance algorithm:
Ref: https://en.wikipedia.org/wiki/Edit_distance
Ref: https://en.wikipedia.org/wiki/Levenshtein_distance"""
if not type(wordA) is list:
lettersA = tamil.utf8.get_letters(wordA)
else:
lettersA = wordA
if not type(wordB) is list:
lettersB = tamil.utf8.get_letters(wordB)
else:
lettersB = wordB
n_A = len(lettersA)
n_B = len(lettersB)
dist_AB = [[0 for i in range(0,n_B+1)] for i in range(0,(n_A+1))]
# Target prefix reached by insertion
for j in range(1,n_B+1):
dist_AB[0][j] = j
for i in range(1,n_A+1):
dist_AB[i][0] = i
for j in range(1,n_B+1):
for i in range(1,n_A+1):
if (lettersA[i-1] == lettersB[j-1]):
new_dist = dist_AB[i-1][j-1]
else:
new_dist = min( [dist_AB[i-1][j]+1, dist_AB[i][j-1]+1, dist_AB[i-1][j-1]+1] ) #del, ins, or sub
dist_AB[i][j] = new_dist
return dist_AB[-1][-1] | Implements the Levenshtein edit distance algorithm:
Ref: https://en.wikipedia.org/wiki/Edit_distance
Ref: https://en.wikipedia.org/wiki/Levenshtein_distance | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/ngram/Distance.py#L10-L38 |
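A quick check of the recurrence; the import path below assumes `ngram/Distance.py` is importable as a module, and passing plain character lists sidesteps the Tamil letter splitting:

```python
from ngram.Distance import edit_distance

print(edit_distance(list('kitten'), list('sitting')))   # 3 - substitute k->s, e->i, insert g
print(edit_distance(u'அவன்', u'அவள்'))                   # 1 - only the final letter differs
```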
Ezhil-Language-Foundation/open-tamil | ngram/Distance.py | Dice_coeff | def Dice_coeff(wordA,wordB):
"""
# Calculate similarity - Implements the Dice coefficient
# Ref: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
# The score is between 0 and 1.0 and can be used as a similarity match
"""
if not type(wordA) is list:
lettersA = tamil.utf8.get_letters(wordA)
else:
lettersA = wordA
if not type(wordB) is list:
lettersB = tamil.utf8.get_letters(wordB)
else:
lettersB = wordB
n_A = len(lettersA)
n_B = len(lettersB)
# OK only if unique - set(lettersA).intersection(set(lettersB))
n_AB = len( list( filter( lambda cmnL: cmnL in lettersB, lettersA) ) )
return (2.0*n_AB)/(n_A+n_B) | python | def Dice_coeff(wordA,wordB):
"""
# Calculate similarity - Implements the Dice coefficient
# Ref: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
# The score is between 0 and 1.0 and can be used as a similarity match
"""
if not type(wordA) is list:
lettersA = tamil.utf8.get_letters(wordA)
else:
lettersA = wordA
if not type(wordB) is list:
lettersB = tamil.utf8.get_letters(wordB)
else:
lettersB = wordB
n_A = len(lettersA)
n_B = len(lettersB)
# OK only if unique - set(lettersA).intersection(set(lettersB))
n_AB = len( list( filter( lambda cmnL: cmnL in lettersB, lettersA) ) )
return (2.0*n_AB)/(n_A+n_B) | # Calculate similarity - Implements the Dice coefficient
# Ref: https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient
# The score is between 0 and 1.0 and can be used as a similarity match | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/ngram/Distance.py#L43-L62 |
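A worked instance of the 2*|A∩B| / (|A|+|B|) formula, with the same import caveat: for 'night' vs 'nacht' the letters n, h and t are shared, giving 2*3/10.

```python
from ngram.Distance import Dice_coeff

print(Dice_coeff(list('night'), list('nacht')))   # 0.6 - three shared letters out of 5 + 5
print(Dice_coeff(u'தமிழ்', u'தமிழ்'))              # 1.0 - identical words
```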
Ezhil-Language-Foundation/open-tamil | spell/spell.py | OttruSplit.generate_splits | def generate_splits(self):
"""
யாரிகழ்ந்து =
[['ய்', 'ஆரிகழ்ந்து'],
['யார்', 'இகழ்ந்து'],
['யாரிக்', 'அழ்ந்து'],
['யாரிகழ்ந்த்', 'உ']]
"""
L = len(self.letters)-1
for idx,letter in enumerate(self.letters):
if not( letter in tamil.utf8.grantha_uyirmei_letters):
continue
muthal = idx == 0 and u"" or u"".join(self.letters[0:idx])
meethi = idx == L and u"" or u"".join(self.letters[idx+1:])
mei,uyir = tamil.utf8.splitMeiUyir(letter)
muthal = muthal + mei
meethi = uyir + meethi
self.results.append([muthal,meethi])
return len(self.results) > 0 | python | def generate_splits(self):
"""
யாரிகழ்ந்து =
[['ய்', 'ஆரிகழ்ந்து'],
['யார்', 'இகழ்ந்து'],
['யாரிக்', 'அழ்ந்து'],
['யாரிகழ்ந்த்', 'உ']]
"""
L = len(self.letters)-1
for idx,letter in enumerate(self.letters):
if not( letter in tamil.utf8.grantha_uyirmei_letters):
continue
muthal = idx == 0 and u"" or u"".join(self.letters[0:idx])
meethi = idx == L and u"" or u"".join(self.letters[idx+1:])
mei,uyir = tamil.utf8.splitMeiUyir(letter)
muthal = muthal + mei
meethi = uyir + meethi
self.results.append([muthal,meethi])
return len(self.results) > 0 | யாரிகழ்ந்து =
[['ய்', 'ஆரிகழ்ந்து'],
['யார்', 'இகழ்ந்து'],
['யாரிக்', 'அழ்ந்து'],
['யாரிகழ்ந்த்', 'உ']] | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/spell/spell.py#L78-L96 |
Ezhil-Language-Foundation/open-tamil | solthiruthi/typographical.py | oridam_generate_patterns | def oridam_generate_patterns(word_in,cm,ed=1,level=0,pos=0,candidates=None):
""" ed = 1 by default, pos - internal variable for algorithm """
alternates = cm.get(word_in[pos],[])
if not candidates:
candidates = []
assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]'
if (pos >len(word_in)) or ed == 0:
return candidates
pfx = ''
sfx = ''
curr_candidates = []
for p in range(0,pos):
pfx = pfx + word_in[p]
for p in range(pos+1,len(word_in)):
sfx = sfx + word_in[p]
for alt in alternates:
word_alt = pfx + alt + sfx
if not (word_alt in candidates):
candidates.append( word_alt )
curr_candidates.append( word_alt )
for n_pos in range(pos,len(word_in)):
# already what we have ' candidates ' of this round are edit-distance 1
for word in curr_candidates:
oridam_generate_patterns(word,cm,ed-1,level+1,n_pos,candidates)
if level == 0:
#candidates.append(word_in)
for n_pos in range(pos,len(word_in)):
oridam_generate_patterns(word_in,cm,ed, level+1,n_pos,candidates)
return candidates | python | def oridam_generate_patterns(word_in,cm,ed=1,level=0,pos=0,candidates=None):
""" ed = 1 by default, pos - internal variable for algorithm """
alternates = cm.get(word_in[pos],[])
if not candidates:
candidates = []
assert ed <= len(word_in), 'edit distance has to be comparable to word size [ins/del not explored]'
if (pos >len(word_in)) or ed == 0:
return candidates
pfx = ''
sfx = ''
curr_candidates = []
for p in range(0,pos):
pfx = pfx + word_in[p]
for p in range(pos+1,len(word_in)):
sfx = sfx + word_in[p]
for alt in alternates:
word_alt = pfx + alt + sfx
if not (word_alt in candidates):
candidates.append( word_alt )
curr_candidates.append( word_alt )
for n_pos in range(pos,len(word_in)):
# already what we have ' candidates ' of this round are edit-distance 1
for word in curr_candidates:
oridam_generate_patterns(word,cm,ed-1,level+1,n_pos,candidates)
if level == 0:
#candidates.append(word_in)
for n_pos in range(pos,len(word_in)):
oridam_generate_patterns(word_in,cm,ed, level+1,n_pos,candidates)
return candidates | ed = 1 by default, pos - internal variable for algorithm | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/solthiruthi/typographical.py#L20-L48 |
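A toy run of the candidate generator with an invented confusion map (the real maps in `solthiruthi` pair visually or phonetically confusable Tamil letters); with ed=1 each position is rewritten independently:

```python
from solthiruthi.typographical import oridam_generate_patterns

cm = {'a': ['A'], 'b': ['B']}   # invented confusion map, for illustration only

print(oridam_generate_patterns('abc', cm, ed=1))   # ['Abc', 'aBc']
```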
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | to_unicode_repr | def to_unicode_repr( _letter ):
""" helpful in situations where browser/app may recognize Unicode encoding
in the \u0b8e type syntax but not actual unicode glyph/code-point"""
# Python 2-3 compatible
return u"u'"+ u"".join( [ u"\\u%04x"%ord(l) for l in _letter ] ) + u"'" | python | def to_unicode_repr( _letter ):
""" helpful in situations where browser/app may recognize Unicode encoding
in the \u0b8e type syntax but not actual unicode glyph/code-point"""
# Python 2-3 compatible
return u"u'"+ u"".join( [ u"\\u%04x"%ord(l) for l in _letter ] ) + u"'" | helpful in situations where browser/app may recognize Unicode encoding
in the \u0b8e type syntax but not actual unicode glyph/code-point | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L33-L37 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | uyirmei_constructed | def uyirmei_constructed( mei_idx, uyir_idx):
""" construct uyirmei letter give mei index and uyir index """
idx,idy = mei_idx,uyir_idx
assert ( idy >= 0 and idy < uyir_len() )
assert ( idx >= 0 and idx < 6+mei_len() )
return grantha_agaram_letters[mei_idx]+accent_symbols[uyir_idx] | python | def uyirmei_constructed( mei_idx, uyir_idx):
""" construct uyirmei letter give mei index and uyir index """
idx,idy = mei_idx,uyir_idx
assert ( idy >= 0 and idy < uyir_len() )
assert ( idx >= 0 and idx < 6+mei_len() )
return grantha_agaram_letters[mei_idx]+accent_symbols[uyir_idx] | construct uyirmei letter given mei index and uyir index | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L265-L270 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | has_english | def has_english( word_in ):
""" return True if word_in has any English letters in the string"""
return not all_tamil(word_in) and len(word_in) > 0 and any([l in word_in for l in string.ascii_letters]) | python | def has_english( word_in ):
""" return True if word_in has any English letters in the string"""
return not all_tamil(word_in) and len(word_in) > 0 and any([l in word_in for l in string.ascii_letters]) | return True if word_in has any English letters in the string | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L305-L307 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | all_tamil | def all_tamil( word_in ):
""" predicate checks if all letters of the input word are Tamil letters """
if isinstance(word_in,list):
word = word_in
else:
word = get_letters( word_in )
return all( [(letter in tamil_letters) for letter in word] ) | python | def all_tamil( word_in ):
""" predicate checks if all letters of the input word are Tamil letters """
if isinstance(word_in,list):
word = word_in
else:
word = get_letters( word_in )
return all( [(letter in tamil_letters) for letter in word] ) | predicate checks if all letters of the input word are Tamil letters | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L309-L315 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | reverse_word | def reverse_word( word ):
""" reverse a Tamil word according to letters not unicode-points """
op = get_letters( word )
op.reverse()
return u"".join(op) | python | def reverse_word( word ):
""" reverse a Tamil word according to letters not unicode-points """
op = get_letters( word )
op.reverse()
return u"".join(op) | reverse a Tamil word according to letters not unicode-points | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L337-L341 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | get_letters | def get_letters( word ):
""" splits the word into a character-list of tamil/english
characters present in the stream """
ta_letters = list()
not_empty = False
WLEN,idx = len(word),0
while (idx < WLEN):
c = word[idx]
#print(idx,hex(ord(c)),len(ta_letters))
if c in uyir_letter_set or c == ayudha_letter:
ta_letters.append(c)
not_empty = True
elif c in grantha_agaram_set:
ta_letters.append(c)
not_empty = True
elif c in accent_symbol_set:
if not not_empty:
# odd situation
ta_letters.append(c)
not_empty = True
else:
#print("Merge/accent")
ta_letters[-1] += c
else:
if ord(c) < 256 or not (is_tamil_unicode(c)):
ta_letters.append( c )
else:
if not_empty:
#print("Merge/??")
ta_letters[-1]+= c
else:
ta_letters.append(c)
not_empty = True
idx = idx + 1
return ta_letters | python | def get_letters( word ):
""" splits the word into a character-list of tamil/english
characters present in the stream """
ta_letters = list()
not_empty = False
WLEN,idx = len(word),0
while (idx < WLEN):
c = word[idx]
#print(idx,hex(ord(c)),len(ta_letters))
if c in uyir_letter_set or c == ayudha_letter:
ta_letters.append(c)
not_empty = True
elif c in grantha_agaram_set:
ta_letters.append(c)
not_empty = True
elif c in accent_symbol_set:
if not not_empty:
# odd situation
ta_letters.append(c)
not_empty = True
else:
#print("Merge/accent")
ta_letters[-1] += c
else:
if ord(c) < 256 or not (is_tamil_unicode(c)):
ta_letters.append( c )
else:
if not_empty:
#print("Merge/??")
ta_letters[-1]+= c
else:
ta_letters.append(c)
not_empty = True
idx = idx + 1
return ta_letters | splits the word into a character-list of tamil/english
characters present in the stream | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L389-L423 |
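A quick illustration of the grouping: a Tamil letter can span two or three code points, while ASCII passes through one character at a time.

```python
from tamil import utf8

print(utf8.get_letters(u'தமிழ்'))    # ['த', 'மி', 'ழ்'] - three letters from five code points
print(utf8.get_letters(u'abcஅ'))     # ['a', 'b', 'c', 'அ']
```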
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | get_letters_iterable | def get_letters_iterable( word ):
""" splits the word into a character-list of tamil/english
characters present in the stream """
WLEN,idx = len(word),0
while (idx < WLEN):
c = word[idx]
#print(idx,hex(ord(c)),len(ta_letters))
if c in uyir_letter_set or c == ayudha_letter:
idx = idx + 1
yield c
elif c in grantha_agaram_set:
if idx + 1 < WLEN and word[idx+1] in all_symbol_set:
c2 = word[idx+1]
idx = idx + 2
yield (c + c2)
else:
idx = idx + 1
yield c
else:
idx = idx + 1
yield c
return | python | def get_letters_iterable( word ):
""" splits the word into a character-list of tamil/english
characters present in the stream """
WLEN,idx = len(word),0
while (idx < WLEN):
c = word[idx]
#print(idx,hex(ord(c)),len(ta_letters))
if c in uyir_letter_set or c == ayudha_letter:
idx = idx + 1
yield c
elif c in grantha_agaram_set:
if idx + 1 < WLEN and word[idx+1] in all_symbol_set:
c2 = word[idx+1]
idx = idx + 2
yield (c + c2)
else:
idx = idx + 1
yield c
else:
idx = idx + 1
yield c
return | splits the word into a character-list of tamil/english
characters present in the stream | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L431-L453 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | get_words_iterable | def get_words_iterable( letters, tamil_only=False ):
""" given a list of UTF-8 letters section them into words, grouping them at spaces """
# correct algorithm for get-tamil-words
buf = []
for idx,let in enumerate(letters):
if not let.isspace():
if istamil(let) or (not tamil_only):
buf.append( let )
else:
if len(buf) > 0:
yield u"".join( buf )
buf = []
if len(buf) > 0:
yield u"".join(buf) | python | def get_words_iterable( letters, tamil_only=False ):
""" given a list of UTF-8 letters section them into words, grouping them at spaces """
# correct algorithm for get-tamil-words
buf = []
for idx,let in enumerate(letters):
if not let.isspace():
if istamil(let) or (not tamil_only):
buf.append( let )
else:
if len(buf) > 0:
yield u"".join( buf )
buf = []
if len(buf) > 0:
yield u"".join(buf) | given a list of UTF-8 letters section them into words, grouping them at spaces | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L499-L513 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | get_tamil_words | def get_tamil_words( letters ):
""" reverse a Tamil word according to letters, not unicode-points """
if not isinstance(letters,list):
raise Exception("metehod needs to be used with list generated from 'tamil.utf8.get_letters(...)'")
return [word for word in get_words_iterable( letters, tamil_only = True )] | python | def get_tamil_words( letters ):
""" reverse a Tamil word according to letters, not unicode-points """
if not isinstance(letters,list):
raise Exception("metehod needs to be used with list generated from 'tamil.utf8.get_letters(...)'")
return [word for word in get_words_iterable( letters, tamil_only = True )] | collect the Tamil words from a list of letters produced by tamil.utf8.get_letters | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L515-L519 |
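Chaining the two helpers: `get_letters` produces the letter list that `get_tamil_words` insists on, and non-Tamil tokens are dropped along the way.

```python
from tamil import utf8

letters = utf8.get_letters(u'தமிழ் hello உலகம்')
print(utf8.get_tamil_words(letters))   # ['தமிழ்', 'உலகம்'] - the English word is filtered out
```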
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | compare_words_lexicographic | def compare_words_lexicographic( word_a, word_b ):
""" compare words in Tamil lexicographic order """
# sanity check for words to be all Tamil
if ( not all_tamil(word_a) ) or (not all_tamil(word_b)) :
#print("## ")
#print(word_a)
#print(word_b)
#print("Both operands need to be Tamil words")
pass
La = len(word_a)
Lb = len(word_b)
all_TA_letters = u"".join(tamil_letters)
for itr in range(0,min(La,Lb)):
pos1 = all_TA_letters.find( word_a[itr] )
pos2 = all_TA_letters.find( word_b[itr] )
if pos1 != pos2 :
#print not( pos1 > pos2), pos1, pos2
return cmp(pos1, pos2)
# result depends on if La is shorter than Lb, or 0 if La == Lb i.e. cmp
return cmp(La,Lb) | python | def compare_words_lexicographic( word_a, word_b ):
""" compare words in Tamil lexicographic order """
# sanity check for words to be all Tamil
if ( not all_tamil(word_a) ) or (not all_tamil(word_b)) :
#print("## ")
#print(word_a)
#print(word_b)
#print("Both operands need to be Tamil words")
pass
La = len(word_a)
Lb = len(word_b)
all_TA_letters = u"".join(tamil_letters)
for itr in range(0,min(La,Lb)):
pos1 = all_TA_letters.find( word_a[itr] )
pos2 = all_TA_letters.find( word_b[itr] )
if pos1 != pos2 :
#print not( pos1 > pos2), pos1, pos2
return cmp(pos1, pos2)
# result depends on if La is shorter than Lb, or 0 if La == Lb i.e. cmp
return cmp(La,Lb) | compare words in Tamil lexicographic order | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L531-L552 |
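Note that `compare_words_lexicographic` calls the Python 2 built-in `cmp`; on Python 3 a small shim like the following would have to be in scope for the code above to run (a sketch, not part of the library):

```python
def cmp(a, b):
    # Python 2 semantics: negative if a < b, zero if equal, positive if a > b
    return (a > b) - (a < b)

print(cmp(3, 5), cmp(5, 5), cmp(7, 5))   # -1 0 1
```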
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | word_intersection | def word_intersection( word_a, word_b ):
""" return a list of tuples where word_a, word_b intersect """
positions = []
word_a_letters = get_letters( word_a )
word_b_letters = get_letters( word_b )
for idx,wa in enumerate(word_a_letters):
for idy,wb in enumerate(word_b_letters):
if ( wa == wb ):
positions.append( (idx, idy) )
return positions | python | def word_intersection( word_a, word_b ):
""" return a list of tuples where word_a, word_b intersect """
positions = []
word_a_letters = get_letters( word_a )
word_b_letters = get_letters( word_b )
for idx,wa in enumerate(word_a_letters):
for idy,wb in enumerate(word_b_letters):
if ( wa == wb ):
positions.append( (idx, idy) )
return positions | return a list of tuples where word_a, word_b intersect | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L558-L567 |
Ezhil-Language-Foundation/open-tamil | tamil/utf8.py | splitMeiUyir | def splitMeiUyir(uyirmei_char):
"""
This function splits an uyirmei compound character into its mei + uyir characters
and returns them as a tuple.
Input : It must be unicode tamil char.
Written By : Arulalan.T
Date : 22.09.2014
"""
if not isinstance(uyirmei_char, PYTHON3 and str or unicode):
raise ValueError("Passed input letter '%s' must be unicode, \
not just string" % uyirmei_char)
if uyirmei_char in mei_letters or uyirmei_char in uyir_letters or uyirmei_char in ayudha_letter:
return uyirmei_char
if uyirmei_char not in grantha_uyirmei_letters:
if not is_normalized( uyirmei_char ):
norm_char = unicode_normalize(uyirmei_char)
rval = splitMeiUyir( norm_char )
return rval
raise ValueError("Passed input letter '%s' is not tamil letter" % uyirmei_char)
idx = grantha_uyirmei_letters.index(uyirmei_char)
uyiridx = idx % 12
meiidx = int((idx - uyiridx)/ 12)
return (grantha_mei_letters[meiidx], uyir_letters[uyiridx]) | python | def splitMeiUyir(uyirmei_char):
"""
This function splits an uyirmei compound character into its mei + uyir characters
and returns them as a tuple.
Input : It must be unicode tamil char.
Written By : Arulalan.T
Date : 22.09.2014
"""
if not isinstance(uyirmei_char, PYTHON3 and str or unicode):
raise ValueError("Passed input letter '%s' must be unicode, \
not just string" % uyirmei_char)
if uyirmei_char in mei_letters or uyirmei_char in uyir_letters or uyirmei_char in ayudha_letter:
return uyirmei_char
if uyirmei_char not in grantha_uyirmei_letters:
if not is_normalized( uyirmei_char ):
norm_char = unicode_normalize(uyirmei_char)
rval = splitMeiUyir( norm_char )
return rval
raise ValueError("Passed input letter '%s' is not tamil letter" % uyirmei_char)
idx = grantha_uyirmei_letters.index(uyirmei_char)
uyiridx = idx % 12
meiidx = int((idx - uyiridx)/ 12)
return (grantha_mei_letters[meiidx], uyir_letters[uyiridx]) | This function splits an uyirmei compound character into its mei + uyir characters
and returns them as a tuple.
Input : It must be unicode tamil char.
Written By : Arulalan.T
Date : 22.09.2014 | https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/utf8.py#L590-L619 |
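The index arithmetic works because the uyirmei table is laid out with 12 vowel forms per consonant, so `idx % 12` selects the vowel and the quotient selects the consonant; a usage sketch:

```python
from tamil import utf8

print(utf8.splitMeiUyir(u'கி'))   # ('க்', 'இ') - consonant plus vowel
print(utf8.splitMeiUyir(u'க்'))    # 'க்' - plain mei letters come back unchanged
```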