index | package | name | docstring | code | signature
---|---|---|---|---|---|
29,813 | isbnlib._core | get_isbnlike | Extract all substrings that seem like ISBNs.
level:
strict: only matches that are almost certainly ISBNs
normal (default)
loose: catch as many candidates as possible
| def get_isbnlike(text, level='normal'):
"""Extract all substrings that seem like ISBNs.
level:
strict: only matches that are almost certainly ISBNs
normal (default)
loose: catch as many candidates as possible
"""
if level == 'normal': # pragma: no cover
isbnlike = RE_NORMAL
elif level == 'strict':
isbnlike = RE_STRICT
elif level == 'loose':
isbnlike = RE_LOOSE
else:
LOGGER.error('level has no option %s', level)
return []
return isbnlike.findall(text)
| (text, level='normal') |
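A quick check of the three levels (a minimal sketch, assuming `isbnlib` is installed; exact matches depend on each level's regex):
```python
from isbnlib import get_isbnlike

text = "See ISBN 978-0-306-40615-7 or the older 0-306-40615-2."
get_isbnlike(text)                  # both hyphenated forms should match at 'normal'
get_isbnlike(text, level='loose')   # wider regex, may include false positives
get_isbnlike(text, level='bogus')   # unknown level: logs an error and returns []
```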
29,814 | isbnlib._goom | query | Query the Google Books (JSON API v1) for metadata. | # -*- coding: utf-8 -*-
"""Query the Google Books (JSON API v1) for metadata."""
import logging
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
from .dev import cache, stdmeta
from .dev._bouth23 import u
from .dev._exceptions import NoDataForSelectorError, RecordMappingError
from .dev.webquery import query as wquery
UA = 'isbnlib (gzip)'
SERVICE_URL = (
'https://www.googleapis.com/books/v1/volumes?q={words}'
'&fields=items/volumeInfo(title,authors,publisher,publishedDate,'
'language,industryIdentifiers)&maxResults=10')
LOGGER = logging.getLogger(__name__)
# pylint: disable=broad-except
def _mapper(record):
"""Map canonical <- record."""
# canonical:
# -> ISBN-13, Title, Authors, Publisher, Year, Language
try:
# mapping: canonical <- records
if 'industryIdentifiers' not in record: # pragma: no cover
return {}
canonical = {}
isbn = None
for ident in record['industryIdentifiers']:
if ident['type'] == 'ISBN_13':
isbn = ident['identifier']
break
if not isbn: # pragma: no cover
return {}
canonical['ISBN-13'] = isbn
canonical['Title'] = record.get('title', u('')).replace(' :', ':')
canonical['Authors'] = record.get('authors', [])
canonical['Publisher'] = record.get('publisher', u(''))
if 'publishedDate' in record and len(record['publishedDate']) >= 4:
canonical['Year'] = record['publishedDate'][0:4]
else: # pragma: no cover
canonical['Year'] = u('')
canonical['Language'] = record.get('language', u(''))
except Exception: # pragma: no cover
raise RecordMappingError(isbn)
# call stdmeta for extra cleaning and validation
return stdmeta(canonical)
| (words) |
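`_mapper` is module-private, but its contract is visible with a hand-built `volumeInfo` record (the dict below is a hypothetical payload, not real API output):
```python
record = {
    'title': 'Example Title',
    'authors': ['Jane Doe'],
    'publisher': 'Example Press',
    'publishedDate': '2001-05-14',
    'language': 'en',
    'industryIdentifiers': [
        {'type': 'ISBN_10', 'identifier': '0306406152'},
        {'type': 'ISBN_13', 'identifier': '9780306406157'},
    ],
}
# _mapper(record) keeps only the ISBN_13 identifier, maps the other fields
# onto {'ISBN-13', 'Title', 'Authors', 'Publisher', 'Year', 'Language'}
# (Year is the first 4 chars of publishedDate), and passes the result
# through stdmeta() for cleaning and validation.
```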
29,815 | isbnlib._ext | info | Get language or country assigned to this ISBN. | def info(isbn):
"""Get language or country assigned to this ISBN."""
return infogroup(isbn)
| (isbn) |
29,816 | isbnlib._core | is_isbn10 | Validate as ISBN-10. | def is_isbn10(isbn10):
"""Validate as ISBN-10."""
isbn10 = canonical(isbn10)
if len(isbn10) != 10:
return False # pragma: no cover
return check_digit10(isbn10[:-1]) == isbn10[-1]
| (isbn10) |
29,817 | isbnlib._core | is_isbn13 | Validate as ISBN-13. | def is_isbn13(isbn13):
"""Validate as ISBN-13."""
isbn13 = canonical(isbn13)
if len(isbn13) != 13:
return False # pragma: no cover
if isbn13[0:3] not in ('978', '979'):
return False
return check_digit13(isbn13[:-1]) == isbn13[-1]
| (isbn13) |
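Both validators reduce to a check-digit comparison once `canonical()` strips the separators; the well-known 0-306-40615-2 / 978-0-306-40615-7 pair makes a handy test:
```python
from isbnlib import is_isbn10, is_isbn13

assert is_isbn10('0-306-40615-2')        # check digit '2' matches
assert is_isbn13('978-0-306-40615-7')    # '978' prefix and check digit '7' match
assert not is_isbn13('1234567890123')    # rejected early: prefix is not 978/979
```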
29,818 | isbnlib._ext | isbn_from_words | Return the most probable ISBN from a list of words. | def isbn_from_words(words):
"""Return the most probable ISBN from a list of words."""
return goos(words)
| (words) |
29,819 | isbnlib._ext | mask | `Mask` a canonical ISBN. | def mask(isbn, separator='-'):
"""`Mask` a canonical ISBN."""
return msk(isbn, separator)
| (isbn, separator='-') |
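`mask` re-hyphenates a canonical ISBN; a minimal sketch (the exact split depends on the range data shipped with the library):
```python
from isbnlib import mask

mask('9780306406157')         # expected: '978-0-306-40615-7'
mask('9780306406157', ' ')    # same split, space-separated
```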
29,820 | isbnlib._ext | meta | Get metadata from Google Books ('goob'), Open Library ('openl'), ... | def meta(isbn, service='default'):
"""Get metadata from Google Books ('goob'), Open Library ('openl'), ..."""
return query(isbn, service) if isbn else {}
| (isbn, service='default') |
29,821 | isbnlib._core | notisbn | Check an ISBN-like string with the goal of invalidating it.
level:
'strict' when certain they are not ISBNs (default)
'loose' only filters out obvious non-ISBNs
| def notisbn(isbnlike, level='strict'):
"""Check with the goal to invalidate isbn-like.
level:
'strict' when certain they are not ISBNs (default)
'loose' only filters obvious NO ISBNs
"""
if level not in ('strict', 'loose'): # pragma: no cover
LOGGER.error('level has no option %s', level)
return None
isbnlike = canonical(isbnlike)
if len(isbnlike) not in (10, 13):
return True
if level != 'strict':
return False
if len(isbnlike) == 10:
return not is_isbn10(isbnlike)
return not is_isbn13(isbnlike)
| (isbnlike, level='strict') |
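The two levels only disagree on strings of plausible length:
```python
from isbnlib import notisbn

notisbn('9780306406157')                  # False: a valid ISBN-13
notisbn('9780306406158')                  # True under 'strict': bad check digit
notisbn('9780306406158', level='loose')   # False: right length, so not obviously wrong
notisbn('12345')                          # True at either level: impossible length
```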
29,822 | isbnlib._exceptions | quiet_errors | Define error format suitable for end user scripts.
Usage: enter the following lines in your script
from isbnlib import quiet_errors
sys.excepthook = quiet_errors
| def quiet_errors(exc_type, exc_value, traceback):
"""Define error format suitable for end user scripts.
Usage: enter the following lines in your script
from isbnlib import quiet_errors
sys.excepthook = quiet_errors
"""
sys.stderr.write('Error: %s\n' % exc_value) # pragma: no cover
| (exc_type, exc_value, traceback) |
29,823 | isbnlib._ext | ren | Rename a file using metadata from an ISBN in its filename. | def ren(fp):
"""Rename a file using metadata from an ISBN in its filename."""
cfp = File(fp)
isbn = EAN13(cfp.name)
if not isbn: # pragma: no cover
return None
data = meta(isbn)
author = data.get('Authors', u('UNKNOWN'))
if author != u('UNKNOWN'): # pragma: no cover
author = last_first(author[0])['last']
year = data.get('Year', u('UNKNOWN'))
maxlen = 98 - (20 + len(author) + len(year))
title = data.get('Title', u('UNKNOWN'))
if title != u('UNKNOWN'):
regex1 = re.compile(r'[.,_!?/\\]')
regex2 = re.compile(r'\s\s+')
title = regex1.sub(' ', title)
title = regex2.sub(' ', title)
title = title.strip()
if title == u('UNKNOWN') or not title: # pragma: no cover
return None
if ' ' in title: # pragma: no cover
tokens = title.split(' ')
stitle = cutoff_tokens(tokens, maxlen)
title = ' '.join(stitle)
isbn13 = data.get('ISBN-13', u('UNKNOWN'))
new_name = '%s%s_%s_%s' % (author, year, title, isbn13)
return cfp.baserename(b2u3(new_name + cfp.ext))
| (fp) |
29,824 | isbnlib._core | to_isbn10 | Transform isbn-13 to isbn-10. | def to_isbn10(isbn13):
"""Transform isbn-13 to isbn-10."""
isbn13 = canonical(isbn13)
# Check prefix
if isbn13[:3] != ISBN13_PREFIX:
return isbn13 if len(isbn13) == 10 and is_isbn10(isbn13) else ''
if not is_isbn13(isbn13):
return ''
isbn10 = isbn13[3:]
check = check_digit10(isbn10[:-1])
# Change check digit
return isbn10[:-1] + check if check else ''
| (isbn13) |
29,825 | isbnlib._core | to_isbn13 | Transform isbn-10 to isbn-13. | def to_isbn13(isbn10):
"""Transform isbn-10 to isbn-13."""
isbn10 = canonical(isbn10)
if len(isbn10) == 13 and is_isbn13(isbn10):
return isbn10
if not is_isbn10(isbn10):
return ''
isbn13 = ISBN13_PREFIX + isbn10[:-1]
check = check_digit13(isbn13)
return isbn13 + check if check else ''
| (isbn10) |
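The two conversions round-trip through the '978' prefix:
```python
from isbnlib import to_isbn10, to_isbn13

to_isbn13('0-306-40615-2')    # '9780306406157': prepend '978', recompute check digit
to_isbn10('9780306406157')    # '0306406152': drop '978', recompute check digit
# '979'-prefixed ISBN-13s have no ISBN-10 equivalent, so to_isbn10 returns ''.
```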
29,826 | cron_converter.cron | Cron | Creates an instance of Cron.
Cron objects each represent a cron schedule.
Attributes:
options (dict): The options to use
| class Cron:
"""Creates an instance of Cron.
Cron objects each represent a cron schedule.
Attributes:
options (dict): The options to use
"""
def __init__(self, cron_string: Optional[str] = None, options=None):
self.options = options if bool(options) else dict()
self.parts: List[Part] = []
if cron_string:
self.from_string(cron_string)
def __str__(self) -> str:
"""Print directly the Cron Object"""
return self.to_string()
def __lt__(self, other) -> bool:
""" This Cron object is lower than the other Cron.
The comparison is made by 'total_ordering' comparing the number of Cron schedule times.
"""
reordered_parts = self.parts[:3] + [self.parts[4], self.parts[3]]
reordered_parts_other = other.parts[:3] + [other.parts[4], other.parts[3]]
for part, other_part in zip(reversed(reordered_parts), reversed(reordered_parts_other)):
if part < other_part:
return True
return False
def __eq__(self, other) -> bool:
""" This Cron object is equal to the other Cron.
The comparison is made by 'total_ordering' comparing the number of Cron schedule times.
"""
return all(part == other_part for part, other_part in zip(self.parts, other.parts))
def __contains__(self, item: Union[datetime, date]) -> bool:
return self.validate(item)
def from_string(self, cron_string: str) -> None:
"""Parses a cron string (minutes - hours - days - months - weekday)
:param cron_string: (str) The cron string to parse. It has to be made up of 5 parts.
:raises ValueError: Incorrect length of the cron string.
"""
if type(cron_string) is not str:
raise TypeError('Invalid cron string')
raw_cron_parts = cron_string.strip().split()
if len(raw_cron_parts) != 5:
raise ValueError("Invalid cron string format")
for item, unit in zip(raw_cron_parts, units):
part = Part(unit, self.options)
part.from_string(item)
self.parts.append(part)
def to_string(self) -> str:
"""Return the cron schedule as a string.
:return: cron string (str) -> The cron schedule as a string.
"""
if not self.parts:
raise LookupError('No schedule found')
return ' '.join(str(part) for part in self.parts)
def from_list(self, cron_list: List[List[Union[str, int]]]):
"""Parses a 2-dimensional array of integers as a cron schedule.
:param cron_list: (list of list) The 2-dimensional list to parse.
:raises ValueError: Incorrect length of the cron list.
"""
if len(cron_list) != 5:
raise ValueError('Invalid cron list')
for cron_part_list, unit in zip(cron_list, units):
part = Part(unit, self.options)
part.from_list(cron_part_list)
self.parts.append(part)
def to_list(self) -> List[List[int]]:
"""Returns the cron schedule as a 2-dimensional list of integers
:return: schedule_list -> The cron schedule as a list.
:raises LookupError: Empty Cron object.
"""
if not self.parts:
raise LookupError('No schedule found')
schedule_list = []
for part in self.parts:
schedule_list.append(part.to_list())
return schedule_list
def schedule(self, start_date: Optional[datetime] = None, timezone_str: Optional[str] = None) -> Seeker:
"""Returns the time the schedule would run next.
:param start_date: Optional. A datetime object. If not provided, the date will be now in UTC.
This parameter is mutually exclusive with 'timezone_str'.
:param timezone_str: Optional. A timezone string ('Europe/Rome', 'America/New_York', ...).
The date will be now, but localized.
If not provided, the date will be now in UTC. This parameter is mutually exclusive with 'start_date'.
:return: A schedule iterator.
"""
return Seeker(self, start_date, timezone_str)
def validate(self, date_time_obj: Union[datetime, date]) -> bool:
"""Returns True if the object passed is within the Cron rule.
:param date_time_obj: A datetime or date object
:return: True if the object passed is within the Cron Rule.
"""
valid = []
for cron_part, d_par in zip(self.parts, to_parts(date_time_obj)):
if d_par is not None:
valid.append(d_par in cron_part.to_list())
else:
valid.append(True)
return all(valid)
| (cron_string: Optional[str] = None, options=None) |
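A minimal round trip through the Cron API defined above (string in, normalized parts out, then a schedule iterator):
```python
from cron_converter import Cron

cron = Cron('*/15 9-17 * * 1-5')   # every 15 min, 09:00-17:45, Mon-Fri
print(cron.to_string())            # the normalized cron string
print(cron.to_list())              # five lists: minutes, hours, days, months, weekdays

schedule = cron.schedule()         # a Seeker starting from now (UTC)
print(schedule.next())             # next matching datetime
print(schedule.next())             # and the one after that
```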
29,827 | cron_converter.cron | __contains__ | null | def __contains__(self, item: Union[datetime, date]) -> bool:
return self.validate(item)
| (self, item: Union[datetime.datetime, datetime.date]) -> bool |
29,828 | cron_converter.cron | __eq__ | This Cron object is equal to the other Cron.
Supports 'total_ordering': two Crons are equal when all of their schedule parts are equal.
| def __eq__(self, other) -> bool:
""" This Cron object is equal to the other Cron.
The comparison is made by 'total_ordering' comparing the number of Cron schedule times.
"""
return all(part == other_part for part, other_part in zip(self.parts, other.parts))
| (self, other) -> bool |
29,831 | cron_converter.cron | __init__ | null | def __init__(self, cron_string: Optional[str] = None, options=None):
self.options = options if bool(options) else dict()
self.parts: List[Part] = []
if cron_string:
self.from_string(cron_string)
| (self, cron_string: Optional[str] = None, options=None) |
29,833 | cron_converter.cron | __lt__ | This Cron object is less than the other Cron.
Supports 'total_ordering': parts are compared from most to least significant (month, weekday, day, hour, minute), each by its number of schedule times.
| def __lt__(self, other) -> bool:
""" This Cron object is lower than the other Cron.
The comparison is made by 'total_ordering' comparing the number of Cron schedule times.
"""
reordered_parts = self.parts[:3] + [self.parts[4], self.parts[3]]
reordered_parts_other = other.parts[:3] + [other.parts[4], other.parts[3]]
for part, other_part in zip(reversed(reordered_parts), reversed(reordered_parts_other)):
if part < other_part:
return True
return False
| (self, other) -> bool |
29,834 | cron_converter.cron | __str__ | Return the cron string when printing the Cron object directly. | def __str__(self) -> str:
"""Return the cron string when printing the Cron object directly."""
return self.to_string()
| (self) -> str |
29,835 | cron_converter.cron | from_list | Parses a 2-dimensional array of integers as a cron schedule.
:param cron_list: (list of list) The 2-dimensional list to parse.
:raises ValueError: Incorrect length of the cron list.
| def from_list(self, cron_list: List[List[Union[str, int]]]):
"""Parses a 2-dimensional array of integers as a cron schedule.
:param cron_list: (list of list) The 2-dimensional list to parse.
:raises ValueError: Incorrect length of the cron list.
"""
if len(cron_list) != 5:
raise ValueError('Invalid cron list')
for cron_part_list, unit in zip(cron_list, units):
part = Part(unit, self.options)
part.from_list(cron_part_list)
self.parts.append(part)
| (self, cron_list: List[List[Union[str, int]]]) |
29,836 | cron_converter.cron | from_string | Parses a cron string (minutes - hours - days - months - weekday)
:param cron_string: (str) The cron string to parse. It has to be made up of 5 parts.
:raises ValueError: Incorrect length of the cron string.
| def from_string(self, cron_string: str) -> None:
"""Parses a cron string (minutes - hours - days - months - weekday)
:param cron_string: (str) The cron string to parse. It has to be made up of 5 parts.
:raises ValueError: Incorrect length of the cron string.
"""
if type(cron_string) is not str:
raise TypeError('Invalid cron string')
raw_cron_parts = cron_string.strip().split()
if len(raw_cron_parts) != 5:
raise ValueError("Invalid cron string format")
for item, unit in zip(raw_cron_parts, units):
part = Part(unit, self.options)
part.from_string(item)
self.parts.append(part)
| (self, cron_string: str) -> NoneType |
29,837 | cron_converter.cron | schedule | Returns the time the schedule would run next.
:param start_date: Optional. A datetime object. If not provided, the date will be now in UTC.
This parameter is mutually exclusive with 'timezone_str'.
:param timezone_str: Optional. A timezone string ('Europe/Rome', 'America/New_York', ...).
The date will be now, but localized.
If not provided, the date will be now in UTC. This parameter is mutually exclusive with 'start_date'.
:return: A schedule iterator.
| def schedule(self, start_date: Optional[datetime] = None, timezone_str: Optional[str] = None) -> Seeker:
"""Returns the time the schedule would run next.
:param start_date: Optional. A datetime object. If not provided, the date will be now in UTC.
This parameter is mutually exclusive with 'timezone_str'.
:param timezone_str: Optional. A timezone string ('Europe/Rome', 'America/New_York', ...).
The date will be now, but localized.
If not provided, the date will be now in UTC. This parameter is mutually exclusive with 'start_date'.
:return: A schedule iterator.
"""
return Seeker(self, start_date, timezone_str)
| (self, start_date: Optional[datetime.datetime] = None, timezone_str: Optional[str] = None) -> cron_converter.sub_modules.seeker.Seeker |
29,838 | cron_converter.cron | to_list | Returns the cron schedule as a 2-dimensional list of integers
:return: schedule_list -> The cron schedule as a list.
:raises LookupError: Empty Cron object.
| def to_list(self) -> List[List[int]]:
"""Returns the cron schedule as a 2-dimensional list of integers
:return: schedule_list -> The cron schedule as a list.
:raises LookupError: Empty Cron object.
"""
if not self.parts:
raise LookupError('No schedule found')
schedule_list = []
for part in self.parts:
schedule_list.append(part.to_list())
return schedule_list
| (self) -> List[List[int]] |
29,839 | cron_converter.cron | to_string | Return the cron schedule as a string.
:return: cron string (str) -> The cron schedule as a string.
| def to_string(self) -> str:
"""Return the cron schedule as a string.
:return: cron string (str) -> The cron schedule as a string.
"""
if not self.parts:
raise LookupError('No schedule found')
return ' '.join(str(part) for part in self.parts)
| (self) -> str |
29,840 | cron_converter.cron | validate | Returns True if the object passed is within the Cron rule.
:param date_time_obj: A datetime or date object
:return: True if the object passed is within the Cron Rule.
| def validate(self, date_time_obj: Union[datetime, date]) -> bool:
"""Returns True if the object passed is within the Cron rule.
:param date_time_obj: A datetime or date object
:return: True if the object passed is within the Cron Rule.
"""
valid = []
for cron_part, d_par in zip(self.parts, to_parts(date_time_obj)):
if d_par is not None:
valid.append(d_par in cron_part.to_list())
else:
valid.append(True)
return all(valid)
| (self, date_time_obj: Union[datetime.datetime, datetime.date]) -> bool |
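`validate` (and the `in` operator via `__contains__`) checks each date part against the corresponding cron part, skipping parts that a plain `date` does not carry:
```python
from datetime import date, datetime
from cron_converter import Cron

cron = Cron('0 9 * * *')                 # every day at 09:00
datetime(2024, 1, 15, 9, 0) in cron      # True
datetime(2024, 1, 15, 9, 30) in cron     # False: minute 30 is not in the rule
date(2024, 1, 15) in cron                # True: minute/hour parts are None, so skipped
```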
29,844 | hqxtinyai.hqxtinyai | say | null | import subprocess
def say(text=''):
subprocess.run(['say', text])  # macOS 'say' text-to-speech CLI
| (text='') |
29,852 | typing | _SpecialGenericAlias | null | class _SpecialGenericAlias(_BaseGenericAlias, _root=True):
def __init__(self, origin, nparams, *, inst=True, name=None):
if name is None:
name = origin.__name__
super().__init__(origin, inst=inst, name=name)
self._nparams = nparams
if origin.__module__ == 'builtins':
self.__doc__ = f'A generic version of {origin.__qualname__}.'
else:
self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
@_tp_cache
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
_check_generic(self, params, self._nparams)
return self.copy_with(params)
def copy_with(self, params):
return _GenericAlias(self.__origin__, params,
name=self._name, inst=self._inst)
def __repr__(self):
return 'typing.' + self._name
def __subclasscheck__(self, cls):
if isinstance(cls, _SpecialGenericAlias):
return issubclass(cls.__origin__, self.__origin__)
if not isinstance(cls, _GenericAlias):
return issubclass(cls, self.__origin__)
return super().__subclasscheck__(cls)
def __reduce__(self):
return self._name
def __or__(self, right):
return Union[self, right]
def __ror__(self, left):
return Union[left, self]
| (origin, nparams, *, inst=True, name=None) |
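These are the internals behind aliases such as `typing.List`; a small illustration of the behavior above (CPython 3.9+):
```python
import typing

type(typing.List).__name__       # '_SpecialGenericAlias'
typing.List[int]                 # __getitem__ type-checks the params, then copy_with()
issubclass(list, typing.List)    # True: __subclasscheck__ defers to the origin
typing.List | None               # __or__ builds typing.Optional[typing.List]
```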
29,853 | typing | __call__ | null | def __call__(self, *args, **kwargs):
if not self._inst:
raise TypeError(f"Type {self._name} cannot be instantiated; "
f"use {self.__origin__.__name__}() instead")
result = self.__origin__(*args, **kwargs)
try:
result.__orig_class__ = self
except AttributeError:
pass
return result
| (self, *args, **kwargs) |
29,854 | typing | __dir__ | null | def __dir__(self):
return list(set(super().__dir__()
+ [attr for attr in dir(self.__origin__) if not _is_dunder(attr)]))
| (self) |
29,855 | typing | __getattr__ | null | def __getattr__(self, attr):
if attr in {'__name__', '__qualname__'}:
return self._name or self.__origin__.__name__
# We are careful for copy and pickle.
# Also for simplicity we don't relay any dunder names
if '__origin__' in self.__dict__ and not _is_dunder(attr):
return getattr(self.__origin__, attr)
raise AttributeError(attr)
| (self, attr) |
29,856 | typing | __getitem__ | null | def _tp_cache(func=None, /, *, typed=False):
"""Internal wrapper caching __getitem__ of generic types with a fallback to
original function for non-hashable arguments.
"""
def decorator(func):
cached = functools.lru_cache(typed=typed)(func)
_cleanups.append(cached.cache_clear)
@functools.wraps(func)
def inner(*args, **kwds):
try:
return cached(*args, **kwds)
except TypeError:
pass # All real errors (not unhashable args) are raised below.
return func(*args, **kwds)
return inner
if func is not None:
return decorator(func)
return decorator
| (self, params) |
29,857 | typing | __init__ | null | def __init__(self, origin, nparams, *, inst=True, name=None):
if name is None:
name = origin.__name__
super().__init__(origin, inst=inst, name=name)
self._nparams = nparams
if origin.__module__ == 'builtins':
self.__doc__ = f'A generic version of {origin.__qualname__}.'
else:
self.__doc__ = f'A generic version of {origin.__module__}.{origin.__qualname__}.'
| (self, origin, nparams, *, inst=True, name=None) |
29,858 | typing | __instancecheck__ | null | def __instancecheck__(self, obj):
return self.__subclasscheck__(type(obj))
| (self, obj) |
29,859 | typing | __mro_entries__ | null | def __mro_entries__(self, bases):
res = []
if self.__origin__ not in bases:
res.append(self.__origin__)
i = bases.index(self)
for b in bases[i+1:]:
if isinstance(b, _BaseGenericAlias) or issubclass(b, Generic):
break
else:
res.append(Generic)
return tuple(res)
| (self, bases) |
29,861 | typing | __reduce__ | null | def __reduce__(self):
return self._name
| (self) |
29,862 | typing | __repr__ | null | def __repr__(self):
return 'typing.' + self._name
| (self) |
29,864 | typing | __setattr__ | null | def __setattr__(self, attr, val):
if _is_dunder(attr) or attr in {'_name', '_inst', '_nparams',
'_typevar_types', '_paramspec_tvars'}:
super().__setattr__(attr, val)
else:
setattr(self.__origin__, attr, val)
| (self, attr, val) |
29,865 | typing | __subclasscheck__ | null | def __subclasscheck__(self, cls):
if isinstance(cls, _SpecialGenericAlias):
return issubclass(cls.__origin__, self.__origin__)
if not isinstance(cls, _GenericAlias):
return issubclass(cls, self.__origin__)
return super().__subclasscheck__(cls)
| (self, cls) |
29,866 | typing | copy_with | null | def copy_with(self, params):
return _GenericAlias(self.__origin__, params,
name=self._name, inst=self._inst)
| (self, params) |
29,867 | typing_utils | NormalizedType |
Normalized type; makes it possible to compare and hash types.
| class NormalizedType(typing.NamedTuple):
'''
Normalized type; makes it possible to compare and hash types.
'''
origin: Type
args: typing.Union[tuple, frozenset] = tuple()
def __eq__(self, other):
if isinstance(other, NormalizedType):
if self.origin != other.origin:
return False
if isinstance(self.args, frozenset) and isinstance(other.args, frozenset):
return self.args <= other.args and other.args <= self.args
return self.origin == other.origin and self.args == other.args
if not self.args:
return self.origin == other
return False
def __hash__(self) -> int:
if not self.args:
return hash(self.origin)
return hash((self.origin, self.args))
def __repr__(self):
if not self.args:
return f"{self.origin}"
return f"{self.origin}[{self.args}]"
| (origin: Union[NoneType, type, TypeVar], args: Union[tuple, frozenset] = ()) |
29,868 | typing_utils | __eq__ | null | def __eq__(self, other):
if isinstance(other, NormalizedType):
if self.origin != other.origin:
return False
if isinstance(self.args, frozenset) and isinstance(other.args, frozenset):
return self.args <= other.args and other.args <= self.args
return self.origin == other.origin and self.args == other.args
if not self.args:
return self.origin == other
return False
| (self, other) |
29,870 | typing_utils | __hash__ | null | def __hash__(self) -> int:
if not self.args:
return hash(self.origin)
return hash((self.origin, self.args))
| (self) -> int |
29,871 | namedtuple_NormalizedType | __new__ | Create new instance of NormalizedType(origin, args) | from builtins import function
| (_cls, origin: Union[NoneType, type, ForwardRef('TypeVar')], args: Union[tuple, frozenset] = ()) |
29,872 | typing_utils | __repr__ | null | def __repr__(self):
if not self.args:
return f"{self.origin}"
return f"{self.origin}[{self.args}]"
| (self) |
29,874 | collections | _replace | Return a new NormalizedType object replacing specified fields with new values | def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = _sys.intern(str(typename))
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not name.isidentifier()
or _iskeyword(name)
or name.startswith('_')
or name in seen):
field_names[index] = f'_{index}'
seen.add(name)
for name in [typename] + field_names:
if type(name) is not str:
raise TypeError('Type names and field names must be strings')
if not name.isidentifier():
raise ValueError('Type names and field names must be valid '
f'identifiers: {name!r}')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
f'keyword: {name!r}')
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
f'{name!r}')
if name in seen:
raise ValueError(f'Encountered duplicate field name: {name!r}')
seen.add(name)
field_defaults = {}
if defaults is not None:
defaults = tuple(defaults)
if len(defaults) > len(field_names):
raise TypeError('Got more default values than field names')
field_defaults = dict(reversed(list(zip(reversed(field_names),
reversed(defaults)))))
# Variables used in the methods and docstrings
field_names = tuple(map(_sys.intern, field_names))
num_fields = len(field_names)
arg_list = ', '.join(field_names)
if num_fields == 1:
arg_list += ','
repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
tuple_new = tuple.__new__
_dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip
# Create all the named tuple methods to be added to the class namespace
namespace = {
'_tuple_new': tuple_new,
'__builtins__': {},
'__name__': f'namedtuple_{typename}',
}
code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
__new__ = eval(code, namespace)
__new__.__name__ = '__new__'
__new__.__doc__ = f'Create new instance of {typename}({arg_list})'
if defaults is not None:
__new__.__defaults__ = defaults
@classmethod
def _make(cls, iterable):
result = tuple_new(cls, iterable)
if _len(result) != num_fields:
raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
return result
_make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
'or iterable')
def _replace(self, /, **kwds):
result = self._make(_map(kwds.pop, field_names, self))
if kwds:
raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
return result
_replace.__doc__ = (f'Return a new {typename} object replacing specified '
'fields with new values')
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + repr_fmt % self
def _asdict(self):
'Return a new dict which maps field names to their values.'
return _dict(_zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return _tuple(self)
# Modify function metadata to help with introspection and debugging
for method in (
__new__,
_make.__func__,
_replace,
__repr__,
_asdict,
__getnewargs__,
):
method.__qualname__ = f'{typename}.{method.__name__}'
# Build-up the class namespace dictionary
# and use type() to build the result class
class_namespace = {
'__doc__': f'{typename}({arg_list})',
'__slots__': (),
'_fields': field_names,
'_field_defaults': field_defaults,
'__new__': __new__,
'_make': _make,
'_replace': _replace,
'__repr__': __repr__,
'_asdict': _asdict,
'__getnewargs__': __getnewargs__,
'__match_args__': field_names,
}
for index, name in enumerate(field_names):
doc = _sys.intern(f'Alias for field number {index}')
class_namespace[name] = _tuplegetter(index, doc)
result = type(typename, (tuple,), class_namespace)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
if module is None:
try:
module = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
if module is not None:
result.__module__ = module
return result
| (self, /, **kwds) |
29,875 | typing | _SpecialForm | null | class _SpecialForm(_Final, _root=True):
__slots__ = ('_name', '__doc__', '_getitem')
def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
def __repr__(self):
return 'typing.' + self._name
def __reduce__(self):
return self._name
def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
def __or__(self, other):
return Union[self, other]
def __ror__(self, other):
return Union[other, self]
def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
@_tp_cache
def __getitem__(self, parameters):
return self._getitem(self, parameters)
| (getitem) |
29,876 | typing | __call__ | null | def __call__(self, *args, **kwds):
raise TypeError(f"Cannot instantiate {self!r}")
| (self, *args, **kwds) |
29,877 | typing | __getattr__ | null | def __getattr__(self, item):
if item in {'__name__', '__qualname__'}:
return self._name
raise AttributeError(item)
| (self, item) |
29,879 | typing | __init__ | null | def __init__(self, getitem):
self._getitem = getitem
self._name = getitem.__name__
self.__doc__ = getitem.__doc__
| (self, getitem) |
29,880 | typing | __instancecheck__ | null | def __instancecheck__(self, obj):
raise TypeError(f"{self} cannot be used with isinstance()")
| (self, obj) |
29,881 | typing | __mro_entries__ | null | def __mro_entries__(self, bases):
raise TypeError(f"Cannot subclass {self!r}")
| (self, bases) |
29,882 | typing | __or__ | null | def __or__(self, other):
return Union[self, other]
| (self, other) |
29,885 | typing | __ror__ | null | def __ror__(self, other):
return Union[other, self]
| (self, other) |
29,886 | typing | __subclasscheck__ | null | def __subclasscheck__(self, cls):
raise TypeError(f"{self} cannot be used with issubclass()")
| (self, cls) |
29,887 | typing_utils | _hashable | Determine whether `value` can be hashed. | def _hashable(value):
"""Determine whether `value` can be hashed."""
try:
hash(value)
except TypeError:
return False
return True
| (value) |
29,888 | typing_utils | _is_normal_subtype | null | def _is_normal_subtype(
left: NormalizedType,
right: NormalizedType,
forward_refs: typing.Optional[typing.Mapping[str, type]],
) -> typing.Optional[bool]:
if isinstance(left.origin, ForwardRef):
left = normalize(eval_forward_ref(left.origin, forward_refs=forward_refs))
if isinstance(right.origin, ForwardRef):
right = normalize(eval_forward_ref(right.origin, forward_refs=forward_refs))
# Any
if right.origin is typing.Any:
return True
# Union
if right.origin is typing.Union and left.origin is typing.Union:
return _is_origin_subtype_args(left.args, right.args, forward_refs)
if right.origin is typing.Union:
return optional_any(
_is_normal_subtype(left, a, forward_refs) for a in right.args
)
if left.origin is typing.Union:
return optional_all(
_is_normal_subtype(a, right, forward_refs) for a in left.args
)
# TypeVar
if isinstance(left.origin, typing.TypeVar) and isinstance(
right.origin, typing.TypeVar
):
if left.origin is right.origin:
return True
left_bound = getattr(left.origin, "__bound__", None)
right_bound = getattr(right.origin, "__bound__", None)
if right_bound is None or left_bound is None:
return unknown
return _is_normal_subtype(
normalize(left_bound), normalize(right_bound), forward_refs
)
if isinstance(right.origin, typing.TypeVar):
return unknown
if isinstance(left.origin, typing.TypeVar):
left_bound = getattr(left.origin, "__bound__", None)
if left_bound is None:
return unknown
return _is_normal_subtype(normalize(left_bound), right, forward_refs)
if not left.args and not right.args:
return _is_origin_subtype(left.origin, right.origin)
if not right.args:
return _is_origin_subtype(left.origin, right.origin)
if _is_origin_subtype(left.origin, right.origin):
return _is_origin_subtype_args(left.args, right.args, forward_refs)
return False
| (left: typing_utils.NormalizedType, right: typing_utils.NormalizedType, forward_refs: Optional[Mapping[str, type]]) -> Optional[bool] |
29,889 | typing_utils | _is_origin_subtype | null | def _is_origin_subtype(left: OriginType, right: OriginType) -> bool:
if left is right:
return True
if (
left is not None
and left in STATIC_SUBTYPE_MAPPING
and right == STATIC_SUBTYPE_MAPPING[left]
):
return True
if hasattr(left, "mro"):
for parent in left.mro():
if parent == right:
return True
if isinstance(left, type) and isinstance(right, type):
return issubclass(left, right)
return left == right
| (left: Optional[type], right: Optional[type]) -> bool |
29,890 | typing_utils | _is_origin_subtype_args | null | def _is_origin_subtype_args(
left: NormalizedTypeArgs,
right: NormalizedTypeArgs,
forward_refs: typing.Optional[typing.Mapping[str, type]],
) -> typing.Optional[bool]:
if isinstance(left, frozenset):
if not isinstance(right, frozenset):
return False
excluded = left - right
if not excluded:
# Union[str, int] <> Union[int, str]
return True
# Union[list, int] <> Union[typing.Sequence, int]
return all(
any(_is_normal_subtype(e, r, forward_refs) for r in right) for e in excluded
)
if isinstance(left, collections.abc.Sequence) and not isinstance(
left, NormalizedType
):
if not isinstance(right, collections.abc.Sequence) or isinstance(
right, NormalizedType
):
return False
if (
left
and left[-1].origin is not Ellipsis
and right
and right[-1].origin is Ellipsis
):
# Tuple[type, type] <> Tuple[type, ...]
return all(_is_origin_subtype_args(l, right[0], forward_refs) for l in left)
if len(left) != len(right):
return False
return all(
l is not None
and r is not None
and _is_origin_subtype_args(l, r, forward_refs)
for l, r in itertools.zip_longest(left, right)
)
assert isinstance(left, NormalizedType)
assert isinstance(right, NormalizedType)
return _is_normal_subtype(left, right, forward_refs)
| (left: Union[Tuple[Union[Tuple[ForwardRef('NormalizedTypeArgs'), ...], FrozenSet[typing_utils.NormalizedType], typing_utils.NormalizedType], ...], FrozenSet[typing_utils.NormalizedType], typing_utils.NormalizedType], right: Union[Tuple[Union[Tuple[ForwardRef('NormalizedTypeArgs'), ...], FrozenSet[typing_utils.NormalizedType], typing_utils.NormalizedType], ...], FrozenSet[typing_utils.NormalizedType], typing_utils.NormalizedType], forward_refs: Optional[Mapping[str, type]]) -> Optional[bool] |
29,891 | typing_utils | _normalize_aliases | null | def _normalize_aliases(type_: Type) -> Type:
if isinstance(type_, typing.TypeVar):
return type_
assert _hashable(type_), "_normalize_aliases should only be called on element types"
if type_ in BUILTINS_MAPPING:
return BUILTINS_MAPPING[type_]
return type_
| (type_: Union[NoneType, type, TypeVar]) -> Union[NoneType, type, TypeVar] |
29,892 | typing_utils | _normalize_args | null | def _normalize_args(tps: TypeArgs):
if isinstance(tps, collections.abc.Sequence):
return tuple(_normalize_args(type_) for type_ in tps)
if isinstance(tps, collections.abc.Set):
return frozenset(_normalize_args(type_) for type_ in tps)
return normalize(tps)
| (tps: Union[type, AbstractSet[type], Sequence[type]]) |
29,894 | typing_utils | eval_forward_ref |
Evaluate forward references across all CPython versions.
| def eval_forward_ref(ref, forward_refs=None):
'''
Evaluate forward references across all CPython versions.
'''
localns = forward_refs or {}
if hasattr(typing, "_eval_type"): # python3.8 & python 3.9
_eval_type = getattr(typing, "_eval_type")
return _eval_type(ref, globals(), localns)
if hasattr(ref, "_eval_type"): # python3.6
_eval_type = getattr(ref, "_eval_type")
return _eval_type(globals(), localns)
raise NotImplementedError()
| (ref, forward_refs=None) |
29,895 | typing_utils | get_args | Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples:
```python
from typing_utils import get_args
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
```
| def get_args(type_) -> typing.Tuple:
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples:
```python
from typing_utils import get_args
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
```
"""
if hasattr(typing, 'get_args'): # python 3.8+
_getter = getattr(typing, "get_args")
res = _getter(type_)
elif hasattr(typing.List, "_special"): # python 3.7
if (
isinstance(type_, GenericClass) and not type_._special
): # backport for python 3.8
res = type_.__args__
if get_origin(type_) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
else:
res = ()
else: # python 3.6
if isinstance(type_, (GenericClass, UnionClass)): # backport for python 3.8
res = type_.__args__
if get_origin(type_) is collections.abc.Callable and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
else:
res = ()
return () if res is None else res
| (type_) -> Tuple |
29,896 | typing_utils | get_origin | Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final and ClassVar.
Return None for unsupported types.
Examples:
```python
from typing_utils import get_origin
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
```
| def get_origin(type_):
"""Get the unsubscripted version of a type.
This supports generic types, Callable, Tuple, Union, Literal, Final and ClassVar.
Return None for unsupported types.
Examples:
```python
from typing_utils import get_origin
get_origin(Literal[42]) is Literal
get_origin(int) is None
get_origin(ClassVar[int]) is ClassVar
get_origin(Generic) is Generic
get_origin(Generic[T]) is Generic
get_origin(Union[T, int]) is Union
get_origin(List[Tuple[T, T]][int]) == list
```
"""
if hasattr(typing, 'get_origin'): # python 3.8+
_getter = getattr(typing, "get_origin")
ori = _getter(type_)
elif hasattr(typing.List, "_special"): # python 3.7
if isinstance(type_, GenericClass) and not type_._special:
ori = type_.__origin__
elif hasattr(type_, "_special") and type_._special:
ori = type_
elif type_ is typing.Generic:
ori = typing.Generic
else:
ori = None
else: # python 3.6
if isinstance(type_, GenericClass):
ori = type_.__origin__
if ori is None:
ori = type_
elif isinstance(type_, UnionClass):
ori = type_.__origin__
elif type_ is typing.Generic:
ori = typing.Generic
else:
ori = None
return _normalize_aliases(ori)
| (type_) |
29,897 | typing | get_type_hints | Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used. For classes, the search
order is globals first then locals.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
| def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
"""Return type hints for an object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, adds Optional[t] if a
default value equal to None is set and recursively replaces all
'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
The argument may be a module, class, method, or function. The annotations
are returned as a dictionary. For classes, annotations include also
inherited members.
TypeError is raised if the argument is not of a type that can contain
annotations, and an empty dictionary is returned if no annotations are
present.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj (or the respective module's globals for classes),
and these are also used as the locals. If the object does not appear
to have globals, an empty dictionary is used. For classes, the search
order is globals first then locals.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
# Classes require a special treatment.
if isinstance(obj, type):
hints = {}
for base in reversed(obj.__mro__):
if globalns is None:
base_globals = getattr(sys.modules.get(base.__module__, None), '__dict__', {})
else:
base_globals = globalns
ann = base.__dict__.get('__annotations__', {})
if isinstance(ann, types.GetSetDescriptorType):
ann = {}
base_locals = dict(vars(base)) if localns is None else localns
if localns is None and globalns is None:
# This is surprising, but required. Before Python 3.10,
# get_type_hints only evaluated the globalns of
# a class. To maintain backwards compatibility, we reverse
# the globalns and localns order so that eval() looks into
# *base_globals* first rather than *base_locals*.
# This only affects ForwardRefs.
base_globals, base_locals = base_locals, base_globals
for name, value in ann.items():
if value is None:
value = type(None)
if isinstance(value, str):
value = ForwardRef(value, is_argument=False, is_class=True)
value = _eval_type(value, base_globals, base_locals)
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
if globalns is None:
if isinstance(obj, types.ModuleType):
globalns = obj.__dict__
else:
nsobj = obj
# Find globalns for the unwrapped object.
while hasattr(nsobj, '__wrapped__'):
nsobj = nsobj.__wrapped__
globalns = getattr(nsobj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
hints = getattr(obj, '__annotations__', None)
if hints is None:
# Return empty annotations for something that _could_ have them.
if isinstance(obj, _allowed_types):
return {}
else:
raise TypeError('{!r} is not a module, class, method, '
'or function.'.format(obj))
defaults = _get_defaults(obj)
hints = dict(hints)
for name, value in hints.items():
if value is None:
value = type(None)
if isinstance(value, str):
# class-level forward refs were handled above, this must be either
# a module-level annotation or a function argument annotation
value = ForwardRef(
value,
is_argument=not isinstance(obj, types.ModuleType),
is_class=False,
)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints if include_extras else {k: _strip_annotations(t) for k, t in hints.items()}
| (obj, globalns=None, localns=None, include_extras=False) |
29,899 | typing_utils | issubtype | Check that the left argument is a subtype of the right.
For unions, check if the type arguments of the left is a subset of the right.
Also works for nested types including ForwardRefs.
Examples:
```python
from typing_utils import issubtype
issubtype(typing.List, typing.Any) == True
issubtype(list, list) == True
issubtype(list, typing.List) == True
issubtype(list, typing.Sequence) == True
issubtype(typing.List[int], list) == True
issubtype(typing.List[typing.List], list) == True
issubtype(list, typing.List[int]) == False
issubtype(list, typing.Union[typing.Tuple, typing.Set]) == False
issubtype(typing.List[typing.List], typing.List[typing.Sequence]) == True
JSON = typing.Union[
int, float, bool, str, None, typing.Sequence["JSON"],
typing.Mapping[str, "JSON"]
]
issubtype(str, JSON, forward_refs={'JSON': JSON}) == True
issubtype(typing.Dict[str, str], JSON, forward_refs={'JSON': JSON}) == True
issubtype(typing.Dict[str, bytes], JSON, forward_refs={'JSON': JSON}) == False
```
| def issubtype(
left: Type, right: Type, forward_refs: typing.Optional[dict] = None,
) -> typing.Optional[bool]:
"""Check that the left argument is a subtype of the right.
For unions, check if the type arguments of the left is a subset of the right.
Also works for nested types including ForwardRefs.
Examples:
```python
from typing_utils import issubtype
issubtype(typing.List, typing.Any) == True
issubtype(list, list) == True
issubtype(list, typing.List) == True
issubtype(list, typing.Sequence) == True
issubtype(typing.List[int], list) == True
issubtype(typing.List[typing.List], list) == True
issubtype(list, typing.List[int]) == False
issubtype(list, typing.Union[typing.Tuple, typing.Set]) == False
issubtype(typing.List[typing.List], typing.List[typing.Sequence]) == True
JSON = typing.Union[
int, float, bool, str, None, typing.Sequence["JSON"],
typing.Mapping[str, "JSON"]
]
issubtype(str, JSON, forward_refs={'JSON': JSON}) == True
issubtype(typing.Dict[str, str], JSON, forward_refs={'JSON': JSON}) == True
issubtype(typing.Dict[str, bytes], JSON, forward_refs={'JSON': JSON}) == False
```
"""
return _is_normal_subtype(normalize(left), normalize(right), forward_refs)
| (left: Union[NoneType, type, TypeVar], right: Union[NoneType, type, TypeVar], forward_refs: Optional[dict] = None) -> Optional[bool] |
29,901 | typing_utils | normalize |
convert types to NormalizedType instances.
| def normalize(type_: Type) -> NormalizedType:
'''
convert types to NormalizedType instances.
'''
args = get_args(type_)
origin = get_origin(type_)
if not origin:
return NormalizedType(_normalize_aliases(type_))
origin = _normalize_aliases(origin)
if origin is typing.Union: # sort args when the origin is Union
args = _normalize_args(frozenset(args))
else:
args = _normalize_args(args)
return NormalizedType(origin, args)
| (type_: Union[NoneType, type, TypeVar]) -> typing_utils.NormalizedType |
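For illustration (a sketch assuming `normalize` is importable from the module):
```python
import typing
from typing_utils import normalize

n = normalize(typing.List[int])
n.origin                                 # the builtin list (typing.List is de-aliased)
n.args                                   # (NormalizedType(origin=int),)
normalize(typing.Union[int, str]).args   # a frozenset: Union argument order is ignored
```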
29,902 | typing_utils | optional_all | null | def optional_all(elements) -> typing.Optional[bool]:
if all(elements):
return True
if all(e is False for e in elements):
return False
return unknown
| (elements) -> Optional[bool] |
29,903 | typing_utils | optional_any | null | def optional_any(elements) -> typing.Optional[bool]:
if any(elements):
return True
if any(e is None for e in elements):
return unknown
return False
| (elements) -> Optional[bool] |
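Together these helpers implement three-valued logic over `Optional[bool]`, where the module's `unknown` sentinel (its `Optional[bool]` signatures suggest it is `None`) marks an undecidable comparison:
```python
optional_any([True, None])    # True: one definite True is enough
optional_any([False, None])   # unknown: nothing is True and one element is undecided
optional_all([True, None])    # unknown: not all True, but not all definitely False
optional_all([False, False])  # False: every element is definitely False
```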
29,905 | biocgenerics.colnames | colnames | Access column names from 2-dimensional representations.
Args:
x: Any object.
Raises:
NotImplementedError: If ``x`` is not a supported type.
Returns:
list: List of column names.
| null | (x) -> list |
29,906 | biocgenerics.combine_cols | combine_cols | Combine n-dimensional objects along the second dimension.
If all elements are :py:class:`~numpy.ndarray`,
we combine them using numpy's :py:func:`~numpy.concatenate`.
If all elements are either :py:class:`~scipy.sparse.spmatrix` or
:py:class:`~scipy.sparse.sparray`, these objects are combined
using scipy's :py:func:`~scipy.sparse.hstack`.
If all elements are :py:class:`~pandas.DataFrame` objects, they are combined using
:py:func:`~pandas.concat` along the second axis.
Args:
x (Any): n-dimensional objects to combine.
All elements of x are expected to be the same class or
at least compatible with each other.
Returns:
A combined object, typically the same type as the first element in ``x``.
A :py:class:`~numpy.ndarray`, if the elements are a mix of dense and sparse objects.
| null | (*x: Any) |
29,907 | biocgenerics.combine_rows | combine_rows | Combine n-dimensional objects along their first dimension.
If all elements are :py:class:`~numpy.ndarray`,
we combine them using numpy's :py:func:`~numpy.concatenate`.
If all elements are either :py:class:`~scipy.sparse.spmatrix` or
:py:class:`~scipy.sparse.sparray`, these objects are combined
using scipy's :py:func:`~scipy.sparse.vstack`.
If all elements are :py:class:`~pandas.DataFrame` objects, they are combined using
:py:func:`~pandas.concat` along the first axis.
Args:
x (Any): n-dimensional objects to combine.
All elements of x are expected to be the same class or
at least compatible with each other.
Returns:
A combined object, typically the same type as the first element in ``x``.
A :py:class:`~numpy.ndarray`, if the elements are a mix of dense and sparse objects.
| null | (*x: Any) |
29,908 | biocgenerics.combine_seqs | combine_seqs | Combine vector-like objects (1-dimensional arrays).
If all elements are :py:class:`~numpy.ndarray`,
we combine them using numpy's :py:func:`~numpy.concatenate`.
If all elements are either :py:class:`~scipy.sparse.spmatrix` or
:py:class:`~scipy.sparse.sparray`, these objects are combined
using scipy's :py:func:`~scipy.sparse.hstack`.
If all elements are :py:class:`~pandas.Series` objects, they are combined using
:py:func:`~pandas.concat`.
For all other scenarios, all elements are coerced to a :py:class:`~list` and
combined.
Args:
x (Any): Vector-like objects to combine.
All elements of ``x`` are expected to be the same class or
at least compatible with each other.
Raises:
TypeError: If any object in the list cannot be coerced to a list.
Returns:
A combined object, typically the same type as the first element in ``x``.
A :py:class:`~numpy.ndarray`, if the elements are a mix of dense and sparse objects.
A :py:class:`~list`, if one of the objects is a :py:class:`~list`.
| null | (*x: Any) |
29,909 | biocgenerics.show_as_cell | format_table | Pretty-print a table with wrapping columns.
Args:
columns: List of list of strings, where each inner list is the same length.
Strings are typically generated by :py:meth:`~show_as_cell`.
floating_names: List of strings to be added to the left of the table. This is
printed repeatedly for each set of wrapped columns.
sep: Separator between columns.
window: Size of the terminal window, in characters. We attempt to determine
this automatically, otherwise it is set to 150.
Returns:
str: String containing the pretty-printed table.
| def format_table(
columns: List[Sequence[str]],
floating_names: Optional[Sequence[str]] = None,
sep: str = " ",
window: Optional[int] = None,
) -> str:
"""Pretty-print a table with wrapping columns.
Args:
columns: List of list of strings, where each inner list is the same length.
Strings are typically generated by :py:meth:`~show_as_cell`.
floating_names: List of strings to be added to the left of the table. This is
printed repeatedly for each set of wrapped columns.
sep: Separator between columns.
window: Size of the terminal window, in characters. We attempt to determine
this automatically, otherwise it is set to 150.
Returns:
str: String containing the pretty-printed table.
"""
if window is None:
import os
try:
window = os.get_terminal_size().columns
except Exception as _:
window = 150
if len(columns) == 0:
raise ValueError("At least one column should be supplied in 'columns'.")
n = len(columns[0])
floatwidth = 0
if floating_names is not None:
floatwidth = _get_max_width(floating_names)
new_floating_names = []
for y in floating_names:
new_floating_names.append(y.rjust(floatwidth))
floating_names = new_floating_names
output = ""
def reinitialize():
if floating_names is None:
return [""] * n
else:
return floating_names[:]
contents = reinitialize()
init = True
used = floatwidth
for col in columns:
width = _get_max_width(col)
if not init and used + width + len(sep) > window:
for line in contents:
output += line + "\n"
contents = reinitialize()
init = True
used = floatwidth
for i, y in enumerate(col):
if used > 0:
contents[i] += sep
contents[i] += y.rjust(width)
used += width + len(sep)
init = False
output += "\n".join(contents)
return output
| (columns: List[Sequence[str]], floating_names: Optional[Sequence[str]] = None, sep: str = ' ', window: Optional[int] = None) -> str |
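For example, three rows and two columns, with the row labels repeated if the table wraps:
```python
from biocgenerics.show_as_cell import format_table

cols = [["1", "22", "333"], ["a", "bb", "ccc"]]
print(format_table(cols, floating_names=["r1", "r2", "r3"], window=80))
# r1   1   a
# r2  22  bb
# r3 333 ccc
```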
29,910 | biocgenerics.rownames | rownames | Access row names from 2-dimensional representations.
Args:
x: Any object.
Raises:
NotImplementedError: If ``x`` is not a supported type.
Returns:
List[str]: List of row names.
| null | (x) -> List[str] |
29,911 | biocgenerics.colnames | set_colnames | Set column names.
Args:
x: Any object.
names (List[str]): New names.
Raises:
NotImplementedError: if type is not supported.
Returns:
An object with the same type as ``x``.
| null | (x, names: List[str]) |
29,912 | biocgenerics.rownames | set_rownames | Set row names.
Args:
x (Any): supported object.
names (List[str]): New names.
Raises:
NotImplementedError: If ``x`` is not a supported type.
Returns:
An object with the same type as ``x``.
| null | (x: Any, names: List[str]) |
29,913 | biocgenerics.show_as_cell | show_as_cell | Show the contents of ``x`` as a cell of a table, typically for use in the ``__str__`` method of a class that
contains ``x``.
Args:
x:
Any object. By default, we assume that it can be treated as
a sequence, with a valid ``__getitem__`` method for an index.
indices:
List of indices to be extracted.
Returns:
List of strings of length equal to ``indices``, containing a
string summary of each of the specified elements of ``x``.
| null | (x: Any, indices: Sequence[int]) -> List[str] |
29,916 | gymnasium.envs.registration | register | Registers an environment in gymnasium with an ``id`` to use with :meth:`gymnasium.make` with the ``entry_point`` being a string or callable for creating the environment.
The ``id`` parameter corresponds to the name of the environment, with the syntax as follows:
``[namespace/](env_name)[-v(version)]`` where ``namespace`` and ``-v(version)`` is optional.
It takes arbitrary keyword arguments, which are passed to the :class:`EnvSpec` ``kwargs`` parameter.
Args:
id: The environment id
entry_point: The entry point for creating the environment
reward_threshold: The reward threshold considered for an agent to have learnt the environment
nondeterministic: If the environment is nondeterministic (even with knowledge of the initial seed and all actions, the same state cannot be reached)
max_episode_steps: The maximum number of episode steps before truncation. Used by the :class:`gymnasium.wrappers.TimeLimit` wrapper if not ``None``.
order_enforce: Whether to enable the order enforcer wrapper to ensure users run functions in the correct order.
If ``True``, then the :class:`gymnasium.wrappers.OrderEnforcing` is applied to the environment.
autoreset: Whether to add the :class:`gymnasium.wrappers.AutoResetWrapper` such that on ``(terminated or truncated) is True``, :meth:`gymnasium.Env.reset` is called.
disable_env_checker: Whether to disable the :class:`gymnasium.wrappers.PassiveEnvChecker` for the environment.
apply_api_compatibility: Whether to apply the :class:`gymnasium.wrappers.StepAPICompatibility` wrapper to the environment.
Use if the environment is implemented in the gym v0.21 environment API.
additional_wrappers: Additional wrappers to apply to the environment.
vector_entry_point: The entry point for creating the vector environment
**kwargs: arbitrary keyword arguments which are passed to the environment constructor on initialisation.
def register(
    id: str,
    entry_point: EnvCreator | str | None = None,
    reward_threshold: float | None = None,
    nondeterministic: bool = False,
    max_episode_steps: int | None = None,
    order_enforce: bool = True,
    autoreset: bool = False,
    disable_env_checker: bool = False,
    apply_api_compatibility: bool = False,
    additional_wrappers: tuple[WrapperSpec, ...] = (),
    vector_entry_point: VectorEnvCreator | str | None = None,
    **kwargs: Any,
):
    """Registers an environment in gymnasium with an ``id`` to use with :meth:`gymnasium.make` with the ``entry_point`` being a string or callable for creating the environment.

    The ``id`` parameter corresponds to the name of the environment, with the syntax as follows:
    ``[namespace/](env_name)[-v(version)]`` where ``namespace`` and ``-v(version)`` are optional.

    It takes arbitrary keyword arguments, which are passed to the :class:`EnvSpec` ``kwargs`` parameter.

    Args:
        id: The environment id
        entry_point: The entry point for creating the environment
        reward_threshold: The reward threshold considered for an agent to have learnt the environment
        nondeterministic: If the environment is nondeterministic (even with knowledge of the initial seed and all actions, the same state cannot be reached)
        max_episode_steps: The maximum number of episode steps before truncation. Used by the :class:`gymnasium.wrappers.TimeLimit` wrapper if not ``None``.
        order_enforce: Whether to enable the order enforcer wrapper to ensure users run functions in the correct order.
            If ``True``, then the :class:`gymnasium.wrappers.OrderEnforcing` is applied to the environment.
        autoreset: Whether to add the :class:`gymnasium.wrappers.AutoResetWrapper` such that on ``(terminated or truncated) is True``, :meth:`gymnasium.Env.reset` is called.
        disable_env_checker: Whether to disable the :class:`gymnasium.wrappers.PassiveEnvChecker` wrapper for the environment.
        apply_api_compatibility: Whether to apply the :class:`gymnasium.wrappers.StepAPICompatibility` wrapper to the environment.
            Use if the environment is implemented in the gym v0.21 environment API.
        additional_wrappers: Additional wrappers to apply to the environment.
        vector_entry_point: The entry point for creating the vector environment
        **kwargs: arbitrary keyword arguments which are passed to the environment constructor on initialisation.
    """
    assert (
        entry_point is not None or vector_entry_point is not None
    ), "Either `entry_point` or `vector_entry_point` (or both) must be provided"
    global registry, current_namespace
    ns, name, version = parse_env_id(id)

    if current_namespace is not None:
        if (
            kwargs.get("namespace") is not None
            and kwargs.get("namespace") != current_namespace
        ):
            logger.warn(
                f"Custom namespace `{kwargs.get('namespace')}` is being overridden by namespace `{current_namespace}`. "
                f"If you are developing a plugin you shouldn't specify a namespace in `register` calls. "
                "The namespace is specified through the entry point package metadata."
            )
        ns_id = current_namespace
    else:
        ns_id = ns
    full_env_id = get_env_id(ns_id, name, version)

    if autoreset is True:
        logger.warn(
            "`gymnasium.register(..., autoreset=True)` is deprecated and will be removed in v1.0. If users wish to use it then add the auto reset wrapper in the `additional_wrappers` argument."
        )

    new_spec = EnvSpec(
        id=full_env_id,
        entry_point=entry_point,
        reward_threshold=reward_threshold,
        nondeterministic=nondeterministic,
        max_episode_steps=max_episode_steps,
        order_enforce=order_enforce,
        autoreset=autoreset,
        disable_env_checker=disable_env_checker,
        apply_api_compatibility=apply_api_compatibility,
        **kwargs,
        additional_wrappers=additional_wrappers,
        vector_entry_point=vector_entry_point,
    )
    _check_spec_register(new_spec)
    if new_spec.id in registry:
        logger.warn(f"Overriding environment {new_spec.id} already in registry.")
    registry[new_spec.id] = new_spec
| (id: str, entry_point: Union[gymnasium.envs.registration.EnvCreator, str, NoneType] = None, reward_threshold: Optional[float] = None, nondeterministic: bool = False, max_episode_steps: Optional[int] = None, order_enforce: bool = True, autoreset: bool = False, disable_env_checker: bool = False, apply_api_compatibility: bool = False, additional_wrappers: tuple[gymnasium.envs.registration.WrapperSpec, ...] = (), vector_entry_point: Union[gymnasium.envs.registration.VectorEnvCreator, str, NoneType] = None, **kwargs: Any) |
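A usage sketch for the `register`/`make` round trip; `my_package.envs:GridWorldEnv` is a hypothetical entry point used only for illustration:

import gymnasium as gym

gym.register(
    id="my_namespace/GridWorld-v0",
    entry_point="my_package.envs:GridWorldEnv",  # hypothetical module:class
    max_episode_steps=300,                       # wraps the env in TimeLimit
    kwargs={"size": 5},                          # stored on EnvSpec, forwarded to the constructor
)
env = gym.make("my_namespace/GridWorld-v0")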
29,917 | zeroconf._exceptions | AbstractMethodException | Exception when a required method is not implemented. | class AbstractMethodException(Error):
"""Exception when a required method is not implemented."""
| null |
29,918 | zeroconf._exceptions | BadTypeInNameException | Exception when the type in a name is invalid. | class BadTypeInNameException(Error):
"""Exception when the type in a name is invalid."""
| null |
29,919 | zeroconf._dns | DNSAddress | A DNS address record | from zeroconf._dns import DNSAddress
| null |
29,920 | zeroconf._cache | DNSCache | A cache of DNS entries. | from zeroconf._cache import DNSCache
| null |
29,921 | zeroconf._dns | DNSEntry | A DNS entry | from zeroconf._dns import DNSEntry
| null |
29,922 | zeroconf._dns | DNSHinfo | A DNS host information record | from zeroconf._dns import DNSHinfo
| null |
29,923 | zeroconf._protocol.incoming | DNSIncoming | Object representation of an incoming DNS packet | from zeroconf._protocol.incoming import DNSIncoming
| null |
29,924 | zeroconf._dns | DNSNsec | A DNS NSEC record | from zeroconf._dns import DNSNsec
| null |
29,925 | zeroconf._protocol.outgoing | DNSOutgoing | Object representation of an outgoing packet | from zeroconf._protocol.outgoing import DNSOutgoing
| null |
29,926 | zeroconf._dns | DNSPointer | A DNS pointer record | from zeroconf._dns import DNSPointer
| null |
29,927 | zeroconf._dns | DNSQuestion | A DNS question entry | from zeroconf._dns import DNSQuestion
| null |
29,928 | zeroconf._dns | DNSQuestionType | An MDNS question type.
"QU" - questions requesting unicast responses
"QM" - questions requesting multicast responses
https://datatracker.ietf.org/doc/html/rfc6762#section-5.4
| from zeroconf._dns import DNSQuestionType
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
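A sketch of requesting unicast (QU) responses when browsing. The `question_type` keyword on `ServiceBrowser` exists in current zeroconf releases, though the service type shown here is only an example:

from zeroconf import DNSQuestionType, ServiceBrowser, ServiceListener, Zeroconf

class Listener(ServiceListener):
    def add_service(self, zc, type_, name):
        print("found", name)

    def remove_service(self, zc, type_, name):
        pass

    def update_service(self, zc, type_, name):
        pass

zc = Zeroconf()
# QU asks responders to reply via unicast instead of multicast (RFC 6762 §5.4)
browser = ServiceBrowser(zc, "_http._tcp.local.", Listener(), question_type=DNSQuestionType.QU)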
29,929 | zeroconf._dns | DNSRecord | A DNS record - like a DNS entry, but has a TTL | from zeroconf._dns import DNSRecord
| null |
29,930 | zeroconf._dns | DNSService | A DNS service record | from zeroconf._dns import DNSService
| null |
29,931 | zeroconf._dns | DNSText | A DNS text record | from zeroconf._dns import DNSText
| null |
29,932 | zeroconf._exceptions | Error | Base class for all zeroconf exceptions. | class Error(Exception):
"""Base class for all zeroconf exceptions."""
| null |
29,933 | zeroconf._exceptions | EventLoopBlocked | Exception when the event loop is blocked.
This exception is never expected to be thrown
during normal operation. It should only happen
when the cpu is maxed out or there is something blocking
the event loop.
| class EventLoopBlocked(Error):
"""Exception when the event loop is blocked.
This exception is never expected to be thrown
during normal operation. It should only happen
when the cpu is maxed out or there is something blocking
the event loop.
"""
| null |
29,934 | zeroconf._utils.net | IPVersion | An enumeration. | class IPVersion(enum.Enum):
    V4Only = 1
    V6Only = 2
    All = 3
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
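Constraining a `Zeroconf` instance to one IP stack; the `ip_version` constructor keyword is part of the public API:

from zeroconf import IPVersion, Zeroconf

zc = Zeroconf(ip_version=IPVersion.V4Only)  # V6Only and All are the other choices
zc.close()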
29,935 | zeroconf._exceptions | IncomingDecodeError | Exception when there is invalid data in an incoming packet. | class IncomingDecodeError(Error):
"""Exception when there is invalid data in an incoming packet."""
| null |
29,936 | zeroconf._utils.net | InterfaceChoice | An enumeration. | class InterfaceChoice(enum.Enum):
    Default = 1
    All = 2
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
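`InterfaceChoice` plays the same role for interface selection; `interfaces` also accepts an explicit list of addresses:

from zeroconf import InterfaceChoice, Zeroconf

zc = Zeroconf(interfaces=InterfaceChoice.All)  # bind every usable interface
zc.close()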
29,937 | zeroconf._exceptions | NamePartTooLongException | Exception when the name is too long. | class NamePartTooLongException(Error):
"""Exception when the name is too long."""
| null |
29,938 | zeroconf._exceptions | NonUniqueNameException | Exception when the name is already registered. | class NonUniqueNameException(Error):
"""Exception when the name is already registered."""
| null |
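All of the exception records in this block derive from the `Error` base class above, so a single except clause can catch any of them:

from zeroconf import Error as ZeroconfError, Zeroconf

try:
    zc = Zeroconf()
    # ... register or browse services here ...
    zc.close()
except ZeroconfError as exc:
    print(f"zeroconf operation failed: {exc}")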