Dataset schema (column name, dtype, and observed value/length range):

    column     dtype    range
    index      int64    0 .. 731k
    package    string   lengths 2 .. 98
    name       string   lengths 1 .. 76
    docstring  string   lengths 0 .. 281k
    code       string   lengths 4 .. 1.07M
    signature  string   lengths 2 .. 42.8k

Each record below lists index, package, name, docstring, code, and signature, in that order.
22,689
salem.sio
read_shapefile
Reads a shapefile using geopandas. For convenience, it adds four columns to the dataframe: [min_x, max_x, min_y, max_y] Because reading a shapefile can take a long time, Salem provides a caching utility (cached=True). This will save a pickle of the shapefile in the cache directory.
def read_shapefile(fpath, cached=False): """Reads a shapefile using geopandas. For convenience, it adds four columns to the dataframe: [min_x, max_x, min_y, max_y] Because reading a shapefile can take a long time, Salem provides a caching utility (cached=True). This will save a pickle of the shapefile in the cache directory. """ import geopandas as gpd _, ext = os.path.splitext(fpath) if ext.lower() in ['.shp', '.p']: if cached: cpath = cached_shapefile_path(fpath) # unpickle if cached, read and pickle if not if os.path.exists(cpath): with open(cpath, 'rb') as f: out = pickle.load(f) else: out = read_shapefile(fpath, cached=False) with open(cpath, 'wb') as f: pickle.dump(out, f) else: out = gpd.read_file(fpath) out['min_x'] = [g.bounds[0] for g in out.geometry] out['max_x'] = [g.bounds[2] for g in out.geometry] out['min_y'] = [g.bounds[1] for g in out.geometry] out['max_y'] = [g.bounds[3] for g in out.geometry] else: raise ValueError('File extension not recognised: {}'.format(ext)) return out
(fpath, cached=False)
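A hedged usage sketch for the row above; the shapefile path is hypothetical, and it assumes salem (with geopandas) is installed and that read_shapefile is exported at the salem top level:

    import salem

    # First call reads with geopandas and pickles the result into salem's
    # cache directory; later calls with cached=True unpickle instead.
    gdf = salem.read_shapefile('/path/to/rivers.shp', cached=True)

    # the four convenience bounds columns added by the function
    print(gdf[['min_x', 'max_x', 'min_y', 'max_y']].head())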
22,690
salem.sio
read_shapefile_to_grid
Same as read_shapefile but directly transformed to a grid. The whole thing is cached so that the second call will be much faster. Parameters ---------- fpath: path to the file grid: the arrival grid
def read_shapefile_to_grid(fpath, grid): """Same as read_shapefile but directly transformed to a grid. The whole thing is cached so that the second call will be much faster. Parameters ---------- fpath: path to the file grid: the arrival grid """ # ensure it is a cached pickle (copy code smell) shape_cpath = cached_shapefile_path(fpath) if not os.path.exists(shape_cpath): out = read_shapefile(fpath, cached=False) with open(shape_cpath, 'wb') as f: pickle.dump(out, f) return _memory_shapefile_to_grid(shape_cpath, grid=grid, **grid.to_dict())
(fpath, grid)
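A hedged sketch of the grid variant; the path is hypothetical, and the Grid keywords nxny/dxdy/x0y0 follow salem's documented constructor (an assumption here):

    import salem
    from salem import wgs84

    # a coarse 1-degree global lon/lat grid, lower-left origin
    grid = salem.Grid(nxny=(360, 180), dxdy=(1.0, 1.0),
                      x0y0=(-180.0, -90.0), proj=wgs84)

    # geometries come back expressed in the grid's coordinates; the shapefile
    # pickle and the transform are both cached, so a second call is fast
    gdf_on_grid = salem.read_shapefile_to_grid('/path/to/rivers.shp', grid)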
22,691
salem.utils
reduce
Reduces an array's size by a given factor. The reduction can be done by any reduction function (default is mean). Parameters ---------- arr : ndarray an array of at least 2 dimensions (the reduction is done on the last two dimensions). factor : int the factor to apply for reduction (must be a divisor of the original axis dimension!). how : func the reduction function Returns ------- the reduced array
def reduce(arr, factor=1, how=np.mean): """Reduces an array's size by a given factor. The reduction can be done by any reduction function (default is mean). Parameters ---------- arr : ndarray an array of at least 2 dimensions (the reduction is done on the last two dimensions). factor : int the factor to apply for reduction (must be a divisor of the original axis dimension!). how : func the reduction function Returns ------- the reduced array """ arr = np.asarray(arr) shape = list(arr.shape) newshape = shape[:-2] + [np.round(shape[-2] / factor).astype(int), factor, np.round(shape[-1] / factor).astype(int), factor] return how(how(arr.reshape(*newshape), axis=len(newshape)-3), axis=len(newshape)-2)
(arr, factor=1, how=<function mean at 0x7f9b3c8b9c70>)
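A worked example of the block reduction above, assuming reduce is importable from salem.utils as the row indicates; each output cell aggregates one non-overlapping 2x2 block, so the values can be checked by hand:

    import numpy as np
    from salem.utils import reduce

    arr = np.arange(16).reshape(4, 4)

    reduce(arr, factor=2)
    # array([[ 2.5,  4.5],
    #        [10.5, 12.5]])  e.g. the top-left block [[0, 1], [4, 5]] has mean 2.5

    reduce(arr, factor=2, how=np.max)
    # array([[ 5,  7],
    #        [13, 15]])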
22,694
salem.gis
transform_geometry
Reprojects a shapely geometry. Parameters ---------- geom : shapely geometry the geometry to transform crs : crs the geometry's crs to_crs : crs the crs into which the geometry must be transformed Returns ------- A reprojected geometry
def transform_geometry(geom, crs=wgs84, to_crs=wgs84): """Reprojects a shapely geometry. Parameters ---------- geom : shapely geometry the geometry to transform crs : crs the geometry's crs to_crs : crs the crs into which the geometry must be transformed Returns ------- A reprojected geometry """ from_crs = check_crs(crs) to_crs = check_crs(to_crs) if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj): project = partial(transform_proj, from_crs, to_crs) elif isinstance(to_crs, Grid): project = partial(to_crs.transform, crs=from_crs) elif isinstance(from_crs, Grid): project = partial(from_crs.ij_to_crs, crs=to_crs) else: raise NotImplementedError() from shapely.ops import transform return transform(project, geom)
(geom, crs=<Other Coordinate Operation Transformer: longlat> Description: PROJ-based coordinate operation Area of Use: - undefined, to_crs=<Other Coordinate Operation Transformer: longlat> Description: PROJ-based coordinate operation Area of Use: - undefined)
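A hedged sketch reprojecting a shapely point from the default wgs84 to a UTM projection (the UTM zone choice is illustrative):

    import pyproj
    from shapely.geometry import Point
    from salem.gis import transform_geometry

    utm32 = pyproj.Proj(proj='utm', zone=32, datum='WGS84')
    pt = Point(10.0, 46.0)                          # lon, lat
    pt_utm = transform_geometry(pt, to_crs=utm32)   # crs defaults to wgs84
    print(pt_utm.x, pt_utm.y)                       # eastings/northings in metres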
22,695
salem.gis
transform_geopandas
Reprojects a geopandas dataframe. Parameters ---------- gdf : geopandas.DataFrame the dataframe to transform (must have a crs attribute) from_crs : crs if gdf has no crs attribute (happens when the crs is a salem grid) to_crs : crs the crs into which the dataframe must be transformed inplace : bool the original dataframe will be overwritten (default: False) Returns ------- A projected dataframe
def transform_geopandas(gdf, from_crs=None, to_crs=wgs84, inplace=False): """Reprojects a geopandas dataframe. Parameters ---------- gdf : geopandas.DataFrame the dataframe to transform (must have a crs attribute) from_crs : crs if gdf has no crs attribute (happens when the crs is a salem grid) to_crs : crs the crs into which the dataframe must be transformed inplace : bool the original dataframe will be overwritten (default: False) Returns ------- A projected dataframe """ from shapely.ops import transform import geopandas as gpd if from_crs is None: from_crs = check_crs(gdf.crs) else: from_crs = check_crs(from_crs) to_crs = check_crs(to_crs) if inplace: out = gdf else: out = gdf.copy() if isinstance(to_crs, pyproj.Proj) and isinstance(from_crs, pyproj.Proj): project = partial(transform_proj, from_crs, to_crs) elif isinstance(to_crs, Grid): project = partial(to_crs.transform, crs=from_crs) elif isinstance(from_crs, Grid): project = partial(from_crs.ij_to_crs, crs=to_crs) else: raise NotImplementedError() # Do the job and set the new attributes result = out.geometry.apply(lambda geom: transform(project, geom)) result.__class__ = gpd.GeoSeries if isinstance(to_crs, pyproj.Proj): to_crs = to_crs.srs elif isinstance(to_crs, Grid): to_crs = None result.crs = to_crs out.geometry = result out.crs = to_crs out['min_x'] = [g.bounds[0] for g in out.geometry] out['max_x'] = [g.bounds[2] for g in out.geometry] out['min_y'] = [g.bounds[1] for g in out.geometry] out['max_y'] = [g.bounds[3] for g in out.geometry] return out
(gdf, from_crs=None, to_crs=<Other Coordinate Operation Transformer: longlat> Description: PROJ-based coordinate operation Area of Use: - undefined, inplace=False)
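A minimal sketch for the dataframe variant; the input path is hypothetical, and it assumes the file carries its own CRS so from_crs can stay None:

    import geopandas as gpd
    from salem.gis import transform_geopandas

    gdf = gpd.read_file('/path/to/data.shp')
    out = transform_geopandas(gdf)  # to_crs defaults to wgs84; original untouched
    print(out.crs)
    print(out[['min_x', 'max_x', 'min_y', 'max_y']].head())  # bounds refreshed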
22,696
salem.gis
transform_proj
Wrapper around the pyproj.transform function. Transform points between two coordinate systems defined by the Proj instances p1 and p2. When two projections are equal, this function avoids quite a bunch of useless calculations. See https://github.com/jswhit/pyproj/issues/15 Parameters ---------- p1 : pyproj.Proj projection associated to x and y p2 : pyproj.Proj projection into which x, y must be transformed x : ndarray eastings y : ndarray northings nocopy : bool in case the two projections are equal, you can use nocopy if you wish
def transform_proj(p1, p2, x, y, nocopy=False): """Wrapper around the pyproj.transform function. Transform points between two coordinate systems defined by the Proj instances p1 and p2. When two projections are equal, this function avoids quite a bunch of useless calculations. See https://github.com/jswhit/pyproj/issues/15 Parameters ---------- p1 : pyproj.Proj projection associated to x and y p2 : pyproj.Proj projection into which x, y must be transformed x : ndarray eastings y : ndarray northings nocopy : bool in case the two projections are equal, you can use nocopy if you wish """ try: # This always makes a copy, even if projections are equivalent return _transform_internal(p1, p2, x, y, always_xy=True) except TypeError: if proj_is_same(p1, p2): if nocopy: return x, y else: return copy.deepcopy(x), copy.deepcopy(y) return _transform_internal(p1, p2, x, y)
(p1, p2, x, y, nocopy=False)
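A hedged sketch of the wrapper with two distinct projections, then with equal ones (the EPSG-string constructor assumes pyproj 2+):

    import numpy as np
    import pyproj
    from salem.gis import transform_proj

    p_ll = pyproj.Proj('EPSG:4326')    # lon/lat
    p_utm = pyproj.Proj('EPSG:32632')  # UTM 32N
    lon = np.array([8.5, 9.0, 9.5])
    lat = np.array([46.0, 46.5, 47.0])

    x, y = transform_proj(p_ll, p_utm, lon, lat)

    # per the docstring, equal projections may skip the computation entirely;
    # nocopy=True then returns the inputs themselves rather than deep copies
    same_lon, same_lat = transform_proj(p_ll, p_ll, lon, lat, nocopy=True)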
22,697
urllib.request
urlopen
Open the URL url, which can be either a string or a Request object. *data* must be an object specifying additional data to be sent to the server, or None if no such data is needed. See Request for details. urllib.request module uses HTTP/1.1 and includes a "Connection:close" header in its HTTP requests. The optional *timeout* parameter specifies a timeout in seconds for blocking operations like the connection attempt (if not specified, the global default timeout setting will be used). This only works for HTTP, HTTPS and FTP connections. If *context* is specified, it must be a ssl.SSLContext instance describing the various SSL options. See HTTPSConnection for more details. The optional *cafile* and *capath* parameters specify a set of trusted CA certificates for HTTPS requests. cafile should point to a single file containing a bundle of CA certificates, whereas capath should point to a directory of hashed certificate files. More information can be found in ssl.SSLContext.load_verify_locations(). The *cadefault* parameter is ignored. This function always returns an object which can work as a context manager and has the properties url, headers, and status. See urllib.response.addinfourl for more detail on these properties. For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse object slightly modified. In addition to the three new methods above, the msg attribute contains the same information as the reason attribute --- the reason phrase returned by the server --- instead of the response headers as it is specified in the documentation for HTTPResponse. For FTP, file, and data URLs and requests explicitly handled by legacy URLopener and FancyURLopener classes, this function returns a urllib.response.addinfourl object. Note that None may be returned if no handler handles the request (though the default installed global OpenerDirector uses UnknownHandler to ensure this never happens). In addition, if proxy settings are detected (for example, when a *_proxy environment variable like http_proxy is set), ProxyHandler is default installed and makes sure the requests are handled through the proxy.
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, *, cafile=None, capath=None, cadefault=False, context=None): '''Open the URL url, which can be either a string or a Request object. *data* must be an object specifying additional data to be sent to the server, or None if no such data is needed. See Request for details. urllib.request module uses HTTP/1.1 and includes a "Connection:close" header in its HTTP requests. The optional *timeout* parameter specifies a timeout in seconds for blocking operations like the connection attempt (if not specified, the global default timeout setting will be used). This only works for HTTP, HTTPS and FTP connections. If *context* is specified, it must be a ssl.SSLContext instance describing the various SSL options. See HTTPSConnection for more details. The optional *cafile* and *capath* parameters specify a set of trusted CA certificates for HTTPS requests. cafile should point to a single file containing a bundle of CA certificates, whereas capath should point to a directory of hashed certificate files. More information can be found in ssl.SSLContext.load_verify_locations(). The *cadefault* parameter is ignored. This function always returns an object which can work as a context manager and has the properties url, headers, and status. See urllib.response.addinfourl for more detail on these properties. For HTTP and HTTPS URLs, this function returns a http.client.HTTPResponse object slightly modified. In addition to the three new methods above, the msg attribute contains the same information as the reason attribute --- the reason phrase returned by the server --- instead of the response headers as it is specified in the documentation for HTTPResponse. For FTP, file, and data URLs and requests explicitly handled by legacy URLopener and FancyURLopener classes, this function returns a urllib.response.addinfourl object. Note that None may be returned if no handler handles the request (though the default installed global OpenerDirector uses UnknownHandler to ensure this never happens). In addition, if proxy settings are detected (for example, when a *_proxy environment variable like http_proxy is set), ProxyHandler is default installed and makes sure the requests are handled through the proxy. ''' global _opener if cafile or capath or cadefault: import warnings warnings.warn("cafile, capath and cadefault are deprecated, use a " "custom context instead.", DeprecationWarning, 2) if context is not None: raise ValueError( "You can't pass both context and any of cafile, capath, and " "cadefault" ) if not _have_ssl: raise ValueError('SSL support not available') context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=cafile, capath=capath) # send ALPN extension to indicate HTTP/1.1 protocol context.set_alpn_protocols(['http/1.1']) https_handler = HTTPSHandler(context=context) opener = build_opener(https_handler) elif context: https_handler = HTTPSHandler(context=context) opener = build_opener(https_handler) elif _opener is None: _opener = opener = build_opener() else: opener = _opener return opener.open(url, data, timeout)
(url, data=None, timeout=<object object at 0x7f9b3fe2ce50>, *, cafile=None, capath=None, cadefault=False, context=None)
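A short standard-library usage example; as the docstring above notes, the response works as a context manager and exposes url, headers, and status:

    from urllib.request import urlopen

    with urlopen('https://example.com/', timeout=10) as resp:
        print(resp.status, resp.headers.get('Content-Type'))
        body = resp.read()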
22,704
geckodriver_autoinstaller
get_firefox_version
Get installed version of firefox on client :return: The version of firefox
def get_firefox_version(): """ Get installed version of firefox on client :return: The version of firefox """ return utils.get_firefox_version()
()
22,705
geckodriver_autoinstaller
install
Appends the directory of the geckodriver binary file to PATH. :param cwd: Flag indicating whether to download to current working directory :return: The file path of geckodriver
def install(cwd=False): """ Appends the directory of the geckodriver binary file to PATH. :param cwd: Flag indicating whether to download to current working directory :return: The file path of geckodriver """ geckodriver_filepath = utils.download_geckodriver(cwd) if not geckodriver_filepath: logging.debug('Can not download geckodriver.') return geckodriver_dir = os.path.dirname(geckodriver_filepath) if 'PATH' not in os.environ: os.environ['PATH'] = geckodriver_dir elif geckodriver_dir not in os.environ['PATH']: os.environ['PATH'] = geckodriver_dir + utils.get_variable_separator() + os.environ['PATH'] return geckodriver_filepath
(cwd=False)
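A hedged sketch tying the two geckodriver_autoinstaller rows together; it assumes selenium and a local Firefox are installed:

    import geckodriver_autoinstaller
    from selenium import webdriver

    print(geckodriver_autoinstaller.get_firefox_version())

    # downloads geckodriver if needed and prepends its folder to PATH
    driver_path = geckodriver_autoinstaller.install()

    driver = webdriver.Firefox()  # geckodriver now resolves via PATH
    driver.quit()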
22,709
pyproject_api._frontend
BackendFailed
An error of the build backend.
class BackendFailed(RuntimeError): # noqa: N818 """An error of the build backend.""" def __init__(self, result: dict[str, Any], out: str, err: str) -> None: super().__init__() #: standard output collected while running the command self.out = out #: standard error collected while running the command self.err = err #: exit code of the command self.code: int = result.get("code", -2) #: the type of exception thrown self.exc_type: str = result.get("exc_type", "missing Exception type") #: the string representation of the exception thrown self.exc_msg: str = result.get("exc_msg", "missing Exception message") def __str__(self) -> str: return ( f"packaging backend failed{'' if self.code is None else f' (code={self.code})'}, " f"with {self.exc_type}: {self.exc_msg}\n{self.err}{self.out}" ).rstrip() def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" f"result=dict(code={self.code}, exc_type={self.exc_type!r},exc_msg={self.exc_msg!r})," f" out={self.out!r}, err={self.err!r})" )
(result: 'dict[str, Any]', out: 'str', err: 'str') -> 'None'
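A small sketch showing how the result dict maps onto the exception's attributes and message, grounded in the __init__ and __str__ shown above:

    from pyproject_api._frontend import BackendFailed

    exc = BackendFailed(
        {'code': 1, 'exc_type': 'ValueError', 'exc_msg': 'bad metadata'},
        out='...captured stdout...',
        err='...captured stderr...',
    )
    print(exc.code, exc.exc_type)  # 1 ValueError
    print(exc)  # packaging backend failed (code=1), with ValueError: bad metadata ...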
22,710
pyproject_api._frontend
__init__
null
def __init__(self, result: dict[str, Any], out: str, err: str) -> None: super().__init__() #: standard output collected while running the command self.out = out #: standard error collected while running the command self.err = err #: exit code of the command self.code: int = result.get("code", -2) #: the type of exception thrown self.exc_type: str = result.get("exc_type", "missing Exception type") #: the string representation of the exception thrown self.exc_msg: str = result.get("exc_msg", "missing Exception message")
(self, result: dict[str, typing.Any], out: str, err: str) -> NoneType
22,711
pyproject_api._frontend
__repr__
null
def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" f"result=dict(code={self.code}, exc_type={self.exc_type!r},exc_msg={self.exc_msg!r})," f" out={self.out!r}, err={self.err!r})" )
(self) -> str
22,712
pyproject_api._frontend
__str__
null
def __str__(self) -> str: return ( f"packaging backend failed{'' if self.code is None else f' (code={self.code})'}, " f"with {self.exc_type}: {self.exc_msg}\n{self.err}{self.out}" ).rstrip()
(self) -> str
22,713
pyproject_api._frontend
CmdStatus
null
class CmdStatus(ABC): @property @abstractmethod def done(self) -> bool: """:return: truthful when the command finished running""" raise NotImplementedError @abstractmethod def out_err(self) -> tuple[str, str]: """:return: standard output and standard error text""" raise NotImplementedError
()
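A minimal concrete CmdStatus, purely illustrative, showing what the two abstract members require; note that done is an abstract property, so the subclass must override it as a property:

    from __future__ import annotations

    from pyproject_api._frontend import CmdStatus

    class FinishedStatus(CmdStatus):
        """A command that has already completed (hypothetical helper)."""

        def __init__(self, out: str, err: str) -> None:
            self._out, self._err = out, err

        @property
        def done(self) -> bool:
            return True

        def out_err(self) -> tuple[str, str]:
            return self._out, self._err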
22,714
pyproject_api._frontend
out_err
:return: standard output and standard error text
@abstractmethod def out_err(self) -> tuple[str, str]: """:return: standard output and standard error text""" raise NotImplementedError
(self) -> tuple[str, str]
22,715
pyproject_api._frontend
EditableResult
Information collected while building an editable wheel.
class EditableResult(NamedTuple): """Information collected while building an editable wheel.""" #: path to the built wheel artifact wheel: Path #: backend standard output while building the wheel out: str #: backend standard error while building the wheel err: str
(wheel: pathlib.Path, out: str, err: str)
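A quick NamedTuple usage sketch for EditableResult; the wheel filename is hypothetical, and _replace is the generated method documented in the namedtuple rows that follow:

    from pathlib import Path
    from pyproject_api._frontend import EditableResult

    res = EditableResult(wheel=Path('dist/pkg-1.0-py3-none-any.whl'), out='', err='')
    print(res.wheel.name)
    res2 = res._replace(out='build log')  # returns a new tuple; res is unchanged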
22,717
namedtuple_EditableResult
__new__
Create new instance of EditableResult(wheel, out, err)
from builtins import function
(_cls, wheel: ForwardRef('Path'), out: ForwardRef('str'), err: ForwardRef('str'))
22,720
collections
_replace
Return a new EditableResult object replacing specified fields with new values
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): """Returns a new subclass of tuple with named fields. >>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessible by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. if isinstance(field_names, str): field_names = field_names.replace(',', ' ').split() field_names = list(map(str, field_names)) typename = _sys.intern(str(typename)) if rename: seen = set() for index, name in enumerate(field_names): if (not name.isidentifier() or _iskeyword(name) or name.startswith('_') or name in seen): field_names[index] = f'_{index}' seen.add(name) for name in [typename] + field_names: if type(name) is not str: raise TypeError('Type names and field names must be strings') if not name.isidentifier(): raise ValueError('Type names and field names must be valid ' f'identifiers: {name!r}') if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' f'keyword: {name!r}') seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' f'{name!r}') if name in seen: raise ValueError(f'Encountered duplicate field name: {name!r}') seen.add(name) field_defaults = {} if defaults is not None: defaults = tuple(defaults) if len(defaults) > len(field_names): raise TypeError('Got more default values than field names') field_defaults = dict(reversed(list(zip(reversed(field_names), reversed(defaults))))) # Variables used in the methods and docstrings field_names = tuple(map(_sys.intern, field_names)) num_fields = len(field_names) arg_list = ', '.join(field_names) if num_fields == 1: arg_list += ',' repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' tuple_new = tuple.__new__ _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip # Create all the named tuple methods to be added to the class namespace namespace = { '_tuple_new': tuple_new, '__builtins__': {}, '__name__': f'namedtuple_{typename}', } code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))' __new__ = eval(code, namespace) __new__.__name__ = '__new__' __new__.__doc__ = f'Create new instance of {typename}({arg_list})' if defaults is not None: __new__.__defaults__ = defaults @classmethod def _make(cls, iterable): result = tuple_new(cls, iterable) if _len(result) != num_fields: raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') return result _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' 'or iterable') def _replace(self, /, **kwds): result = self._make(_map(kwds.pop, field_names, self)) if kwds: raise ValueError(f'Got unexpected field names: {list(kwds)!r}') return result _replace.__doc__ = (f'Return a new {typename} object replacing specified ' 'fields with new values') def __repr__(self): 'Return a nicely formatted representation string' return self.__class__.__name__ + repr_fmt % self def _asdict(self): 'Return a new dict 
which maps field names to their values.' return _dict(_zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' return _tuple(self) # Modify function metadata to help with introspection and debugging for method in ( __new__, _make.__func__, _replace, __repr__, _asdict, __getnewargs__, ): method.__qualname__ = f'{typename}.{method.__name__}' # Build-up the class namespace dictionary # and use type() to build the result class class_namespace = { '__doc__': f'{typename}({arg_list})', '__slots__': (), '_fields': field_names, '_field_defaults': field_defaults, '__new__': __new__, '_make': _make, '_replace': _replace, '__repr__': __repr__, '_asdict': _asdict, '__getnewargs__': __getnewargs__, '__match_args__': field_names, } for index, name in enumerate(field_names): doc = _sys.intern(f'Alias for field number {index}') class_namespace[name] = _tuplegetter(index, doc) result = type(typename, (tuple,), class_namespace) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. if module is None: try: module = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass if module is not None: result.__module__ = module return result
(self, /, **kwds)
22,721
pyproject_api._frontend
Frontend
Abstract base class for a pyproject frontend.
class Frontend(ABC): """Abstract base class for a pyproject frontend.""" #: backend key when the ``pyproject.toml`` does not specify it LEGACY_BUILD_BACKEND: str = "setuptools.build_meta:__legacy__" #: backend requirements when the ``pyproject.toml`` does not specify it LEGACY_REQUIRES: tuple[Requirement, ...] = (Requirement("setuptools >= 40.8.0"), Requirement("wheel")) def __init__( # noqa: PLR0913 self, root: Path, backend_paths: tuple[Path, ...], backend_module: str, backend_obj: str | None, requires: tuple[Requirement, ...], reuse_backend: bool = True, # noqa: FBT001, FBT002 ) -> None: """ Create a new frontend. :param root: the root path of the project :param backend_paths: paths to provision as available to import from for the build backend :param backend_module: the module where the backend lives :param backend_obj: the backend object key (will be looked up within the backend module) :param requires: build requirements for the backend :param reuse_backend: a flag indicating if the communication channel should be kept alive between messages """ self._root = root self._backend_paths = backend_paths self._backend_module = backend_module self._backend_obj = backend_obj self.requires: tuple[Requirement, ...] = requires self._reuse_backend = reuse_backend self._optional_hooks: OptionalHooks | None = None @classmethod def create_args_from_folder( cls, folder: Path, ) -> tuple[Path, tuple[Path, ...], str, str | None, tuple[Requirement, ...], bool]: """ Frontend creation arguments from a python project folder (should have a ``pyproject.toml`` file per PEP-518). :param folder: the python project folder :return: the frontend creation args E.g., to create a frontend from a python project folder: .. code:: python frontend = Frontend(*Frontend.create_args_from_folder(project_folder)) """ py_project_toml = folder / "pyproject.toml" if py_project_toml.exists(): with py_project_toml.open("rb") as file_handler: py_project = tomllib.load(file_handler) build_system = py_project.get("build-system", {}) if "backend-path" in build_system: backend_paths: tuple[Path, ...] = tuple(folder / p for p in build_system["backend-path"]) else: backend_paths = () if "requires" in build_system: requires: tuple[Requirement, ...]
= tuple(Requirement(r) for r in build_system.get("requires")) else: requires = cls.LEGACY_REQUIRES build_backend = build_system.get("build-backend", cls.LEGACY_BUILD_BACKEND) else: backend_paths = () requires = cls.LEGACY_REQUIRES build_backend = cls.LEGACY_BUILD_BACKEND paths = build_backend.split(":") backend_module: str = paths[0] backend_obj: str | None = paths[1] if len(paths) > 1 else None return folder, backend_paths, backend_module, backend_obj, requires, True @property def backend(self) -> str: """:return: backend key""" return f"{self._backend_module}{f':{self._backend_obj}' if self._backend_obj else ''}" @property def backend_args(self) -> list[str]: """:return: startup arguments for a backend""" result: list[str] = [str(_HERE / "_backend.py"), str(self._reuse_backend), self._backend_module] if self._backend_obj: result.append(self._backend_obj) return result @property def optional_hooks(self) -> OptionalHooks: """:return: a dictionary indicating if the optional hook is supported or not""" if self._optional_hooks is None: result, _, __ = self._send("_optional_hooks") self._optional_hooks = result return self._optional_hooks def get_requires_for_build_sdist(self, config_settings: ConfigSettings | None = None) -> RequiresBuildSdistResult: """ Get build requirements for a source distribution (per PEP-517). :param config_settings: run arguments :return: outcome """ if self.optional_hooks["get_requires_for_build_sdist"]: result, out, err = self._send(cmd="get_requires_for_build_sdist", config_settings=config_settings) else: result, out, err = [], "", "" if not isinstance(result, list) or not all(isinstance(i, str) for i in result): self._unexpected_response("get_requires_for_build_sdist", result, "list of string", out, err) return RequiresBuildSdistResult(tuple(Requirement(r) for r in cast(List[str], result)), out, err) def get_requires_for_build_wheel(self, config_settings: ConfigSettings | None = None) -> RequiresBuildWheelResult: """ Get build requirements for a wheel (per PEP-517). :param config_settings: run arguments :return: outcome """ if self.optional_hooks["get_requires_for_build_wheel"]: result, out, err = self._send(cmd="get_requires_for_build_wheel", config_settings=config_settings) else: result, out, err = [], "", "" if not isinstance(result, list) or not all(isinstance(i, str) for i in result): self._unexpected_response("get_requires_for_build_wheel", result, "list of string", out, err) return RequiresBuildWheelResult(tuple(Requirement(r) for r in cast(List[str], result)), out, err) def get_requires_for_build_editable( self, config_settings: ConfigSettings | None = None, ) -> RequiresBuildEditableResult: """ Get build requirements for an editable wheel build (per PEP-660). :param config_settings: run arguments :return: outcome """ if self.optional_hooks["get_requires_for_build_editable"]: result, out, err = self._send(cmd="get_requires_for_build_editable", config_settings=config_settings) else: result, out, err = [], "", "" if not isinstance(result, list) or not all(isinstance(i, str) for i in result): self._unexpected_response("get_requires_for_build_editable", result, "list of string", out, err) return RequiresBuildEditableResult(tuple(Requirement(r) for r in cast(List[str], result)), out, err) def prepare_metadata_for_build_wheel( self, metadata_directory: Path, config_settings: ConfigSettings | None = None, ) -> MetadataForBuildWheelResult | None: """ Build wheel metadata (per PEP-517). 
:param metadata_directory: where to generate the metadata :param config_settings: build arguments :return: metadata generation result """ self._check_metadata_dir(metadata_directory) basename: str | None = None if self.optional_hooks["prepare_metadata_for_build_wheel"]: basename, out, err = self._send( cmd="prepare_metadata_for_build_wheel", metadata_directory=metadata_directory, config_settings=config_settings, ) if basename is None: return None if not isinstance(basename, str): self._unexpected_response("prepare_metadata_for_build_wheel", basename, str, out, err) return MetadataForBuildWheelResult(metadata_directory / basename, out, err) def _check_metadata_dir(self, metadata_directory: Path) -> None: if metadata_directory == self._root: msg = f"the project root and the metadata directory can't be the same {self._root}" raise RuntimeError(msg) if metadata_directory.exists(): # start with fresh ensure_empty_dir(metadata_directory) metadata_directory.mkdir(parents=True, exist_ok=True) def prepare_metadata_for_build_editable( self, metadata_directory: Path, config_settings: ConfigSettings | None = None, ) -> MetadataForBuildEditableResult | None: """ Build editable wheel metadata (per PEP-660). :param metadata_directory: where to generate the metadata :param config_settings: build arguments :return: metadata generation result """ self._check_metadata_dir(metadata_directory) basename: str | None = None if self.optional_hooks["prepare_metadata_for_build_editable"]: basename, out, err = self._send( cmd="prepare_metadata_for_build_editable", metadata_directory=metadata_directory, config_settings=config_settings, ) if basename is None: return None if not isinstance(basename, str): self._unexpected_response("prepare_metadata_for_build_wheel", basename, str, out, err) result = metadata_directory / basename return MetadataForBuildEditableResult(result, out, err) def build_sdist(self, sdist_directory: Path, config_settings: ConfigSettings | None = None) -> SdistResult: """ Build a source distribution (per PEP-517). :param sdist_directory: the folder where to build the source distribution :param config_settings: build arguments :return: source distribution build result """ sdist_directory.mkdir(parents=True, exist_ok=True) basename, out, err = self._send( cmd="build_sdist", sdist_directory=sdist_directory, config_settings=config_settings, ) if not isinstance(basename, str): self._unexpected_response("build_sdist", basename, str, out, err) return SdistResult(sdist_directory / basename, out, err) def build_wheel( self, wheel_directory: Path, config_settings: ConfigSettings | None = None, metadata_directory: Path | None = None, ) -> WheelResult: """ Build a wheel file (per PEP-517). :param wheel_directory: the folder where to build the wheel :param config_settings: build arguments :param metadata_directory: wheel metadata folder :return: wheel build result """ wheel_directory.mkdir(parents=True, exist_ok=True) basename, out, err = self._send( cmd="build_wheel", wheel_directory=wheel_directory, config_settings=config_settings, metadata_directory=metadata_directory, ) if not isinstance(basename, str): self._unexpected_response("build_wheel", basename, str, out, err) return WheelResult(wheel_directory / basename, out, err) def build_editable( self, wheel_directory: Path, config_settings: ConfigSettings | None = None, metadata_directory: Path | None = None, ) -> EditableResult: """ Build an editable wheel file (per PEP-660). 
:param wheel_directory: the folder where to build the editable wheel :param config_settings: build arguments :param metadata_directory: wheel metadata folder :return: wheel build result """ wheel_directory.mkdir(parents=True, exist_ok=True) basename, out, err = self._send( cmd="build_editable", wheel_directory=wheel_directory, config_settings=config_settings, metadata_directory=metadata_directory, ) if not isinstance(basename, str): self._unexpected_response("build_editable", basename, str, out, err) return EditableResult(wheel_directory / basename, out, err) def _unexpected_response( # noqa: PLR0913 self, cmd: str, got: Any, expected_type: Any, out: str, err: str, ) -> NoReturn: msg = f"{cmd!r} on {self.backend!r} returned {got!r} but expected type {expected_type!r}" raise BackendFailed({"code": None, "exc_type": TypeError.__name__, "exc_msg": msg}, out, err) def metadata_from_built( self, metadata_directory: Path, target: Literal["wheel", "editable"], config_settings: ConfigSettings | None = None, ) -> tuple[Path, str, str]: """ Create metadata from building the wheel (use when the prepare endpoints are not present or don't work). :param metadata_directory: directory where to put the metadata :param target: the type of wheel metadata to build :param config_settings: config settings to pass in to the build endpoint :return: """ hook = getattr(self, f"build_{target}") with self._wheel_directory() as wheel_directory: result: EditableResult | WheelResult = hook(wheel_directory, config_settings) wheel = result.wheel if not wheel.exists(): msg = f"missing wheel file return by backed {wheel!r}" raise RuntimeError(msg) out, err = result.out, result.err extract_to = str(metadata_directory) basename = None with ZipFile(str(wheel), "r") as zip_file: for name in zip_file.namelist(): # pragma: no branch root = Path(name).parts[0] if root.endswith(".dist-info"): basename = root zip_file.extract(name, extract_to) if basename is None: # pragma: no branch msg = f"no .dist-info found inside generated wheel {wheel}" raise RuntimeError(msg) return metadata_directory / basename, out, err @contextmanager def _wheel_directory(self) -> Iterator[Path]: with TemporaryDirectory() as wheel_directory: yield Path(wheel_directory) def _send(self, cmd: str, **kwargs: Any) -> tuple[Any, str, str]: with NamedTemporaryFile(prefix=f"pep517_{cmd}-") as result_file_marker: result_file = Path(result_file_marker.name).with_suffix(".json") msg = json.dumps( { "cmd": cmd, "kwargs": {k: (str(v) if isinstance(v, Path) else v) for k, v in kwargs.items()}, "result": str(result_file), }, ) with self._send_msg(cmd, result_file, msg) as status: while not status.done: # pragma: no branch sleep(0.001) # wait a bit for things to happen if result_file.exists(): try: with result_file.open("rt") as result_handler: result = json.load(result_handler) finally: result_file.unlink() else: result = { "code": 1, "exc_type": "RuntimeError", "exc_msg": f"Backend response file {result_file} is missing", } out, err = status.out_err() if "return" in result: return result["return"], out, err raise BackendFailed(result, out, err) @abstractmethod @contextmanager def _send_msg(self, cmd: str, result_file: Path, msg: str) -> Iterator[CmdStatus]: raise NotImplementedError
(root: 'Path', backend_paths: 'tuple[Path, ...]', backend_module: 'str', backend_obj: 'str | None', requires: 'tuple[Requirement, ...]', reuse_backend: 'bool' = True) -> 'None'
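Frontend is abstract, so a usage sketch has to go through a concrete subclass; this assumes pyproject_api's public SubprocessFrontend and an SdistResult field named sdist (both assumptions about the package's API), with a hypothetical project path:

    from pathlib import Path
    from pyproject_api import SubprocessFrontend

    project = Path('/path/to/project')  # must contain a pyproject.toml
    frontend = SubprocessFrontend(*SubprocessFrontend.create_args_from_folder(project))

    result = frontend.build_sdist(project / 'dist')
    print(result.sdist, result.out)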
22,722
pyproject_api._frontend
__init__
Create a new frontend. :param root: the root path of the project :param backend_paths: paths to provision as available to import from for the build backend :param backend_module: the module where the backend lives :param backend_obj: the backend object key (will be looked up within the backend module) :param requires: build requirements for the backend :param reuse_backend: a flag indicating if the communication channel should be kept alive between messages
def __init__( # noqa: PLR0913 self, root: Path, backend_paths: tuple[Path, ...], backend_module: str, backend_obj: str | None, requires: tuple[Requirement, ...], reuse_backend: bool = True, # noqa: FBT001, FBT002 ) -> None: """ Create a new frontend. :param root: the root path of the project :param backend_paths: paths to provision as available to import from for the build backend :param backend_module: the module where the backend lives :param backend_obj: the backend object key (will be looked up within the backend module) :param requires: build requirements for the backend :param reuse_backend: a flag indicating if the communication channel should be kept alive between messages """ self._root = root self._backend_paths = backend_paths self._backend_module = backend_module self._backend_obj = backend_obj self.requires: tuple[Requirement, ...] = requires self._reuse_backend = reuse_backend self._optional_hooks: OptionalHooks | None = None
(self, root: pathlib.Path, backend_paths: tuple[pathlib.Path, ...], backend_module: str, backend_obj: str | None, requires: tuple[packaging.requirements.Requirement, ...], reuse_backend: bool = True) -> NoneType
22,723
pyproject_api._frontend
_check_metadata_dir
null
def _check_metadata_dir(self, metadata_directory: Path) -> None: if metadata_directory == self._root: msg = f"the project root and the metadata directory can't be the same {self._root}" raise RuntimeError(msg) if metadata_directory.exists(): # start with fresh ensure_empty_dir(metadata_directory) metadata_directory.mkdir(parents=True, exist_ok=True)
(self, metadata_directory: pathlib.Path) -> NoneType
22,724
pyproject_api._frontend
_send
null
def _send(self, cmd: str, **kwargs: Any) -> tuple[Any, str, str]: with NamedTemporaryFile(prefix=f"pep517_{cmd}-") as result_file_marker: result_file = Path(result_file_marker.name).with_suffix(".json") msg = json.dumps( { "cmd": cmd, "kwargs": {k: (str(v) if isinstance(v, Path) else v) for k, v in kwargs.items()}, "result": str(result_file), }, ) with self._send_msg(cmd, result_file, msg) as status: while not status.done: # pragma: no branch sleep(0.001) # wait a bit for things to happen if result_file.exists(): try: with result_file.open("rt") as result_handler: result = json.load(result_handler) finally: result_file.unlink() else: result = { "code": 1, "exc_type": "RuntimeError", "exc_msg": f"Backend response file {result_file} is missing", } out, err = status.out_err() if "return" in result: return result["return"], out, err raise BackendFailed(result, out, err)
(self, cmd: str, **kwargs: Any) -> tuple[typing.Any, str, str]
22,725
pyproject_api._frontend
_send_msg
null
@abstractmethod @contextmanager def _send_msg(self, cmd: str, result_file: Path, msg: str) -> Iterator[CmdStatus]: raise NotImplementedError
(self, cmd: str, result_file: pathlib.Path, msg: str) -> Iterator[pyproject_api._frontend.CmdStatus]
22,726
pyproject_api._frontend
_unexpected_response
null
def _unexpected_response( # noqa: PLR0913 self, cmd: str, got: Any, expected_type: Any, out: str, err: str, ) -> NoReturn: msg = f"{cmd!r} on {self.backend!r} returned {got!r} but expected type {expected_type!r}" raise BackendFailed({"code": None, "exc_type": TypeError.__name__, "exc_msg": msg}, out, err)
(self, cmd: str, got: Any, expected_type: Any, out: str, err: str) -> NoReturn
22,728
pyproject_api._frontend
build_editable
Build an editable wheel file (per PEP-660). :param wheel_directory: the folder where to build the editable wheel :param config_settings: build arguments :param metadata_directory: wheel metadata folder :return: wheel build result
def build_editable( self, wheel_directory: Path, config_settings: ConfigSettings | None = None, metadata_directory: Path | None = None, ) -> EditableResult: """ Build an editable wheel file (per PEP-660). :param wheel_directory: the folder where to build the editable wheel :param config_settings: build arguments :param metadata_directory: wheel metadata folder :return: wheel build result """ wheel_directory.mkdir(parents=True, exist_ok=True) basename, out, err = self._send( cmd="build_editable", wheel_directory=wheel_directory, config_settings=config_settings, metadata_directory=metadata_directory, ) if not isinstance(basename, str): self._unexpected_response("build_editable", basename, str, out, err) return EditableResult(wheel_directory / basename, out, err)
(self, wheel_directory: pathlib.Path, config_settings: Optional[Dict[str, Any]] = None, metadata_directory: Optional[pathlib.Path] = None) -> pyproject_api._frontend.EditableResult
22,729
pyproject_api._frontend
build_sdist
Build a source distribution (per PEP-517). :param sdist_directory: the folder where to build the source distribution :param config_settings: build arguments :return: source distribution build result
def build_sdist(self, sdist_directory: Path, config_settings: ConfigSettings | None = None) -> SdistResult: """ Build a source distribution (per PEP-517). :param sdist_directory: the folder where to build the source distribution :param config_settings: build arguments :return: source distribution build result """ sdist_directory.mkdir(parents=True, exist_ok=True) basename, out, err = self._send( cmd="build_sdist", sdist_directory=sdist_directory, config_settings=config_settings, ) if not isinstance(basename, str): self._unexpected_response("build_sdist", basename, str, out, err) return SdistResult(sdist_directory / basename, out, err)
(self, sdist_directory: pathlib.Path, config_settings: Optional[Dict[str, Any]] = None) -> pyproject_api._frontend.SdistResult
22,730
pyproject_api._frontend
build_wheel
Build a wheel file (per PEP-517). :param wheel_directory: the folder where to build the wheel :param config_settings: build arguments :param metadata_directory: wheel metadata folder :return: wheel build result
def build_wheel( self, wheel_directory: Path, config_settings: ConfigSettings | None = None, metadata_directory: Path | None = None, ) -> WheelResult: """ Build a wheel file (per PEP-517). :param wheel_directory: the folder where to build the wheel :param config_settings: build arguments :param metadata_directory: wheel metadata folder :return: wheel build result """ wheel_directory.mkdir(parents=True, exist_ok=True) basename, out, err = self._send( cmd="build_wheel", wheel_directory=wheel_directory, config_settings=config_settings, metadata_directory=metadata_directory, ) if not isinstance(basename, str): self._unexpected_response("build_wheel", basename, str, out, err) return WheelResult(wheel_directory / basename, out, err)
(self, wheel_directory: pathlib.Path, config_settings: Optional[Dict[str, Any]] = None, metadata_directory: Optional[pathlib.Path] = None) -> pyproject_api._frontend.WheelResult
22,731
pyproject_api._frontend
get_requires_for_build_editable
Get build requirements for an editable wheel build (per PEP-660). :param config_settings: run arguments :return: outcome
def get_requires_for_build_editable( self, config_settings: ConfigSettings | None = None, ) -> RequiresBuildEditableResult: """ Get build requirements for an editable wheel build (per PEP-660). :param config_settings: run arguments :return: outcome """ if self.optional_hooks["get_requires_for_build_editable"]: result, out, err = self._send(cmd="get_requires_for_build_editable", config_settings=config_settings) else: result, out, err = [], "", "" if not isinstance(result, list) or not all(isinstance(i, str) for i in result): self._unexpected_response("get_requires_for_build_editable", result, "list of string", out, err) return RequiresBuildEditableResult(tuple(Requirement(r) for r in cast(List[str], result)), out, err)
(self, config_settings: Optional[Dict[str, Any]] = None) -> pyproject_api._frontend.RequiresBuildEditableResult
22,732
pyproject_api._frontend
get_requires_for_build_sdist
Get build requirements for a source distribution (per PEP-517). :param config_settings: run arguments :return: outcome
def get_requires_for_build_sdist(self, config_settings: ConfigSettings | None = None) -> RequiresBuildSdistResult: """ Get build requirements for a source distribution (per PEP-517). :param config_settings: run arguments :return: outcome """ if self.optional_hooks["get_requires_for_build_sdist"]: result, out, err = self._send(cmd="get_requires_for_build_sdist", config_settings=config_settings) else: result, out, err = [], "", "" if not isinstance(result, list) or not all(isinstance(i, str) for i in result): self._unexpected_response("get_requires_for_build_sdist", result, "list of string", out, err) return RequiresBuildSdistResult(tuple(Requirement(r) for r in cast(List[str], result)), out, err)
(self, config_settings: Optional[Dict[str, Any]] = None) -> pyproject_api._frontend.RequiresBuildSdistResult
22,733
pyproject_api._frontend
get_requires_for_build_wheel
Get build requirements for a wheel (per PEP-517). :param config_settings: run arguments :return: outcome
def get_requires_for_build_wheel(self, config_settings: ConfigSettings | None = None) -> RequiresBuildWheelResult: """ Get build requirements for a wheel (per PEP-517). :param config_settings: run arguments :return: outcome """ if self.optional_hooks["get_requires_for_build_wheel"]: result, out, err = self._send(cmd="get_requires_for_build_wheel", config_settings=config_settings) else: result, out, err = [], "", "" if not isinstance(result, list) or not all(isinstance(i, str) for i in result): self._unexpected_response("get_requires_for_build_wheel", result, "list of string", out, err) return RequiresBuildWheelResult(tuple(Requirement(r) for r in cast(List[str], result)), out, err)
(self, config_settings: Optional[Dict[str, Any]] = None) -> pyproject_api._frontend.RequiresBuildWheelResult
22,734
pyproject_api._frontend
metadata_from_built
Create metadata from building the wheel (use when the prepare endpoints are not present or don't work). :param metadata_directory: directory where to put the metadata :param target: the type of wheel metadata to build :param config_settings: config settings to pass in to the build endpoint :return:
def metadata_from_built( self, metadata_directory: Path, target: Literal["wheel", "editable"], config_settings: ConfigSettings | None = None, ) -> tuple[Path, str, str]: """ Create metadata from building the wheel (use when the prepare endpoints are not present or don't work). :param metadata_directory: directory where to put the metadata :param target: the type of wheel metadata to build :param config_settings: config settings to pass in to the build endpoint :return: """ hook = getattr(self, f"build_{target}") with self._wheel_directory() as wheel_directory: result: EditableResult | WheelResult = hook(wheel_directory, config_settings) wheel = result.wheel if not wheel.exists(): msg = f"missing wheel file returned by backend {wheel!r}" raise RuntimeError(msg) out, err = result.out, result.err extract_to = str(metadata_directory) basename = None with ZipFile(str(wheel), "r") as zip_file: for name in zip_file.namelist(): # pragma: no branch root = Path(name).parts[0] if root.endswith(".dist-info"): basename = root zip_file.extract(name, extract_to) if basename is None: # pragma: no branch msg = f"no .dist-info found inside generated wheel {wheel}" raise RuntimeError(msg) return metadata_directory / basename, out, err
(self, metadata_directory: pathlib.Path, target: Literal['wheel', 'editable'], config_settings: Optional[Dict[str, Any]] = None) -> tuple[pathlib.Path, str, str]
22,735
pyproject_api._frontend
prepare_metadata_for_build_editable
Build editable wheel metadata (per PEP-660). :param metadata_directory: where to generate the metadata :param config_settings: build arguments :return: metadata generation result
def prepare_metadata_for_build_editable( self, metadata_directory: Path, config_settings: ConfigSettings | None = None, ) -> MetadataForBuildEditableResult | None: """ Build editable wheel metadata (per PEP-660). :param metadata_directory: where to generate the metadata :param config_settings: build arguments :return: metadata generation result """ self._check_metadata_dir(metadata_directory) basename: str | None = None if self.optional_hooks["prepare_metadata_for_build_editable"]: basename, out, err = self._send( cmd="prepare_metadata_for_build_editable", metadata_directory=metadata_directory, config_settings=config_settings, ) if basename is None: return None if not isinstance(basename, str): self._unexpected_response("prepare_metadata_for_build_editable", basename, str, out, err) result = metadata_directory / basename return MetadataForBuildEditableResult(result, out, err)
(self, metadata_directory: pathlib.Path, config_settings: Optional[Dict[str, Any]] = None) -> pyproject_api._frontend.MetadataForBuildEditableResult | None
22,736
pyproject_api._frontend
prepare_metadata_for_build_wheel
Build wheel metadata (per PEP-517). :param metadata_directory: where to generate the metadata :param config_settings: build arguments :return: metadata generation result
def prepare_metadata_for_build_wheel( self, metadata_directory: Path, config_settings: ConfigSettings | None = None, ) -> MetadataForBuildWheelResult | None: """ Build wheel metadata (per PEP-517). :param metadata_directory: where to generate the metadata :param config_settings: build arguments :return: metadata generation result """ self._check_metadata_dir(metadata_directory) basename: str | None = None if self.optional_hooks["prepare_metadata_for_build_wheel"]: basename, out, err = self._send( cmd="prepare_metadata_for_build_wheel", metadata_directory=metadata_directory, config_settings=config_settings, ) if basename is None: return None if not isinstance(basename, str): self._unexpected_response("prepare_metadata_for_build_wheel", basename, str, out, err) return MetadataForBuildWheelResult(metadata_directory / basename, out, err)
(self, metadata_directory: pathlib.Path, config_settings: Optional[Dict[str, Any]] = None) -> pyproject_api._frontend.MetadataForBuildWheelResult | None
22,737
pyproject_api._frontend
MetadataForBuildEditableResult
Information collected while acquiring the editable metadata.
class MetadataForBuildEditableResult(NamedTuple): """Information collected while acquiring the editable metadata.""" #: path to the wheel metadata metadata: Path #: backend standard output while generating the editable wheel metadata out: str #: backend standard error while generating the editable wheel metadata err: str
(metadata: pathlib.Path, out: str, err: str)
22,739
namedtuple_MetadataForBuildEditableResult
__new__
Create new instance of MetadataForBuildEditableResult(metadata, out, err)
from builtins import function
(_cls, metadata: ForwardRef('Path'), out: ForwardRef('str'), err: ForwardRef('str'))
22,742
collections
_replace
Return a new MetadataForBuildEditableResult object replacing specified fields with new values
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): """Returns a new subclass of tuple with named fields. >>> Point = namedtuple('Point', ['x', 'y']) >>> Point.__doc__ # docstring for the new class 'Point(x, y)' >>> p = Point(11, y=22) # instantiate with positional args or keywords >>> p[0] + p[1] # indexable like a plain tuple 33 >>> x, y = p # unpack like a regular tuple >>> x, y (11, 22) >>> p.x + p.y # fields also accessible by name 33 >>> d = p._asdict() # convert to a dictionary >>> d['x'] 11 >>> Point(**d) # convert from a dictionary Point(x=11, y=22) >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields Point(x=100, y=22) """ # Validate the field names. At the user's option, either generate an error # message or automatically replace the field name with a valid name. if isinstance(field_names, str): field_names = field_names.replace(',', ' ').split() field_names = list(map(str, field_names)) typename = _sys.intern(str(typename)) if rename: seen = set() for index, name in enumerate(field_names): if (not name.isidentifier() or _iskeyword(name) or name.startswith('_') or name in seen): field_names[index] = f'_{index}' seen.add(name) for name in [typename] + field_names: if type(name) is not str: raise TypeError('Type names and field names must be strings') if not name.isidentifier(): raise ValueError('Type names and field names must be valid ' f'identifiers: {name!r}') if _iskeyword(name): raise ValueError('Type names and field names cannot be a ' f'keyword: {name!r}') seen = set() for name in field_names: if name.startswith('_') and not rename: raise ValueError('Field names cannot start with an underscore: ' f'{name!r}') if name in seen: raise ValueError(f'Encountered duplicate field name: {name!r}') seen.add(name) field_defaults = {} if defaults is not None: defaults = tuple(defaults) if len(defaults) > len(field_names): raise TypeError('Got more default values than field names') field_defaults = dict(reversed(list(zip(reversed(field_names), reversed(defaults))))) # Variables used in the methods and docstrings field_names = tuple(map(_sys.intern, field_names)) num_fields = len(field_names) arg_list = ', '.join(field_names) if num_fields == 1: arg_list += ',' repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' tuple_new = tuple.__new__ _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip # Create all the named tuple methods to be added to the class namespace namespace = { '_tuple_new': tuple_new, '__builtins__': {}, '__name__': f'namedtuple_{typename}', } code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))' __new__ = eval(code, namespace) __new__.__name__ = '__new__' __new__.__doc__ = f'Create new instance of {typename}({arg_list})' if defaults is not None: __new__.__defaults__ = defaults @classmethod def _make(cls, iterable): result = tuple_new(cls, iterable) if _len(result) != num_fields: raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') return result _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' 'or iterable') def _replace(self, /, **kwds): result = self._make(_map(kwds.pop, field_names, self)) if kwds: raise ValueError(f'Got unexpected field names: {list(kwds)!r}') return result _replace.__doc__ = (f'Return a new {typename} object replacing specified ' 'fields with new values') def __repr__(self): 'Return a nicely formatted representation string' return self.__class__.__name__ + repr_fmt % self def _asdict(self): 'Return a new dict 
which maps field names to their values.' return _dict(_zip(self._fields, self)) def __getnewargs__(self): 'Return self as a plain tuple. Used by copy and pickle.' return _tuple(self) # Modify function metadata to help with introspection and debugging for method in ( __new__, _make.__func__, _replace, __repr__, _asdict, __getnewargs__, ): method.__qualname__ = f'{typename}.{method.__name__}' # Build-up the class namespace dictionary # and use type() to build the result class class_namespace = { '__doc__': f'{typename}({arg_list})', '__slots__': (), '_fields': field_names, '_field_defaults': field_defaults, '__new__': __new__, '_make': _make, '_replace': _replace, '__repr__': __repr__, '_asdict': _asdict, '__getnewargs__': __getnewargs__, '__match_args__': field_names, } for index, name in enumerate(field_names): doc = _sys.intern(f'Alias for field number {index}') class_namespace[name] = _tuplegetter(index, doc) result = type(typename, (tuple,), class_namespace) # For pickling to work, the __module__ variable needs to be set to the frame # where the named tuple is created. Bypass this step in environments where # sys._getframe is not defined (Jython for example) or sys._getframe is not # defined for arguments greater than 0 (IronPython), or where the user has # specified a particular module. if module is None: try: module = _sys._getframe(1).f_globals.get('__name__', '__main__') except (AttributeError, ValueError): pass if module is not None: result.__module__ = module return result
(self, /, **kwds)
22,743
pyproject_api._frontend
MetadataForBuildWheelResult
Information collected while acquiring the wheel metadata.
class MetadataForBuildWheelResult(NamedTuple):
    """Information collected while acquiring the wheel metadata."""

    #: path to the wheel metadata
    metadata: Path
    #: backend standard output while generating the wheel metadata
    out: str
    #: backend standard error while generating the wheel metadata
    err: str
(metadata: pathlib.Path, out: str, err: str)
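Because MetadataForBuildWheelResult is a NamedTuple, a result can be unpacked or copied with `_replace`; a minimal sketch with a hypothetical metadata path:

from pathlib import Path

from pyproject_api._frontend import MetadataForBuildWheelResult

res = MetadataForBuildWheelResult(metadata=Path("dist/pkg.dist-info"), out="", err="")
meta, out, err = res               # unpacks like a plain tuple
patched = res._replace(out="ok")   # new instance; `res` itself is unchanged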
22,745
namedtuple_MetadataForBuildWheelResult
__new__
Create new instance of MetadataForBuildWheelResult(metadata, out, err)
# `__new__` has no retrievable source: collections.namedtuple() synthesizes it as
#   eval("lambda _cls, metadata, out, err: _tuple_new(_cls, (metadata, out, err))", namespace)
(_cls, metadata: ForwardRef('Path'), out: ForwardRef('str'), err: ForwardRef('str'))
22,748
collections
_replace
Return a new MetadataForBuildWheelResult object replacing specified fields with new values
def _replace(self, /, **kwds):
    # `_map` and `field_names` are closure variables bound in the enclosing
    # collections.namedtuple() factory where this method is defined
    result = self._make(_map(kwds.pop, field_names, self))
    if kwds:
        raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
    return result
(self, /, **kwds)
22,749
pyproject_api._frontend
OptionalHooks
A flag indicating if the backend supports the optional hook or not.
class OptionalHooks(TypedDict, total=True): """A flag indicating if the backend supports the optional hook or not.""" get_requires_for_build_sdist: bool prepare_metadata_for_build_wheel: bool get_requires_for_build_wheel: bool build_editable: bool get_requires_for_build_editable: bool prepare_metadata_for_build_editable: bool
null
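A small usage sketch: a caller can consult the mapping before invoking an optional PEP 517/660 hook. The flag values here are hypothetical:

from pyproject_api._frontend import OptionalHooks

hooks: OptionalHooks = {
    "get_requires_for_build_sdist": True,
    "prepare_metadata_for_build_wheel": True,
    "get_requires_for_build_wheel": True,
    "build_editable": False,
    "get_requires_for_build_editable": False,
    "prepare_metadata_for_build_editable": False,
}

if not hooks["build_editable"]:
    print("backend does not support PEP 660 editable installs")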
22,750
pyproject_api._frontend
RequiresBuildEditableResult
Information collected while acquiring the wheel build dependencies.
class RequiresBuildEditableResult(NamedTuple): """Information collected while acquiring the wheel build dependencies.""" #: editable wheel build dependencies requires: tuple[Requirement, ...] #: backend standard output while acquiring the editable wheel build dependencies out: str #: backend standard error while acquiring the editable wheel build dependencies err: str
(requires: tuple[packaging.requirements.Requirement, ...], out: str, err: str)
22,752
namedtuple_RequiresBuildEditableResult
__new__
Create new instance of RequiresBuildEditableResult(requires, out, err)
# `__new__` has no retrievable source: collections.namedtuple() synthesizes it as
#   eval("lambda _cls, requires, out, err: _tuple_new(_cls, (requires, out, err))", namespace)
(_cls, requires: ForwardRef('tuple[Requirement, ...]'), out: ForwardRef('str'), err: ForwardRef('str'))
22,755
collections
_replace
Return a new RequiresBuildEditableResult object replacing specified fields with new values
def _replace(self, /, **kwds):
    # `_map` and `field_names` are closure variables bound in the enclosing
    # collections.namedtuple() factory where this method is defined
    result = self._make(_map(kwds.pop, field_names, self))
    if kwds:
        raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
    return result
(self, /, **kwds)
22,756
pyproject_api._frontend
RequiresBuildSdistResult
Information collected while acquiring the source distribution build dependencies.
class RequiresBuildSdistResult(NamedTuple):
    """Information collected while acquiring the source distribution build dependencies."""

    #: source distribution build dependencies
    requires: tuple[Requirement, ...]
    #: backend standard output while acquiring the source distribution build dependencies
    out: str
    #: backend standard error while acquiring the source distribution build dependencies
    err: str
(requires: tuple[packaging.requirements.Requirement, ...], out: str, err: str)
22,758
namedtuple_RequiresBuildSdistResult
__new__
Create new instance of RequiresBuildSdistResult(requires, out, err)
# `__new__` has no retrievable source: collections.namedtuple() synthesizes it as
#   eval("lambda _cls, requires, out, err: _tuple_new(_cls, (requires, out, err))", namespace)
(_cls, requires: ForwardRef('tuple[Requirement, ...]'), out: ForwardRef('str'), err: ForwardRef('str'))
22,761
collections
_replace
Return a new RequiresBuildSdistResult object replacing specified fields with new values
def _replace(self, /, **kwds):
    # `_map` and `field_names` are closure variables bound in the enclosing
    # collections.namedtuple() factory where this method is defined
    result = self._make(_map(kwds.pop, field_names, self))
    if kwds:
        raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
    return result
(self, /, **kwds)
22,762
pyproject_api._frontend
RequiresBuildWheelResult
Information collected while acquiring the wheel build dependencies.
class RequiresBuildWheelResult(NamedTuple): """Information collected while acquiring the wheel build dependencies.""" #: wheel build dependencies requires: tuple[Requirement, ...] #: backend standard output while acquiring the wheel build dependencies out: str #: backend standard error while acquiring the wheel build dependencies err: str
(requires: tuple[packaging.requirements.Requirement, ...], out: str, err: str)
22,764
namedtuple_RequiresBuildWheelResult
__new__
Create new instance of RequiresBuildWheelResult(requires, out, err)
# `__new__` has no retrievable source: collections.namedtuple() synthesizes it as
#   eval("lambda _cls, requires, out, err: _tuple_new(_cls, (requires, out, err))", namespace)
(_cls, requires: ForwardRef('tuple[Requirement, ...]'), out: ForwardRef('str'), err: ForwardRef('str'))
22,767
collections
_replace
Return a new RequiresBuildWheelResult object replacing specified fields with new values
def _replace(self, /, **kwds):
    # `_map` and `field_names` are closure variables bound in the enclosing
    # collections.namedtuple() factory where this method is defined
    result = self._make(_map(kwds.pop, field_names, self))
    if kwds:
        raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
    return result
(self, /, **kwds)
22,768
pyproject_api._frontend
SdistResult
Information collected while building a source distribution.
class SdistResult(NamedTuple):
    """Information collected while building a source distribution."""

    #: path to the built source distribution
    sdist: Path
    #: backend standard output while building the source distribution
    out: str
    #: backend standard error while building the source distribution
    err: str
(sdist: pathlib.Path, out: str, err: str)
22,770
namedtuple_SdistResult
__new__
Create new instance of SdistResult(sdist, out, err)
# `__new__` has no retrievable source: collections.namedtuple() synthesizes it as
#   eval("lambda _cls, sdist, out, err: _tuple_new(_cls, (sdist, out, err))", namespace)
(_cls, sdist: ForwardRef('Path'), out: ForwardRef('str'), err: ForwardRef('str'))
22,773
collections
_replace
Return a new SdistResult object replacing specified fields with new values
def _replace(self, /, **kwds):
    # `_map` and `field_names` are closure variables bound in the enclosing
    # collections.namedtuple() factory where this method is defined
    result = self._make(_map(kwds.pop, field_names, self))
    if kwds:
        raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
    return result
(self, /, **kwds)
22,774
pyproject_api._via_fresh_subprocess
SubprocessFrontend
A frontend that creates a fresh subprocess for every call to communicate with the backend.
class SubprocessFrontend(Frontend):
    """A frontend that creates a fresh subprocess for every call to communicate with the backend."""

    def __init__(  # noqa: PLR0913
        self,
        root: Path,
        backend_paths: tuple[Path, ...],
        backend_module: str,
        backend_obj: str | None,
        requires: tuple[Requirement, ...],
    ) -> None:
        """
        Create a subprocess frontend.

        :param root: the root path to the built project
        :param backend_paths: paths that are available on the python path for the backend
        :param backend_module: module where the backend is located
        :param backend_obj: object within the backend module identifying the backend
        :param requires: seed requirements for the backend
        """
        super().__init__(root, backend_paths, backend_module, backend_obj, requires, reuse_backend=False)
        self.executable = sys.executable

    @contextmanager
    def _send_msg(self, cmd: str, result_file: Path, msg: str) -> Iterator[SubprocessCmdStatus]:  # noqa: ARG002
        env = os.environ.copy()
        backend = os.pathsep.join(str(i) for i in self._backend_paths).strip()
        if backend:
            env["PYTHONPATH"] = backend
        process = Popen(
            args=[self.executable, *self.backend_args],
            stdout=PIPE,
            stderr=PIPE,
            stdin=PIPE,
            universal_newlines=True,
            cwd=self._root,
            env=env,
        )
        cast(IO[str], process.stdin).write(f"{os.linesep}{msg}{os.linesep}")
        yield SubprocessCmdStatus(process)

    def send_cmd(self, cmd: str, **kwargs: Any) -> tuple[Any, str, str]:
        """
        Send a command to the backend.

        :param cmd: the command to send
        :param kwargs: keyword arguments to the backend
        :return: a tuple of: backend response, standard output text, standard error text
        """
        return self._send(cmd, **kwargs)
(root: 'Path', backend_paths: 'tuple[Path, ...]', backend_module: 'str', backend_obj: 'str | None', requires: 'tuple[Requirement, ...]') -> 'None'
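A usage sketch, assuming `my_project/` contains a pyproject.toml and that `Frontend.create_args_from_folder` returns the constructor arguments plus a trailing `reuse_backend` flag (dropped here, since SubprocessFrontend never reuses a backend):

from pathlib import Path

from pyproject_api import SubprocessFrontend

frontend = SubprocessFrontend(*SubprocessFrontend.create_args_from_folder(Path("my_project"))[:-1])
result = frontend.build_wheel(Path("my_project") / "dist")  # a WheelResult named tuple
print(result.wheel, result.out, result.err)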
22,775
pyproject_api._via_fresh_subprocess
__init__
Create a subprocess frontend. :param root: the root path to the built project :param backend_paths: paths that are available on the python path for the backend :param backend_module: module where the backend is located :param backend_obj: object within the backend module identifying the backend :param requires: seed requirements for the backend
def __init__( # noqa: PLR0913 self, root: Path, backend_paths: tuple[Path, ...], backend_module: str, backend_obj: str | None, requires: tuple[Requirement, ...], ) -> None: """ Create a subprocess frontend. :param root: the root path to the built project :param backend_paths: paths that are available on the python path for the backend :param backend_module: module where the backend is located :param backend_obj: object within the backend module identifying the backend :param requires: seed requirements for the backend """ super().__init__(root, backend_paths, backend_module, backend_obj, requires, reuse_backend=False) self.executable = sys.executable
(self, root: 'Path', backend_paths: 'tuple[Path, ...]', backend_module: 'str', backend_obj: 'str | None', requires: 'tuple[Requirement, ...]') -> 'None'
22,790
pyproject_api._via_fresh_subprocess
send_cmd
Send a command to the backend. :param cmd: the command to send :param kwargs: keyword arguments to the backend :return: a tuple of: backend response, standard output text, standard error text
def send_cmd(self, cmd: str, **kwargs: Any) -> tuple[Any, str, str]: """ Send a command to the backend. :param cmd: the command to send :param kwargs: keyword arguments to the backend :return: a tuple of: backend response, standard output text, standard error text """ return self._send(cmd, **kwargs)
(self, cmd: str, **kwargs: Any) -> tuple[typing.Any, str, str]
22,791
pyproject_api._frontend
WheelResult
Information collected while building a wheel.
class WheelResult(NamedTuple): """Information collected while building a wheel.""" #: path to the built wheel artifact wheel: Path #: backend standard output while building the wheel out: str #: backend standard error while building the wheel err: str
(wheel: pathlib.Path, out: str, err: str)
22,793
namedtuple_WheelResult
__new__
Create new instance of WheelResult(wheel, out, err)
# `__new__` has no retrievable source: collections.namedtuple() synthesizes it as
#   eval("lambda _cls, wheel, out, err: _tuple_new(_cls, (wheel, out, err))", namespace)
(_cls, wheel: ForwardRef('Path'), out: ForwardRef('str'), err: ForwardRef('str'))
22,796
collections
_replace
Return a new WheelResult object replacing specified fields with new values
def _replace(self, /, **kwds):
    # `_map` and `field_names` are closure variables bound in the enclosing
    # collections.namedtuple() factory where this method is defined
    result = self._make(_map(kwds.pop, field_names, self))
    if kwds:
        raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
    return result
(self, /, **kwds)
22,801
streamlit_embedcode
_clean_link
Strip trailing slash if present on link. Parameters ---------- link : str URL from code sharing website Returns ------- str Returns value of `link` without trailing slash. Example ------- >>> _clean_link("https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087/") 'https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087' >>> _clean_link("https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087") 'https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087'
def _clean_link(link): """Strip trailing slash if present on link. Parameters ---------- link : str URL from code sharing website Returns ------- str Returns value of `link` without trailing slash. Example ------- >>> _clean_link("https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087/") 'https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087' >>> _clean_link("https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087") 'https://gist.github.com/randyzwitch/be8c5e9fb5b8e7b046afebcac12e5087' """ return link[:-1] if link[-1] == "/" else link
(link)
22,802
streamlit_embedcode
codepen_snippet
Embed a CodePen snippet. Parameters ---------- link : str URL from https://codepen.io/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? theme: str Color theme of snippet (i.e. "light", "dark") preview: bool Require snippet to be clicked to load. Setting `preview=True` can improve load times. Example ------- >>> codepen_snippet("https://codepen.io/ste-vg/pen/GRooLza", width = 600, scrolling = False)
def codepen_snippet( link, height=600, width=950, scrolling=True, theme="light", preview=True ): """Embed a CodePen snippet. Parameters ---------- link : str URL from https://codepen.io/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? theme: str Color theme of snippet (i.e. "light", "dark") preview: bool Require snippet to be clicked to load. Setting `preview=True` can improve load times. Example ------- >>> codepen_snippet("https://codepen.io/ste-vg/pen/GRooLza", width = 600, scrolling = False) """ user, _, slughash = _clean_link(link).split("/")[-3:] return components.html( f""" <p class="codepen" data-height="{height}" data-theme-id="{theme}" data-default-tab="html,result" data-user="{user}" data-slug-hash="{slughash}" data-preview="{str(preview).lower()}" style="height: {height}px; box-sizing: border-box; display: flex; align-items: center; justify-content: center; border: 2px solid; margin: 1em 0; padding: 1em;""> </p><script async src="https://static.codepen.io/assets/embed/ei.js"></script> """, height=height, width=width, scrolling=scrolling, )
(link, height=600, width=950, scrolling=True, theme='light', preview=True)
22,804
streamlit_embedcode
github_gist
Embed a GitHub gist. Parameters ---------- link : str URL from https://gist.github.com/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? Example ------- >>> github_gist("https://gist.github.com/randyzwitch/934d502e53f2adcb48eea2423fe4a47e")
def github_gist(link, height=600, width=950, scrolling=True): """Embed a GitHub gist. Parameters ---------- link : str URL from https://gist.github.com/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? Example ------- >>> github_gist("https://gist.github.com/randyzwitch/934d502e53f2adcb48eea2423fe4a47e") """ gistcreator, gistid = _clean_link(link).split("/")[-2:] return components.html( f"""<script src="https://gist.github.com/{gistcreator}/{gistid}.js"></script>""", height=height, width=width, scrolling=scrolling, )
(link, height=600, width=950, scrolling=True)
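A minimal Streamlit app wiring the embed in; the gist URL is the one from the docstring example above, and the script is assumed to be launched with `streamlit run app.py`:

import streamlit as st

from streamlit_embedcode import github_gist

st.title("Embedded snippets")
github_gist(
    "https://gist.github.com/randyzwitch/934d502e53f2adcb48eea2423fe4a47e",
    height=400,
)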
22,805
streamlit_embedcode
gitlab_snippet
Embed a GitLab snippet.

Parameters
----------
link : str
    URL from https://gitlab.com/explore/snippets
height: int
    Height of the resulting iframe
width: int
    Width of the resulting iframe
scrolling: bool
    If content is larger than iframe size, provide scrollbars?

Example
-------
>>> gitlab_snippet("https://gitlab.com/snippets/1995463", height = 400)
def gitlab_snippet(link, height=600, width=950, scrolling=True):
    """Embed a GitLab snippet.

    Parameters
    ----------
    link : str
        URL from https://gitlab.com/explore/snippets
    height: int
        Height of the resulting iframe
    width: int
        Width of the resulting iframe
    scrolling: bool
        If content is larger than iframe size, provide scrollbars?

    Example
    -------
    >>> gitlab_snippet("https://gitlab.com/snippets/1995463", height = 400)
    """
    snippetnumber = _clean_link(link).split("/")[-1]

    return components.html(
        f"""<script src='https://gitlab.com/snippets/{snippetnumber}.js'></script>""",
        height=height,
        width=width,
        scrolling=scrolling,
    )
(link, height=600, width=950, scrolling=True)
22,806
streamlit_embedcode
ideone_snippet
Embed an Ideone snippet.

Parameters
----------
link : str
    URL from https://ideone.com/
height: int
    Height of the resulting iframe
width: int
    Width of the resulting iframe
scrolling: bool
    If content is larger than iframe size, provide scrollbars?

Example
-------
>>> ideone_snippet("https://ideone.com/vQ54cr")
def ideone_snippet(link, height=600, width=950, scrolling=True):
    """Embed an Ideone snippet.

    Parameters
    ----------
    link : str
        URL from https://ideone.com/
    height: int
        Height of the resulting iframe
    width: int
        Width of the resulting iframe
    scrolling: bool
        If content is larger than iframe size, provide scrollbars?

    Example
    -------
    >>> ideone_snippet("https://ideone.com/vQ54cr")
    """
    snippetnumber = _clean_link(link).split("/")[-1]

    return components.html(
        f"""<script src="https://ideone.com/e.js/{snippetnumber}" type="text/javascript" ></script>""",
        height=height,
        width=width,
        scrolling=scrolling,
    )
(link, height=600, width=950, scrolling=True)
22,807
streamlit_embedcode
pastebin_snippet
Embed a Pastebin snippet. Parameters ---------- link : str URL from https://pastebin.com/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? Example ------- >>> pastebin_snippet("https://pastebin.com/AWYbziQF", width = 600, scrolling = False)
def pastebin_snippet(link, height=600, width=950, scrolling=True): """Embed a Pastebin snippet. Parameters ---------- link : str URL from https://pastebin.com/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? Example ------- >>> pastebin_snippet("https://pastebin.com/AWYbziQF", width = 600, scrolling = False) """ snippetnumber = _clean_link(link).split("/")[-1] return components.html( f"""<script src="https://pastebin.com/embed_js/{snippetnumber}"></script>""", height=height, width=width, scrolling=scrolling, )
(link, height=600, width=950, scrolling=True)
22,809
streamlit_embedcode
tagmycode_snippet
Embed a TagMyCode snippet. Parameters ---------- link : str URL from https://tagmycode.com/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? Example ------- >>> tagmycode_snippet("https://tagmycode.com/snippet/5965/recursive-list-files-in-a-dir#.Xwyc43VKglU")
def tagmycode_snippet(link, height=600, width=950, scrolling=True): """Embed a TagMyCode snippet. Parameters ---------- link : str URL from https://tagmycode.com/ height: int Height of the resulting iframe width: int Width of the resulting iframe scrolling: bool If content is larger than iframe size, provide scrollbars? Example ------- >>> tagmycode_snippet("https://tagmycode.com/snippet/5965/recursive-list-files-in-a-dir#.Xwyc43VKglU") """ snippetnumber = _clean_link(link).split("/")[-2] return components.html( f"""<script src="https://tagmycode.com/embed/js/{snippetnumber}"></script>""", height=height, width=width, scrolling=scrolling, )
(link, height=600, width=950, scrolling=True)
22,811
pyfetch_mimic
FetchResponse
FetchResponse(headers: dict, url: str, ok: bool, redirected: bool, status: int, status_text: str, body_used: bool, do_not_use_body: bytes)
class FetchResponse:
    headers: dict
    url: str
    ok: bool
    redirected: bool
    status: int
    # type: str
    status_text: str
    body_used: bool
    do_not_use_body: bytes

    # unit-tested
    async def string(self) -> str:
        return await self._text()

    async def text(self) -> str:
        return await self._text()

    # unit-tested
    async def _text(self) -> str:
        return self.do_not_use_body.decode()

    async def buffer(self):
        logging.warning("`httpx_http.FetchResponse.buffer()` is not yet implemented for non-pyodide version")
        return self

    # unit-tested
    async def bytes(self) -> bytes:
        return bytes(self.do_not_use_body)

    # unit-tested
    async def json(self) -> dict:
        return json.loads(self.do_not_use_body)

    async def memoryview(self):
        logging.warning("`httpx_http.FetchResponse.memoryview()` is not yet implemented for non-pyodide version")
        return self

    async def unpack_archive(self, extract_dir: str, format: Literal["zip", "tar", "gztar", "bztar", "xztar"]):
        # treat the body as an archive and unpack it into the target directory;
        # `self.bytes()` is a coroutine and must be awaited, and
        # `shutil.unpack_archive()` only accepts a path, so spool the bytes to a
        # temporary file first (requires module-level `import os, tempfile`)
        data = await self.bytes()
        with tempfile.NamedTemporaryFile(suffix=f".{format}", delete=False) as tmp:
            tmp.write(data)
        try:
            shutil.unpack_archive(tmp.name, extract_dir=extract_dir, format=format)
        finally:
            os.unlink(tmp.name)

    def raise_for_status(self):
        if self.status >= 400:
            raise OSError(
                f"Request failed due to local issue. Status code: {self.status}. Status text: {self.status_text}"
            )

    def clone(self) -> "FetchResponse":
        return copy.deepcopy(self)
(headers: dict, url: str, ok: bool, redirected: bool, status: int, status_text: str, body_used: bool, do_not_use_body: bytes) -> None
22,812
pyfetch_mimic
__eq__
null
import copy
import json
import logging
import os
import shutil
import sys
import tempfile
from dataclasses import dataclass
from typing import Literal

if "pyodide" not in sys.modules:
    import httpx

# FetchResponse (see its entry above) is decorated with @dataclass, so __eq__
# is generated automatically and compares two instances field by field.
(self, other)
22,815
pyfetch_mimic
_text
null
# unit-tested
async def _text(self) -> str:
    return self.do_not_use_body.decode()
(self) -> str
22,818
pyfetch_mimic
clone
null
def clone(self) -> "FetchResponse":
    return copy.deepcopy(self)
(self) -> pyfetch_mimic.FetchResponse
22,821
pyfetch_mimic
raise_for_status
null
def raise_for_status(self): if self.status >= 400: raise OSError( f"Request failed due to local issue. Status code: {self.status}. Status text: {self.status_text}" )
(self)
22,827
pyfetch_mimic
http
null
class http:
    # note: `Optional` must be imported from typing alongside `Literal`
    @staticmethod
    async def pyfetch(
        url: str,
        headers: dict,
        method: Literal["GET", "POST", "PUT", "DELETE", "PATCH"] = "GET",
        credentials: Optional[Literal["omit", "same-origin", "include"]] = None,
        body: Optional[str] = None,  # for a dictionary, pass to "body" using: `json.dumps({...})`
        redirect: bool = True,
    ) -> FetchResponse:
        if credentials:
            logging.warning(
                "`credentials` parameter doesn't do anything when running outside of browser. "
                "Separate testing required."
            )
        request_arguments = {
            "method": method,
            "url": url,
            "headers": headers,
            "follow_redirects": redirect,
            "content": body,
        }
        async with httpx.AsyncClient() as client:
            r = await client.request(**request_arguments)
        ok = 100 <= r.status_code < 400
        redirected = 300 <= r.status_code < 400
        return FetchResponse(
            headers=r.headers,
            url=str(r.url),
            redirected=redirected,
            status=r.status_code,
            status_text=str(r.status_code),
            do_not_use_body=r.content,
            ok=ok,
            body_used=False,
        )
()
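A minimal usage sketch for the non-pyodide path (backed by httpx); the URL and header are illustrative only:

import asyncio

from pyfetch_mimic import http


async def main() -> None:
    resp = await http.pyfetch("https://httpbin.org/get", headers={"Accept": "application/json"})
    resp.raise_for_status()
    print(resp.status, await resp.json())


asyncio.run(main())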
22,828
pyfetch_mimic
pyfetch
null
@staticmethod
async def pyfetch(
    url: str,
    headers: dict,
    method: Literal["GET", "POST", "PUT", "DELETE", "PATCH"] = "GET",
    credentials: Optional[Literal["omit", "same-origin", "include"]] = None,
    body: Optional[str] = None,  # for a dictionary, pass to "body" using: `json.dumps({...})`
    redirect: bool = True,
) -> FetchResponse:
    if credentials:
        logging.warning(
            "`credentials` parameter doesn't do anything when running outside of browser. "
            "Separate testing required."
        )
    request_arguments = {
        "method": method,
        "url": url,
        "headers": headers,
        "follow_redirects": redirect,
        "content": body,
    }
    async with httpx.AsyncClient() as client:
        r = await client.request(**request_arguments)
    ok = 100 <= r.status_code < 400
    redirected = 300 <= r.status_code < 400
    return FetchResponse(
        headers=r.headers,
        url=str(r.url),
        redirected=redirected,
        status=r.status_code,
        status_text=str(r.status_code),
        do_not_use_body=r.content,
        ok=ok,
        body_used=False,
    )
(url: str, headers: dict, method: Literal['GET', 'POST', 'PUT', 'DELETE', 'PATCH'] = 'GET', credentials: Optional[Literal['omit', 'same-origin', 'include']] = None, body: Optional[str] = None, redirect: bool = True) -> pyfetch_mimic.FetchResponse
22,836
stubalyzer.compare
ComparisonResult
Result of comparing two symbol nodes and their types.
class ComparisonResult(NamedTuple):
    """
    Result of comparing two symbol nodes and their types.
    """

    match_result: MatchResult
    """Type of comparison result"""

    symbol: RelevantSymbolNode
    """Symbol that was checked"""

    reference: Optional[SymbolNode]
    """Reference symbol that was checked against"""

    symbol_name: str
    """Full name of the symbol that was checked"""

    symbol_type: str
    """Type of the symbol that was checked"""

    reference_name: Optional[str]
    """Full name of the reference symbol"""

    reference_type: Optional[str]
    """Type of the reference symbol"""

    data: Optional[Dict[str, Any]] = None
    """Optional additional data"""

    message_val: Optional[str] = None
    """Optional message"""

    @property
    def message(self) -> str:
        """Human readable result of the comparison"""
        if self.message_val:
            return self.message_val
        if self.match_result is MatchResult.MATCH:
            return "\n".join(
                [
                    f"Types for {self.symbol_name} match:",
                    f"    {self.symbol_type}",
                    f"    {self.reference_type}",
                ]
            )
        elif self.match_result is MatchResult.MISMATCH:
            return "\n".join(
                [
                    f"Types for {self.symbol_name} do not match:",
                    f"    Handwritten type: {self.symbol_type}",
                    f"    Reference type  : {self.reference_type}",
                ]
            )
        elif self.match_result is MatchResult.NOT_FOUND:
            return f'Symbol "{self.symbol_name}" not found in generated stubs'
        elif self.match_result is MatchResult.MISLOCATED_SYMBOL:
            return (
                f'Found symbol "{self.symbol_name}" in different location'
                f' "{self.reference_name}".'
            )

    @classmethod
    def create(
        cls,
        match_result: MatchResult,
        symbol: RelevantSymbolNode,
        reference: Optional[SymbolNode],
        data: Optional[Dict[str, Any]] = None,
        message: Optional[str] = None,
    ) -> ComparisonResult:
        """
        Create a comparison result.

        :param match_result: if the match was successful
        :param symbol: symbol that was checked
        :param reference: reference symbol that was checked against
        :param data: optional additional data
        :param message: optional message
        """
        return cls(
            match_result=match_result,
            symbol=symbol,
            reference=reference,
            data=data,
            message_val=message,
            symbol_name=symbol.fullname,
            symbol_type=_get_symbol_type_info(symbol),
            reference_name=reference.fullname if reference else None,
            reference_type=_get_symbol_type_info(reference) if reference else None,
        )

    @classmethod
    def create_not_found(
        cls, symbol: RelevantSymbolNode, data: Optional[Dict[str, Any]] = None
    ) -> ComparisonResult:
        """
        Create an unsuccessful comparison result
        where there was no reference symbol found.

        :param symbol: symbol we wanted to check
        :param data: optional additional data
        """
        return cls.create(
            match_result=MatchResult.NOT_FOUND, symbol=symbol, reference=None, data=data
        )

    @classmethod
    def create_mislocated_symbol(
        cls,
        symbol: RelevantSymbolNode,
        reference: SymbolNode,
        data: Optional[Dict[str, Any]] = None,
    ) -> ComparisonResult:
        """
        Create an unsuccessful comparison result where the reference symbol
        was found in a different level of the class hierarchy.

        :param symbol: symbol we wanted to check
        :param reference: symbol that was found somewhere else in the hierarchy
        :param data: optional additional data
        """
        return cls.create(
            match_result=MatchResult.MISLOCATED_SYMBOL,
            symbol=symbol,
            reference=reference,
            data=data,
        )

    @classmethod
    def create_mismatch(
        cls,
        symbol: RelevantSymbolNode,
        reference: RelevantSymbolNode,
        data: Optional[Dict[str, Any]] = None,
        message: Optional[str] = None,
    ) -> ComparisonResult:
        """
        Create an unsuccessful comparison result.

        :param symbol: symbol that was checked
        :param reference: reference symbol that was checked against
        :param data: optional additional data
        :param message: optional message
        """
        return cls.create(
            match_result=MatchResult.MISMATCH,
            symbol=symbol,
            reference=reference,
            data=data,
            message=message,
        )

    @classmethod
    def create_match(
        cls,
        symbol: RelevantSymbolNode,
        reference: RelevantSymbolNode,
        data: Optional[Dict[str, Any]] = None,
        message: Optional[str] = None,
    ) -> ComparisonResult:
        """
        Create a successful comparison result.

        :param symbol: symbol that was checked
        :param reference: reference symbol that was checked against
        :param data: optional additional data
        :param message: optional message
        """
        return cls.create(
            match_result=MatchResult.MATCH,
            symbol=symbol,
            reference=reference,
            data=data,
            message=message,
        )
(match_result: stubalyzer.compare.MatchResult, symbol: Union[mypy.nodes.Decorator, mypy.nodes.FuncDef, mypy.nodes.OverloadedFuncDef, mypy.nodes.Var, mypy.nodes.TypeInfo, mypy.nodes.TypeVarExpr, mypy.nodes.TypeAlias], reference: Optional[mypy.nodes.SymbolNode], symbol_name: str, symbol_type: str, reference_name: Optional[str], reference_type: Optional[str], data: Optional[Dict[str, Any]] = None, message_val: Optional[str] = None)
22,838
namedtuple_ComparisonResult
__new__
Create new instance of ComparisonResult(match_result, symbol, reference, symbol_name, symbol_type, reference_name, reference_type, data, message_val)
from builtins import function
(_cls, match_result: ForwardRef('MatchResult'), symbol: ForwardRef('RelevantSymbolNode'), reference: ForwardRef('Optional[SymbolNode]'), symbol_name: ForwardRef('str'), symbol_type: ForwardRef('str'), reference_name: ForwardRef('Optional[str]'), reference_type: ForwardRef('Optional[str]'), data: ForwardRef('Optional[Dict[str, Any]]') = None, message_val: ForwardRef('Optional[str]') = None)
22,841
collections
_replace
Return a new ComparisonResult object replacing specified fields with new values
def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', ['x', 'y'])
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessible by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Validate the field names.  At the user's option, either generate an error
    # message or automatically replace the field name with a valid name.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()
    field_names = list(map(str, field_names))
    typename = _sys.intern(str(typename))

    if rename:
        seen = set()
        for index, name in enumerate(field_names):
            if (not name.isidentifier()
                or _iskeyword(name)
                or name.startswith('_')
                or name in seen):
                field_names[index] = f'_{index}'
            seen.add(name)

    for name in [typename] + field_names:
        if type(name) is not str:
            raise TypeError('Type names and field names must be strings')
        if not name.isidentifier():
            raise ValueError('Type names and field names must be valid '
                             f'identifiers: {name!r}')
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a '
                             f'keyword: {name!r}')

    seen = set()
    for name in field_names:
        if name.startswith('_') and not rename:
            raise ValueError('Field names cannot start with an underscore: '
                             f'{name!r}')
        if name in seen:
            raise ValueError(f'Encountered duplicate field name: {name!r}')
        seen.add(name)

    field_defaults = {}
    if defaults is not None:
        defaults = tuple(defaults)
        if len(defaults) > len(field_names):
            raise TypeError('Got more default values than field names')
        field_defaults = dict(reversed(list(zip(reversed(field_names),
                                                reversed(defaults)))))

    # Variables used in the methods and docstrings
    field_names = tuple(map(_sys.intern, field_names))
    num_fields = len(field_names)
    arg_list = ', '.join(field_names)
    if num_fields == 1:
        arg_list += ','
    repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')'
    tuple_new = tuple.__new__
    _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip

    # Create all the named tuple methods to be added to the class namespace

    namespace = {
        '_tuple_new': tuple_new,
        '__builtins__': {},
        '__name__': f'namedtuple_{typename}',
    }
    code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))'
    __new__ = eval(code, namespace)
    __new__.__name__ = '__new__'
    __new__.__doc__ = f'Create new instance of {typename}({arg_list})'
    if defaults is not None:
        __new__.__defaults__ = defaults

    @classmethod
    def _make(cls, iterable):
        result = tuple_new(cls, iterable)
        if _len(result) != num_fields:
            raise TypeError(f'Expected {num_fields} arguments, got {len(result)}')
        return result

    _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence '
                              'or iterable')

    def _replace(self, /, **kwds):
        result = self._make(_map(kwds.pop, field_names, self))
        if kwds:
            raise ValueError(f'Got unexpected field names: {list(kwds)!r}')
        return result

    _replace.__doc__ = (f'Return a new {typename} object replacing specified '
                        'fields with new values')

    def __repr__(self):
        'Return a nicely formatted representation string'
        return self.__class__.__name__ + repr_fmt % self

    def _asdict(self):
        'Return a new dict which maps field names to their values.'
        return _dict(_zip(self._fields, self))

    def __getnewargs__(self):
        'Return self as a plain tuple.  Used by copy and pickle.'
        return _tuple(self)

    # Modify function metadata to help with introspection and debugging
    for method in (
        __new__,
        _make.__func__,
        _replace,
        __repr__,
        _asdict,
        __getnewargs__,
    ):
        method.__qualname__ = f'{typename}.{method.__name__}'

    # Build-up the class namespace dictionary
    # and use type() to build the result class
    class_namespace = {
        '__doc__': f'{typename}({arg_list})',
        '__slots__': (),
        '_fields': field_names,
        '_field_defaults': field_defaults,
        '__new__': __new__,
        '_make': _make,
        '_replace': _replace,
        '__repr__': __repr__,
        '_asdict': _asdict,
        '__getnewargs__': __getnewargs__,
        '__match_args__': field_names,
    }
    for index, name in enumerate(field_names):
        doc = _sys.intern(f'Alias for field number {index}')
        class_namespace[name] = _tuplegetter(index, doc)

    result = type(typename, (tuple,), class_namespace)

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython), or where the user has
    # specified a particular module.
    if module is None:
        try:
            module = _sys._getframe(1).f_globals.get('__name__', '__main__')
        except (AttributeError, ValueError):
            pass
    if module is not None:
        result.__module__ = module

    return result
(self, /, **kwds)
22,842
stubalyzer.compare
MatchResult
An enumeration.
class MatchResult(Enum):
    MATCH = "match"
    MISMATCH = "mismatch"
    NOT_FOUND = "not_found"
    MISLOCATED_SYMBOL = "mislocated_symbol"

    @classmethod
    def declare_mismatch(cls, matchResultString: str) -> MatchResult:
        err = matchResultString == MatchResult.MATCH.value
        try:
            result = MatchResult(matchResultString)
        except ValueError:
            err = True
        if err:
            possible_values = ", ".join(
                [f'"{m.value}"' for m in MatchResult if m is not MatchResult.MATCH]
            )
            raise ValueError(
                f'"{matchResultString}" is not a valid mismatch type.'
                f" (Use one of {possible_values}"
            )
        return result
(value, names=None, *, module=None, qualname=None, type=None, start=1)
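A minimal usage sketch for MatchResult.declare_mismatch, assuming stubalyzer is installed; it accepts the mismatch values but rejects "match" by design:

from stubalyzer.compare import MatchResult

result = MatchResult.declare_mismatch("mismatch")
print(result)  # MatchResult.MISMATCH

try:
    MatchResult.declare_mismatch("match")  # rejected: "match" is not a mismatch type
except ValueError as exc:
    print(exc)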
22,845
stubalyzer.compare
compare_symbols
Check if the given symbol node is compatible with the reference symbol.

Will return a successful comparison if any of the following holds:

- the symbols describe the same class
- the symbols are type aliases that resolve to the same type
- ``symbol`` is a valid subtype of ``reference`` (see :py:func:`mypy.subtypes.is_subtype`)
- ``symbol`` and ``reference`` somehow overlap (see :py:func:`mypy.meet.is_overlapping_types`)

:param symbol: symbol node to validate
:param reference: symbol node to validate against
def compare_symbols(
    symbol: RelevantSymbolNode, reference: RelevantSymbolNode
) -> ComparisonResult:
    """
    Check if the given symbol node is compatible with the reference symbol.

    Will return a successful comparison if any of the following holds:

    - the symbols describe the same class
    - the symbols are type aliases that resolve to the same type
    - ``symbol`` is a valid subtype of ``reference``
      (see :py:func:`mypy.subtypes.is_subtype`)
    - ``symbol`` and ``reference`` somehow overlap
      (see :py:func:`mypy.meet.is_overlapping_types`)

    :param symbol: symbol node to validate
    :param reference: symbol node to validate against
    """
    # TODO: Check if this is always the case, i.e. could there be
    # cases where `symbol` and `reference` don't have the same class but still match?
    if type(symbol) != type(reference):
        return ComparisonResult.create_mismatch(symbol=symbol, reference=reference)

    if isinstance(symbol, TypeInfo) and isinstance(reference, TypeInfo):
        return _type_infos_are_same_class(symbol, reference)

    if isinstance(symbol, TypeAlias) and isinstance(reference, TypeAlias):
        return _compare_type_aliases(symbol, reference)

    if isinstance(symbol, TypeVarExpr) and isinstance(reference, TypeVarExpr):
        return _compare_type_var_expr(symbol, reference)

    if isinstance(symbol, Decorator) and isinstance(reference, Decorator):
        return _compare_decorator(symbol, reference)

    return compare_mypy_types(
        symbol, reference, getattr(symbol, "type"), getattr(reference, "type")
    )
(symbol: Union[mypy.nodes.Decorator, mypy.nodes.FuncDef, mypy.nodes.OverloadedFuncDef, mypy.nodes.Var, mypy.nodes.TypeInfo, mypy.nodes.TypeVarExpr, mypy.nodes.TypeAlias], reference: Union[mypy.nodes.Decorator, mypy.nodes.FuncDef, mypy.nodes.OverloadedFuncDef, mypy.nodes.Var, mypy.nodes.TypeInfo, mypy.nodes.TypeVarExpr, mypy.nodes.TypeAlias]) -> stubalyzer.compare.ComparisonResult
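A hedged sketch of comparing two symbols; `symbol` and `reference` are assumed to be RelevantSymbolNode instances obtained elsewhere (e.g. via stubalyzer.collect.get_stub_types):

from stubalyzer.compare import MatchResult, compare_symbols

result = compare_symbols(symbol, reference)  # `symbol`/`reference` collected elsewhere
if result.match_result is not MatchResult.MATCH:
    print(result.message)  # human-readable description of the problem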
22,846
stubalyzer.collect
get_stub_types
Analyze the stub files in stubs_path and return module and class definitions
of stubs as symbol nodes.

Only relevant symbol nodes (e.g. for variables, functions, classes, methods)
are returned. They contain the type annotation information.

:param stubs_path: where all the stub files are located
:param mypy_conf_path: path to mypy.ini
:param root_path: path to the code directory where the type analysis is started
def get_stub_types(
    stubs_path: str, mypy_conf_path: str, root_path: Optional[str] = None
) -> Iterable[Tuple[RelevantSymbolNode, str]]:
    """
    Analyze the stub files in stubs_path and return module
    and class definitions of stubs as symbol nodes.

    Only relevant symbol nodes (e.g. for variables, functions, classes, methods)
    are returned. They contain the type annotation information.

    :param stubs_path: where all the stub files are located
    :param mypy_conf_path: path to mypy.ini
    :param root_path: path to the code directory where the type analysis is started
    """
    stubs_path = abspath(stubs_path)

    if root_path:
        build_result = _mypy_analyze(mypy_conf_path, root_path, stubs_path)
    else:
        build_result = _mypy_analyze(mypy_conf_path, stubs_path)

    stubbed_modules = {
        module
        for module in build_result.graph.values()
        if module.path
        and is_stubbed_module(module)
        and module.path.startswith(stubs_path)
    }

    for module in stubbed_modules:
        if module.tree:
            assert module.path
            yield from (
                (stub_type, module.path)
                for stub_type in collect_types(module.tree)
            )
(stubs_path: str, mypy_conf_path: str, root_path: Optional[str] = None) -> Iterable[Tuple[Union[mypy.nodes.Decorator, mypy.nodes.FuncDef, mypy.nodes.OverloadedFuncDef, mypy.nodes.Var, mypy.nodes.TypeInfo, mypy.nodes.TypeVarExpr, mypy.nodes.TypeAlias], str]]
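A minimal collection sketch, assuming stubalyzer and mypy are installed; the "stubs" directory and "mypy.ini" path are hypothetical:

from stubalyzer.collect import get_stub_types

symbol_map = {
    symbol.fullname: symbol
    for symbol, _path in get_stub_types("stubs", "mypy.ini")  # hypothetical paths
}
print(f"collected {len(symbol_map)} symbols")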
22,848
stubalyzer.lookup
lookup_symbol
Find the given symbol in the symbol map.

Ideally the symbol is just found under the exact same ``fullname``.

If not and the symbol is a method, this will take the symbol's class and
look it up in the map and then try to resolve the symbol on that class
using its method resolution order.

If the result of the lookup has a different ``fullname`` than the original
symbol the given symbol is defined at a different point in the class hierarchy
than expected.

:param symbol_map: Dictionary for looking up symbols by their full name
:param symbol_to_lookup: Symbol to search for
:return: The found symbol (if any) and the class it was found on (if any)
def lookup_symbol(
    symbol_map: Dict[str, RelevantSymbolNode], symbol_to_lookup: SymbolNode
) -> LookupResult:
    """
    Find the given symbol in the symbol map.

    Ideally the symbol is just found under the exact same ``fullname``.

    If not and the symbol is a method, this will take the symbol's class and
    look it up in the map and then try to resolve the symbol on that class
    using its method resolution order.

    If the result of the lookup has a different ``fullname`` than the original
    symbol the given symbol is defined at a different point in the class
    hierarchy than expected.

    :param symbol_map: Dictionary for looking up symbols by their full name
    :param symbol_to_lookup: Symbol to search for
    :return: The found symbol (if any) and the class it was found on (if any)
    """
    fail = LookupResult(None, None)
    symbol = symbol_map.get(symbol_to_lookup.fullname)

    if symbol:
        return LookupResult(symbol, get_symbol_class(symbol))

    # Check if we have a class on the symbol we're looking up
    cls_to_lookup = getattr(symbol_to_lookup, "info", None)
    if not cls_to_lookup or cls_to_lookup == FUNC_NO_INFO:
        return fail

    symbol_cls = symbol_map.get(cls_to_lookup.fullname)
    if not symbol_cls or not isinstance(symbol_cls, TypeInfo):
        return fail

    found_symbol_table_node = symbol_cls.get(symbol_to_lookup.name)
    if not found_symbol_table_node or not found_symbol_table_node.node:
        return fail

    return LookupResult(
        found_symbol_table_node.node, get_symbol_class(found_symbol_table_node.node)
    )
(symbol_map: Dict[str, Union[mypy.nodes.Decorator, mypy.nodes.FuncDef, mypy.nodes.OverloadedFuncDef, mypy.nodes.Var, mypy.nodes.TypeInfo, mypy.nodes.TypeVarExpr, mypy.nodes.TypeAlias]], symbol_to_lookup: mypy.nodes.SymbolNode) -> stubalyzer.lookup.LookupResult
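A hedged lookup sketch reusing the hypothetical `symbol_map` from the sketch above; since LookupResult is a two-field tuple, plain tuple unpacking is safe:

from stubalyzer.lookup import lookup_symbol

found, found_class = lookup_symbol(symbol_map, symbol_to_check)  # symbol_to_check: a SymbolNode
if found is not None and found.fullname != symbol_to_check.fullname:
    # the symbol lives at a different point of the class hierarchy
    print(f"{symbol_to_check.fullname} resolved to {found.fullname}")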
22,851
xqute.defaults
JobStatus
The status of a job

Life cycles:
........................queued in scheduler
INIT -> QUEUED -> SUBMITTED -> RUNNING -> FINISHED (FAILED)
INIT -> QUEUED -> SUBMITTED -> RUNNING -> KILLING -> FINISHED
INIT -> QUEUED -> SUBMITTED -> KILLING -> FINISHED
INIT -> QUEUED -> (CANCELED)

Attributes:
    INIT: When a job is initialized
    RETRYING: When a job is to be retried
    QUEUED: When a job is queued
    SUBMITTED: When a job is submitted
    RUNNING: When a job is running
    KILLING: When a job is being killed
    FINISHED: When a job is finished
    FAILED: When a job is failed
class JobStatus:
    """The status of a job

    Life cycles:
    ........................queued in scheduler
    INIT -> QUEUED -> SUBMITTED -> RUNNING -> FINISHED (FAILED)
    INIT -> QUEUED -> SUBMITTED -> RUNNING -> KILLING -> FINISHED
    INIT -> QUEUED -> SUBMITTED -> KILLING -> FINISHED
    INIT -> QUEUED -> (CANCELED)

    Attributes:
        INIT: When a job is initialized
        RETRYING: When a job is to be retried
        QUEUED: When a job is queued
        SUBMITTED: When a job is submitted
        RUNNING: When a job is running
        KILLING: When a job is being killed
        FINISHED: When a job is finished
        FAILED: When a job is failed
    """
    INIT: int = 0
    RETRYING: int = 1
    QUEUED: int = 2
    SUBMITTED: int = 3
    RUNNING: int = 4
    KILLING: int = 5
    FINISHED: int = 6
    FAILED: int = 7

    @classmethod
    def get_name(cls, *statuses: int) -> Tuple[str, ...] | str:
        """Get the name of the status

        Args:
            *statuses: The status values

        Returns:
            The name of the status if a single status is passed, otherwise
            a tuple of names
        """
        ret_dict = {}
        for name, value in cls.__dict__.items():
            if value in statuses:
                ret_dict[value] = name

        ret_tuple = tuple(ret_dict[status] for status in statuses)
        if len(ret_tuple) > 1:
            return ret_tuple

        return ret_tuple[0]  # pragma: no cover
()
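A small usage sketch for JobStatus.get_name, assuming xqute is installed; a single status yields a string, several statuses yield a tuple:

from xqute.defaults import JobStatus

print(JobStatus.get_name(JobStatus.RUNNING))                   # RUNNING
print(JobStatus.get_name(JobStatus.QUEUED, JobStatus.FAILED))  # ('QUEUED', 'FAILED')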
22,852
pipen_verbose
PipenVerbose
pipen-verbose plugin: Logging some additional information for pipen
class PipenVerbose:
    """pipen-verbose plugin: Logging some additional information for pipen"""
    __version__: str = __version__

    def __init__(self) -> None:
        """Constructor"""
        self.tic: float = 0.0  # pragma: no cover

    @plugin.impl
    def on_proc_input_computed(self, proc: "Proc"):
        """Print input data on debug"""
        data_to_show = proc.input.data.copy()
        if hasattr(data_to_show, "map"):  # pragma: no cover
            # pandas 2.1
            data_to_show = data_to_show.map(_shorten_path)
        else:
            data_to_show = data_to_show.applymap(_shorten_path)

        for line in data_to_show.to_string(
            show_dimensions=True, index=False
        ).splitlines():
            line = line.replace('%', '%%').replace('[', '\\[')
            proc.log("debug", f"indata | {line}")

    @plugin.impl
    async def on_proc_start(self, proc: "Proc"):
        """Print some configuration items of the process"""
        props = {}
        for prop, getter in VERBOSAL_CONFIGS.items():
            value = getter(proc) if getter else getattr(proc, prop)
            if value is not None and value != proc.pipeline.config.get(
                prop, None
            ):
                props[prop] = value

        key_len = max(len(prop) for prop in props) if props else 0
        for prop, value in props.items():
            proc.log(
                "info", "%s: %s", prop.ljust(key_len), value, logger=logger
            )

        # args
        if proc.envs:
            key_len = max(len(key) for key in proc.envs) if proc.envs else 0
            for key, value in proc.envs.items():
                value = [
                    line
                    if i == 0
                    else f"{' ' * (len(proc.name) + key_len + 17)}{line}"
                    for i, line in enumerate(
                        str(value)
                        .replace('%', '%%')
                        .replace('[', '\\[')
                        .splitlines()
                    )
                ]
                proc.log(
                    "info",
                    "envs.%s: %s",
                    key.ljust(key_len),
                    "\n".join(value),
                    logger=logger,
                )

        job = proc.jobs[0]
        # input
        input = job.input
        key_len = max(len(inp) for inp in input) if input else 0
        for inkey, inval in input.items():
            job.log(
                "info", "in.%s: %s", inkey.ljust(key_len), inval, logger=logger
            )

        # output
        output = job.output
        key_len = max(len(outp) for outp in output) if output else 0
        for inkey, inval in output.items():
            job.log(
                "info",
                "out.%s: %s",
                inkey.ljust(key_len),
                inval,
                logger=logger,
            )

        self.tic = time()

    @plugin.impl
    async def on_proc_done(self, proc: "Proc", succeeded: bool) -> None:
        """Log the elapsed time for the process.

        If the process fails, log some error messages.
        """
        proc.log(
            "info",
            "Time elapsed: %ss",
            _format_secs(time() - self.tic),
            logger=logger,
        )
        if succeeded:
            return

        # print error info if any job failed
        failed_jobs = [
            job.index for job in proc.jobs if job.status == JobStatus.FAILED
        ]
        if not failed_jobs:  # pragma: no cover
            # could be triggered by Ctrl+C and all jobs are running
            return

        job = proc.jobs[failed_jobs[0]]
        proc.log(
            "error",
            "[red]Failed jobs: %s[/red]",
            brief_list(failed_jobs),
            logger=logger,
        )

        for job in proc.jobs:
            if job.status == JobStatus.FAILED:
                stderr = (
                    await a_read_text(job.stderr_file)
                    if job.stderr_file.is_file()
                    else ""
                )
                for line in stderr.splitlines():
                    job.log("error", "[red]%s[/red]", line, logger=logger)

                job.log(
                    "error",
                    "[red]-----------------------------------[/red]",
                    logger=logger,
                )
                job.log("error", "Script: %s", job.script_file, logger=logger)
                job.log("error", "Stdout: %s", job.stdout_file, logger=logger)
                job.log("error", "Stderr: %s", job.stderr_file, logger=logger)
                break
() -> None
22,853
pipen_verbose
__init__
Constructor
def __init__(self) -> None:
    """Constructor"""
    self.tic: float = 0.0  # pragma: no cover
(self) -> NoneType
22,854
pipen_verbose
_format_secs
Format a time duration

Args:
    seconds: the time duration in seconds

Returns:
    The formatted string. For example: "01:01:01.001" stands for
    1 hour, 1 minute, 1 second and 1 millisecond.
def _format_secs(seconds: float) -> str:
    """Format a time duration

    Args:
        seconds: the time duration in seconds

    Returns:
        The formatted string. For example: "01:01:01.001" stands for
        1 hour, 1 minute, 1 second and 1 millisecond.
    """
    minute, sec = divmod(seconds, 60)
    hour, minute = divmod(minute, 60)
    return "%02d:%02d:%02d.%03.0f" % (
        hour,
        minute,
        sec,
        1000 * (sec - int(sec)),
    )
(seconds: float) -> str
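A quick check of the formatting behavior; note that _format_secs is a private helper, so importing it directly is shown for illustration only:

from pipen_verbose import _format_secs

print(_format_secs(3661.001))  # 01:01:01.001
print(_format_secs(59.5))      # 00:00:59.500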
22,855
pipen_verbose
_shorten_path
Shorten the path in input data
def _shorten_path(path: Any, cutoff: int = 20) -> Any:
    """Shorten the path in input data"""
    if not isinstance(path, str):
        return path
    if len(path) < cutoff:
        return path
    if os.path.sep not in path:
        return f"{path[:5]} ... {path[-5:]}"

    parts = path.split(os.path.sep)
    if len(parts) >= 3:
        out = os.path.sep.join([parts[0], "..."] + parts[-2:])
        if len(out) < cutoff:
            return out
        out = os.path.sep.join([parts[0], "...", parts[-1]])
        return out

    if len(parts[0]) > 3:
        return os.path.sep.join(["...", parts[-1]])

    return os.path.sep.join([parts[0], f"...{parts[-1][-5:]}"])
(path: Any, cutoff: int = 20) -> Any
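An illustrative sketch of the shortening rules; _shorten_path is private, so the import is for demonstration only and the paths are made up:

from pipen_verbose import _shorten_path

print(_shorten_path("short.txt"))                      # below the cutoff: unchanged
print(_shorten_path("/very/long/path/to/data/x.csv")) # /.../data/x.csv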
22,856
xqute.utils
a_read_text
Read the text from a file asynchronously

Args:
    path: The path of the file

Returns:
    The content of the file
"""Utilities for xqute""" import logging from os import PathLike from typing import Callable import asyncio from functools import partial, wraps import aiopath as aiop # type: ignore import aiofile as aiof from rich.logging import RichHandler from .defaults import LOGGER_NAME # helper functions to read and write the whole content of the file async def a_read_text(path: PathLike) -> str: """Read the text from a file asyncly Args: path: The path of the file Returns: The content of the file """ async with aiof.async_open(path, mode='rt') as file: # type: ignore return await file.read()
(path: os.PathLike) -> str
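A minimal asyncio sketch, assuming xqute is installed; "job.stderr" is a hypothetical file path:

import asyncio

from xqute.utils import a_read_text

async def main():
    content = await a_read_text("job.stderr")  # hypothetical path
    print(content)

asyncio.run(main())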
22,857
pipen.utils
brief_list
Briefly show an integer list, combining runs of consecutive numbers.

Args:
    blist: The list

Returns:
    The string to show for the briefed list.
def brief_list(blist: List[int]) -> str:
    """Briefly show an integer list, combining runs of consecutive numbers.

    Args:
        blist: The list

    Returns:
        The string to show for the briefed list.
    """
    ret = []
    for _, g in groupby(enumerate(blist), lambda x: x[0] - x[1]):
        list_group = list(map(itemgetter(1), g))
        if len(list_group) > 1:
            ret.append(f"{list_group[0]}-{list_group[-1]}")
        else:
            ret.append(str(list_group[0]))
    return ", ".join(ret)
(blist: List[int]) -> str
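A short usage sketch; runs of consecutive integers are collapsed into ranges:

from pipen.utils import brief_list

print(brief_list([0, 1, 2, 5, 7, 8]))  # 0-2, 5, 7-8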
22,858
pipen.utils
get_logger
Get the logger by given plugin name

Args:
    name: The name of the plugin
    level: The initial level of the logger

Returns:
    The logger
def get_logger(
    name: str = LOGGER_NAME,
    level: str | int = None,
) -> logging.LoggerAdapter:
    """Get the logger by given plugin name

    Args:
        name: The name of the plugin
        level: The initial level of the logger

    Returns:
        The logger
    """
    log = logging.getLogger(f"pipen.{name}")
    log.addHandler(_logger_handler)
    if level is not None:
        log.setLevel(level.upper() if isinstance(level, str) else level)
    return logging.LoggerAdapter(log, {"plugin_name": name})
(name: str = 'core', level: Union[str, int, NoneType] = None) -> logging.LoggerAdapter
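A minimal sketch, assuming pipen is installed; the plugin name "verbose" is an example:

from pipen.utils import get_logger

logger = get_logger("verbose", "debug")
logger.info("plugin message")  # handled by the "pipen.verbose" logger with the plugin name attached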
22,860
pyglottolog.api
Glottolog
API to access Glottolog data

This class provides (read and write) access to a local copy of the Glottolog data,
which can be obtained as explained in the
`README <https://github.com/glottolog/pyglottolog#install>`_
class Glottolog(API):
    """
    API to access Glottolog data

    This class provides (read and write) access to a local copy of the Glottolog data,
    which can be obtained as explained in the
    `README <https://github.com/glottolog/pyglottolog#install>`_
    """
    countries = [models.Country(c.alpha_2, c.name) for c in pycountry.countries]

    def __init__(self, repos='.', *, cache: bool = False):
        """
        :param repos: Path to a copy of `<https://github.com/glottolog/glottolog>`_
        :param cache: Indicate whether to cache `Languoid` objects or not. If `True`, the API must \
        be used read-only.
        """
        API.__init__(self, repos=repos)
        #: Absolute path to the copy of the data repository:
        self.repos: pathlib.Path = pathlib.Path.cwd() / self.repos
        #: Absolute path to the `tree` directory in the repos.
        self.tree: pathlib.Path = self.repos / 'languoids' / 'tree'
        if not self.tree.exists():
            raise ValueError('repos dir %s missing tree dir: %s' % (self.repos, self.tree))
        if not self.repos.joinpath('references').exists():
            raise ValueError('repos dir %s missing references subdir' % (self.repos,))
        self.cache = Cache() if cache else None

    def __str__(self):
        return '<Glottolog repos {0} at {1}>'.format(git_describe(self.repos), self.repos)

    def describe(self) -> str:
        return git_describe(self.repos)

    def references_path(self, *comps: str):
        """
        Path within the `references` directory of the repos.
        """
        return self.repos.joinpath('references', *comps)

    def languoids_path(self, *comps):
        """
        Path within the `languoids` directory of the repos.
        """
        return self.repos.joinpath('languoids', *comps)

    def build_path(self, *comps: str) -> pathlib.Path:
        build_dir = self.repos.joinpath('build')
        if not build_dir.exists():
            build_dir.mkdir()  # pragma: no cover
        return build_dir.joinpath(*comps)

    @contextlib.contextmanager
    def cache_dir(self, name):
        d = self.build_path(name)
        if not d.exists():
            d.mkdir()
        yield d

    def _cfg(self, name, cls=None):
        return config.Config.from_ini(
            self.path('config', name + '.ini'), object_class=cls or config.Generic)

    @functools.cached_property
    def aes_status(self) -> typing.Dict[str, config.AES]:
        """
        :rtype: mapping with :class:`config.AES` values.
        """
        return self._cfg('aes_status', cls=config.AES)

    @functools.cached_property
    def aes_sources(self) -> typing.Dict[str, config.AESSource]:
        """
        :rtype: mapping with :class:`config.AESSource` values
        """
        return self._cfg('aes_sources', cls=config.AESSource)

    @functools.cached_property
    def document_types(self) -> typing.Dict[str, config.DocumentType]:
        """
        :rtype: mapping with :class:`config.DocumentType` values
        """
        return self._cfg('document_types', cls=config.DocumentType)

    @functools.cached_property
    def med_types(self) -> typing.Dict[str, config.MEDType]:
        """
        :rtype: mapping with :class:`config.MEDType` values
        """
        return self._cfg('med_types', cls=config.MEDType)

    @functools.cached_property
    def macroareas(self) -> typing.Dict[str, config.Macroarea]:
        """
        :rtype: mapping with :class:`config.Macroarea` values
        """
        return self._cfg('macroareas', cls=config.Macroarea)

    @functools.cached_property
    def language_types(self) -> typing.Dict[str, config.LanguageType]:
        """
        :rtype: mapping with :class:`config.LanguageType` values
        """
        return self._cfg('language_types', cls=config.LanguageType)

    @functools.cached_property
    def languoid_levels(self) -> typing.Dict[str, config.LanguoidLevel]:
        """
        :rtype: mapping with :class:`config.LanguoidLevel` values
        """
        return self._cfg('languoid_levels', cls=config.LanguoidLevel)

    @functools.cached_property
    def editors(self) -> typing.Dict[str, config.Generic]:
        """
        Metadata about editors of Glottolog

        :rtype: mapping with :class:`config.Generic` values
        """
        return self._cfg('editors')

    @functools.cached_property
    def publication(self) -> typing.Dict[str, config.Generic]:
        """
        Metadata about the Glottolog publication

        :rtype: mapping with :class:`config.Generic` values
        """
        return self._cfg('publication')

    @functools.cached_property
    def iso(self) -> clldutils.iso_639_3.ISO:
        """
        :return: `clldutils.iso_639_3.ISO` instance, fed with the data of the latest \
        ISO code table zip found in the `build` directory.
        """
        return util.get_iso(self.build_path())

    @property
    def ftsindex(self) -> pathlib.Path:
        """
        Directory within `build` where the FullTextSearch index is created.
        """
        return self.build_path('whoosh')

    @functools.cached_property
    def _tree_dirs(self):
        return list(walk(self.tree, mode='dirs'))

    @property
    def glottocodes(self) -> models.Glottocodes:
        """
        Registry of Glottocodes.
        """
        return models.Glottocodes(self.languoids_path('glottocodes.json'))

    def languoid(self, id_: typing.Union[str, lls.Languoid]) -> lls.Languoid:
        """
        Retrieve a languoid specified by language code.

        :param id_: Glottocode or ISO code.
        """
        if isinstance(id_, lls.Languoid):
            return id_

        if self.cache and id_ in self.cache:
            return self.cache[id_]

        if ISO_CODE_PATTERN.match(id_):
            for d in self._tree_dirs if self.cache else walk(self.tree, mode='dirs'):
                if self.cache:
                    l_ = self.cache.add(d, self)
                else:
                    l_ = lls.Languoid.from_dir(d, _api=self)
                if l_.iso_code == id_:
                    return l_
        else:
            for d in self._tree_dirs if self.cache else walk(self.tree, mode='dirs'):
                l_ = None
                if self.cache:
                    # If we cache Languoids, we might as well instantiate the ones we traverse:
                    l_ = self.cache.add(d, self)
                if d.name == id_:
                    if self.cache:
                        return l_
                    return lls.Languoid.from_dir(d, _api=self)

    def languoids(
            self,
            ids: set = None,
            maxlevel: typing.Union[int, config.LanguoidLevel, str] = None,
            exclude_pseudo_families: bool = False
    ) -> typing.Generator[lls.Languoid, None, None]:
        """
        Yields languoid objects.

        :param ids: `set` of Glottocodes to limit the result to. This is useful to increase \
        performance, since INI file reading can be skipped for languoids not listed.
        :param maxlevel: Numeric maximal nesting depth of languoids, or Languoid.level.
        :param exclude_pseudo_families: Flag signaling whether to exclude pseudo families, \
        i.e. languoids from non-genealogical trees.
        """
        is_max_level_int = isinstance(maxlevel, int)
        # Non-numeric levels are interpreted as `Languoid.level` descriptors.
        if not is_max_level_int:
            maxlevel = self.languoid_levels.get(maxlevel or 'dialect')

        # Since we traverse the tree topdown, we can cache a mapping of Languoid.id to triples
        # (name, id, level) for populating `Languoid.lineage`.
        nodes = {}
        for d in self._tree_dirs if self.cache else walk(self.tree, mode='dirs'):
            if ids is None or d.name in ids:
                if self.cache:
                    lang = self.cache.add(d, self)
                else:
                    lang = lls.Languoid.from_dir(d, nodes=nodes, _api=self)
                if (is_max_level_int and len(lang.lineage) <= maxlevel) \
                        or ((not is_max_level_int) and lang.level <= maxlevel):
                    if (not exclude_pseudo_families) or not lang.category.startswith('Pseudo'):
                        yield lang

    def languoids_by_code(self, nodes=None) -> typing.Dict[str, lls.Languoid]:
        """
        Returns a `dict` mapping the three major language code schemes
        (Glottocode, ISO code, and Harald's NOCODE_s) to Languoid objects.
        """
        res = {}
        for lang in (self.languoids() if nodes is None else nodes.values()):
            res[lang.id] = lang
            if lang.hid:
                res[lang.hid] = lang
            if lang.iso:
                res[lang.iso] = lang
        return res

    def ascii_tree(self, start: typing.Union[str, lls.Languoid], maxlevel=None):
        """
        Prints an ASCII representation of the languoid tree starting at `start` to `stdout`.
        """
        _ascii_node(
            self.languoid(start),
            0,
            True,
            self.languoid_levels.get(maxlevel, maxlevel) if maxlevel else None,
            '',
            self.languoid_levels)

    def newick_tree(
            self,
            start: typing.Union[None, str, lls.Languoid] = None,
            template: str = None,
            nodes=None,
            maxlevel: typing.Union[int, config.LanguoidLevel] = None
    ) -> str:
        """
        Returns the Newick representation of a (set of) Glottolog classification tree(s).

        :param start: Root languoid of the tree (or `None` to return the complete classification).
        :param template: Python format string accepting the `Languoid` instance as single \
        variable named `l`, used to format node labels.
        """
        template = template or lls.Languoid._newick_default_template
        if start:
            return self.languoid(start).newick_node(
                template=template, nodes=nodes, maxlevel=maxlevel, level=1).newick + ';'
        if nodes is None:
            nodes = collections.OrderedDict((lang.id, lang) for lang in self.languoids())
        trees = []
        for lang in nodes.values():
            if not lang.lineage and not lang.category.startswith('Pseudo '):
                ns = lang.newick_node(
                    nodes=nodes, template=template, maxlevel=maxlevel, level=1).newick
                if lang.level == self.languoid_levels.language:
                    # An isolate: we wrap it in a pseudo-family with the same name and ID.
                    fam = lls.Languoid.from_name_id_level(
                        lang.dir.parent, lang.name, lang.id, 'family', _api=self)
                    ns = '({0}){1}:1'.format(ns, template.format(l=fam))  # noqa: E741
                trees.append('{0};'.format(ns))
        return '\n'.join(trees)

    @functools.cached_property
    def bibfiles(self) -> references.BibFiles:
        """
        Access reference data by BibFile.

        :rtype: :class:`references.BibFiles`
        """
        return references.BibFiles.from_path(self.references_path(), api=self)

    def refs_by_languoid(self, *bibfiles, **kw):
        nodes = kw.get('nodes')
        if bibfiles:
            bibfiles = [
                bib if isinstance(bib, references.BibFile) else self.bibfiles[bib]
                for bib in bibfiles]
        else:
            bibfiles = self.bibfiles
        all_ = {}
        languoids_by_code = self.languoids_by_code(
            nodes or {lang.id: lang for lang in self.languoids()})
        res = collections.defaultdict(list)
        for bib in tqdm(bibfiles):
            for entry in bib.iterentries():
                all_[entry.id] = entry
                for lang in entry.languoids(languoids_by_code)[0]:
                    res[lang.id].append(entry)
        return res, all_

    @functools.cached_property
    def hhtypes(self):
        # Note: The file `hhtype.ini` does not exist anymore. This is fixed in HHTypes, when
        # calling `config.get_ini`. Only used when compiling monster.bib.
        return references.HHTypes(self.references_path('hhtype.ini'))

    @functools.cached_property
    def triggers(self):
        res = {'inlg': [], 'lgcode': []}
        for lang in self.languoids():
            for type_ in res:
                if lang.cfg.has_option('triggers', type_):
                    label = '%s [%s]' % (lang.name, lang.hid or lang.id)
                    res[type_].extend([util.Trigger(type_, label, text)
                                       for text in lang.cfg.getlist('triggers', type_)])
        return res

    @functools.cached_property
    def macroarea_map(self):
        res = {}
        for lang in self.languoids():
            ma = lang.macroareas[0].name if lang.macroareas else ''
            res[lang.id] = ma
            if lang.iso:
                res[lang.iso] = ma
            if lang.hid:
                res[lang.hid] = ma
        return res

    @property
    def current_editors(self):
        return sorted([e for e in self.editors.values() if e.current], key=lambda e: int(e.ord))

    def write_languoids_table(self, outdir, version=None):
        version = version or self.describe()
        out = outdir / 'glottolog-languoids-{0}.csv'.format(version)
        md = outdir / (out.name + '-metadata.json')
        tg = TableGroup.fromvalue({
            "@context": "http://www.w3.org/ns/csvw",
            "dc:version": version,
            "dc:bibliographicCitation": "{0}. "
                                        "{1} [Data set]. "
                                        "Zenodo. "
                                        "https://doi.org/{2}".format(
                ' & '.join([e.name for e in self.current_editors]),
                self.publication.zenodo.title_format.format('(Version {0})'.format(version)),
                self.publication.zenodo.doi,
            ),
            "tables": [load(pycldf.util.pkg_path('components', 'LanguageTable-metadata.json'))],
        })
        tg.tables[0].url = out.name
        for col in [
            dict(name='LL_Code'),
            dict(name='Classification', separator='/'),
            dict(name='Family_Glottocode'),
            dict(name='Family_Name'),
            dict(name='Language_Glottocode'),
            dict(name='Language_Name'),
            dict(name='Level', datatype=dict(base='string', format='family|language|dialect')),
            dict(name='Status'),
        ]:
            tg.tables[0].tableSchema.columns.append(Column.fromvalue(col))

        langs = []
        for lang in self.languoids():
            lid, lname = None, None
            if lang.level == self.languoid_levels.language:
                lid, lname = lang.id, lang.name
            elif lang.level == self.languoid_levels.dialect:
                for lname, lid, level in reversed(lang.lineage):
                    if level == self.languoid_levels.language:
                        break
                else:  # pragma: no cover
                    raise ValueError
            langs.append(dict(
                ID=lang.id,
                Name=lang.name,
                Macroarea=lang.macroareas[0].name if lang.macroareas else None,
                Latitude=lang.latitude,
                Longitude=lang.longitude,
                Glottocode=lang.id,
                ISO639P3code=lang.iso,
                LL_Code=lang.identifier.get('multitree'),
                Classification=[c[1] for c in lang.lineage],
                Language_Glottocode=lid,
                Language_Name=lname,
                Family_Name=lang.lineage[0][0] if lang.lineage else None,
                Family_Glottocode=lang.lineage[0][1] if lang.lineage else None,
                Level=lang.level.name,
                Status=lang.endangerment.status.name if lang.endangerment else None,
            ))
        tg.to_file(md)
        tg.tables[0].write(langs, fname=out)
        return md, out
(repos='.', *, cache: bool = False)
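A hedged sketch, assuming a local clone of glottolog/glottolog at ./glottolog; 'stan1295' is used as an example Glottocode:

from pyglottolog import Glottolog

glottolog = Glottolog('./glottolog', cache=True)  # hypothetical repository path
lang = glottolog.languoid('stan1295')
print(lang.name, lang.iso)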
22,861
pyglottolog.api
__init__
:param repos: Path to a copy of `<https://github.com/glottolog/glottolog>`_
:param cache: Indicate whether to cache `Languoid` objects or not.
    If `True`, the API must be used read-only.
def __init__(self, repos='.', *, cache: bool = False):
    """
    :param repos: Path to a copy of `<https://github.com/glottolog/glottolog>`_
    :param cache: Indicate whether to cache `Languoid` objects or not. If `True`, the API must \
    be used read-only.
    """
    API.__init__(self, repos=repos)
    #: Absolute path to the copy of the data repository:
    self.repos: pathlib.Path = pathlib.Path.cwd() / self.repos
    #: Absolute path to the `tree` directory in the repos.
    self.tree: pathlib.Path = self.repos / 'languoids' / 'tree'
    if not self.tree.exists():
        raise ValueError('repos dir %s missing tree dir: %s' % (self.repos, self.tree))
    if not self.repos.joinpath('references').exists():
        raise ValueError('repos dir %s missing references subdir' % (self.repos,))
    self.cache = Cache() if cache else None
(self, repos='.', *, cache: bool = False)
22,862
pyglottolog.api
__str__
null
def __str__(self):
    return '<Glottolog repos {0} at {1}>'.format(git_describe(self.repos), self.repos)
(self)
22,863
pyglottolog.api
_cfg
null
def _cfg(self, name, cls=None):
    return config.Config.from_ini(
        self.path('config', name + '.ini'), object_class=cls or config.Generic)
(self, name, cls=None)
22,864
pyglottolog.api
ascii_tree
Prints an ASCII representation of the languoid tree starting at `start` to `stdout`.
def ascii_tree(self, start: typing.Union[str, lls.Languoid], maxlevel=None):
    """
    Prints an ASCII representation of the languoid tree starting at `start` to `stdout`.
    """
    _ascii_node(
        self.languoid(start),
        0,
        True,
        self.languoid_levels.get(maxlevel, maxlevel) if maxlevel else None,
        '',
        self.languoid_levels)
(self, start: Union[str, pyglottolog.languoids.languoid.Languoid], maxlevel=None)
22,865
clldutils.apilib
assert_release
null
def assert_release(self):
    return assert_release(self.repos)
(self)
22,866
pyglottolog.api
build_path
null
def build_path(self, *comps: str) -> pathlib.Path:
    build_dir = self.repos.joinpath('build')
    if not build_dir.exists():
        build_dir.mkdir()  # pragma: no cover
    return build_dir.joinpath(*comps)
(self, *comps: str) -> pathlib.Path
22,867
pyglottolog.api
cache_dir
null
@contextlib.contextmanager
def cache_dir(self, name):
    d = self.build_path(name)
    if not d.exists():
        d.mkdir()
    yield d
(self, name)
22,868
pyglottolog.api
describe
null
def describe(self) -> str:
    return git_describe(self.repos)
(self) -> str
22,869
pyglottolog.api
languoid
Retrieve a languoid specified by language code.

:param id_: Glottocode or ISO code.
def languoid(self, id_: typing.Union[str, lls.Languoid]) -> lls.Languoid:
    """
    Retrieve a languoid specified by language code.

    :param id_: Glottocode or ISO code.
    """
    if isinstance(id_, lls.Languoid):
        return id_

    if self.cache and id_ in self.cache:
        return self.cache[id_]

    if ISO_CODE_PATTERN.match(id_):
        for d in self._tree_dirs if self.cache else walk(self.tree, mode='dirs'):
            if self.cache:
                l_ = self.cache.add(d, self)
            else:
                l_ = lls.Languoid.from_dir(d, _api=self)
            if l_.iso_code == id_:
                return l_
    else:
        for d in self._tree_dirs if self.cache else walk(self.tree, mode='dirs'):
            l_ = None
            if self.cache:
                # If we cache Languoids, we might as well instantiate the ones we traverse:
                l_ = self.cache.add(d, self)
            if d.name == id_:
                if self.cache:
                    return l_
                return lls.Languoid.from_dir(d, _api=self)
(self, id_: Union[str, pyglottolog.languoids.languoid.Languoid]) -> pyglottolog.languoids.languoid.Languoid
22,870
pyglottolog.api
languoids
Yields languoid objects.

:param ids: `set` of Glottocodes to limit the result to. This is useful to increase
    performance, since INI file reading can be skipped for languoids not listed.
:param maxlevel: Numeric maximal nesting depth of languoids, or Languoid.level.
:param exclude_pseudo_families: Flag signaling whether to exclude pseudo families,
    i.e. languoids from non-genealogical trees.
def languoids(
        self,
        ids: set = None,
        maxlevel: typing.Union[int, config.LanguoidLevel, str] = None,
        exclude_pseudo_families: bool = False
) -> typing.Generator[lls.Languoid, None, None]:
    """
    Yields languoid objects.

    :param ids: `set` of Glottocodes to limit the result to. This is useful to increase \
    performance, since INI file reading can be skipped for languoids not listed.
    :param maxlevel: Numeric maximal nesting depth of languoids, or Languoid.level.
    :param exclude_pseudo_families: Flag signaling whether to exclude pseudo families, \
    i.e. languoids from non-genealogical trees.
    """
    is_max_level_int = isinstance(maxlevel, int)
    # Non-numeric levels are interpreted as `Languoid.level` descriptors.
    if not is_max_level_int:
        maxlevel = self.languoid_levels.get(maxlevel or 'dialect')

    # Since we traverse the tree topdown, we can cache a mapping of Languoid.id to triples
    # (name, id, level) for populating `Languoid.lineage`.
    nodes = {}
    for d in self._tree_dirs if self.cache else walk(self.tree, mode='dirs'):
        if ids is None or d.name in ids:
            if self.cache:
                lang = self.cache.add(d, self)
            else:
                lang = lls.Languoid.from_dir(d, nodes=nodes, _api=self)
            if (is_max_level_int and len(lang.lineage) <= maxlevel) \
                    or ((not is_max_level_int) and lang.level <= maxlevel):
                if (not exclude_pseudo_families) or not lang.category.startswith('Pseudo'):
                    yield lang
(self, ids: Optional[set] = None, maxlevel: Union[int, pyglottolog.config.LanguoidLevel, str, NoneType] = None, exclude_pseudo_families: bool = False) -> Generator[pyglottolog.languoids.languoid.Languoid, NoneType, NoneType]
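An iteration sketch reusing the hypothetical `glottolog` instance from the sketch above:

for lang in glottolog.languoids(maxlevel='language', exclude_pseudo_families=True):
    print(lang.id, lang.name)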
22,871
pyglottolog.api
languoids_by_code
Returns a `dict` mapping the three major language code schemes (Glottocode, ISO code, and Harald's NOCODE_s) to Languoid objects.
def languoids_by_code(self, nodes=None) -> typing.Dict[str, lls.Languoid]:
    """
    Returns a `dict` mapping the three major language code schemes
    (Glottocode, ISO code, and Harald's NOCODE_s) to Languoid objects.
    """
    res = {}
    for lang in (self.languoids() if nodes is None else nodes.values()):
        res[lang.id] = lang
        if lang.hid:
            res[lang.hid] = lang
        if lang.iso:
            res[lang.iso] = lang
    return res
(self, nodes=None) -> Dict[str, pyglottolog.languoids.languoid.Languoid]
22,872
pyglottolog.api
languoids_path
Path within the `languoids` directory of the repos.
def languoids_path(self, *comps):
    """
    Path within the `languoids` directory of the repos.
    """
    return self.repos.joinpath('languoids', *comps)
(self, *comps)