repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_documentation_string
stringlengths
1
47.2k
func_code_url
stringlengths
85
339
astropy/astropy-healpix
astropy_healpix/healpy.py
ang2vec
def ang2vec(theta, phi, lonlat=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.ang2vec`.

    Converts spherical angles to the corresponding unit Cartesian
    vector(s), using healpy's angle conventions via ``_healpy_to_lonlat``.
    """
    longitude, latitude = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    cartesian = UnitSphericalRepresentation(longitude, latitude).represent_as(
        CartesianRepresentation)
    return cartesian.xyz.value
python
def ang2vec(theta, phi, lonlat=False): """Drop-in replacement for healpy `~healpy.pixelfunc.ang2vec`.""" lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat) rep_sph = UnitSphericalRepresentation(lon, lat) rep_car = rep_sph.represent_as(CartesianRepresentation) return rep_car.xyz.value
Drop-in replacement for healpy `~healpy.pixelfunc.ang2vec`.
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/healpy.py#L169-L174
astropy/astropy-healpix
astropy_healpix/healpy.py
get_interp_weights
def get_interp_weights(nside, theta, phi=None, nest=False, lonlat=False):
    """
    Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_weights`.

    Note that the order of the weights and pixels may differ from healpy's.
    """
    # With no phi given, theta holds pixel indices rather than colatitudes.
    if phi is None:
        theta, phi = pix2ang(nside, ipix=theta, nest=nest)
    lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    scheme = 'nested' if nest else 'ring'
    return bilinear_interpolation_weights(lon, lat, nside, order=scheme)
python
def get_interp_weights(nside, theta, phi=None, nest=False, lonlat=False): """ Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_weights`. Although note that the order of the weights and pixels may differ. """ # if phi is not given, theta is interpreted as pixel number if phi is None: theta, phi = pix2ang(nside, ipix=theta, nest=nest) lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat) return bilinear_interpolation_weights(lon, lat, nside, order='nested' if nest else 'ring')
Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_weights`. Although note that the order of the weights and pixels may differ.
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/healpy.py#L177-L188
astropy/astropy-healpix
astropy_healpix/healpy.py
get_interp_val
def get_interp_val(m, theta, phi, nest=False, lonlat=False):
    """Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_val`."""
    lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat)
    scheme = 'nested' if nest else 'ring'
    return interpolate_bilinear_lonlat(lon, lat, m, order=scheme)
python
def get_interp_val(m, theta, phi, nest=False, lonlat=False): """ Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_val`. """ lon, lat = _healpy_to_lonlat(theta, phi, lonlat=lonlat) return interpolate_bilinear_lonlat(lon, lat, m, order='nested' if nest else 'ring')
Drop-in replacement for healpy `~healpy.pixelfunc.get_interp_val`.
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/healpy.py#L191-L196
astropy/astropy-healpix
astropy_healpix/bench.py
bench_run
def bench_run(fast=False):
    """Run all benchmarks. Return results as a list of per-run records."""
    sizes = [10, 1e3, 1e5] if fast else [10, 1e3, 1e6]
    nsides = [1, 128]
    results = []

    # Benchmarks that are parameterized by the ordering scheme (`nest`).
    for name, benchmark in [('pix2ang', bench_pix2ang),
                            ('ang2pix', bench_ang2pix)]:
        for nest in (True, False):
            for size in sizes:
                for nside in nsides:
                    results.append(run_single(name, benchmark, fast=fast,
                                              size=int(size), nside=nside,
                                              nest=nest))

    # Ordering-conversion benchmarks take no `nest` argument.
    for name, benchmark in [('nest2ring', bench_nest2ring),
                            ('ring2nest', bench_ring2nest)]:
        for size in sizes:
            for nside in nsides:
                results.append(run_single(name, benchmark, fast=fast,
                                          size=int(size), nside=nside))

    for nest in (True, False):
        for size in sizes:
            for nside in nsides:
                results.append(run_single('get_interp_weights',
                                          bench_get_interp_weights,
                                          fast=fast, size=int(size),
                                          nside=nside, nest=nest))

    return results
python
def bench_run(fast=False): """Run all benchmarks. Return results as a dict.""" results = [] if fast: SIZES = [10, 1e3, 1e5] else: SIZES = [10, 1e3, 1e6] for nest in [True, False]: for size in SIZES: for nside in [1, 128]: results.append(run_single('pix2ang', bench_pix2ang, fast=fast, size=int(size), nside=nside, nest=nest)) for nest in [True, False]: for size in SIZES: for nside in [1, 128]: results.append(run_single('ang2pix', bench_ang2pix, fast=fast, size=int(size), nside=nside, nest=nest)) for size in SIZES: for nside in [1, 128]: results.append(run_single('nest2ring', bench_nest2ring, fast=fast, size=int(size), nside=nside)) for size in SIZES: for nside in [1, 128]: results.append(run_single('ring2nest', bench_ring2nest, fast=fast, size=int(size), nside=nside)) for nest in [True, False]: for size in SIZES: for nside in [1, 128]: results.append(run_single('get_interp_weights', bench_get_interp_weights, fast=fast, size=int(size), nside=nside, nest=nest)) return results
Run all benchmarks. Return results as a dict.
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/bench.py#L150-L188
astropy/astropy-healpix
astropy_healpix/bench.py
bench_report
def bench_report(results):
    """Print a report for given benchmark results to the console."""
    column_names = ['function', 'nest', 'nside', 'size',
                    'time_healpy', 'time_self', 'ratio']
    column_types = ['S20', bool, int, int, float, float, float]
    table = Table(names=column_names, dtype=column_types, masked=True)

    for result in results:
        table.add_row(result)

    table['time_self'].format = '10.7f'

    # healpy timings (and the speed ratio) only exist when healpy is present.
    if HEALPY_INSTALLED:
        table['ratio'] = table['time_self'] / table['time_healpy']
        table['time_healpy'].format = '10.7f'
        table['ratio'].format = '7.2f'

    table.pprint(max_lines=-1)
python
def bench_report(results): """Print a report for given benchmark results to the console.""" table = Table(names=['function', 'nest', 'nside', 'size', 'time_healpy', 'time_self', 'ratio'], dtype=['S20', bool, int, int, float, float, float], masked=True) for row in results: table.add_row(row) table['time_self'].format = '10.7f' if HEALPY_INSTALLED: table['ratio'] = table['time_self'] / table['time_healpy'] table['time_healpy'].format = '10.7f' table['ratio'].format = '7.2f' table.pprint(max_lines=-1)
Print a report for given benchmark results to the console.
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/bench.py#L191-L207
astropy/astropy-healpix
astropy_healpix/bench.py
main
def main(fast=False):
    """Run all benchmarks and print a report to the console."""
    print('Running benchmarks...\n')
    bench_report(bench_run(fast=fast))
python
def main(fast=False): """Run all benchmarks and print report to the console.""" print('Running benchmarks...\n') results = bench_run(fast=fast) bench_report(results)
Run all benchmarks and print report to the console.
https://github.com/astropy/astropy-healpix/blob/c7fbe36305aadda9946dd37969d5dcb9ff6b1440/astropy_healpix/bench.py#L210-L214
dtheodor/flask-sqlalchemy-session
flask_sqlalchemy_session/__init__.py
flask_scoped_session.init_app
def init_app(self, app):
    """Set up scoped session creation and teardown for the passed ``app``.

    :param app: a :class:`~flask.Flask` application
    """
    app.scoped_session = self

    @app.teardown_appcontext
    def remove_scoped_session(*args, **kwargs):
        # pylint: disable=missing-docstring,unused-argument,unused-variable
        app.scoped_session.remove()
python
def init_app(self, app): """Setup scoped sesssion creation and teardown for the passed ``app``. :param app: a :class:`~flask.Flask` application """ app.scoped_session = self @app.teardown_appcontext def remove_scoped_session(*args, **kwargs): # pylint: disable=missing-docstring,unused-argument,unused-variable app.scoped_session.remove()
Setup scoped session creation and teardown for the passed ``app``. :param app: a :class:`~flask.Flask` application
https://github.com/dtheodor/flask-sqlalchemy-session/blob/c7ddb03e85cdd27fcdcc809b9e1c29d7738d8ebf/flask_sqlalchemy_session/__init__.py#L66-L76
viniciuschiele/flask-apidoc
flask_apidoc/utils.py
cached
def cached(f):
    """
    Cache (memoize) decorator for functions taking one or more arguments.

    Results are stored on the wrapped function as ``f.cache``, keyed by the
    string form of the call arguments, so unhashable arguments are supported
    (at the cost of possible collisions between values whose ``str`` forms
    coincide).

    :param f: The function to be cached.
    :return: The wrapping function returning cached values.
    """
    cache = f.cache = {}

    @functools.wraps(f)
    def decorator(*args, **kwargs):
        # Sort keyword items so f(a=1, b=2) and f(b=2, a=1) share one
        # cache entry; plain str(kwargs) depends on insertion order.
        key = str(args) + str(sorted(kwargs.items()))
        if key not in cache:
            cache[key] = f(*args, **kwargs)
        return cache[key]
    return decorator
python
def cached(f): """ Cache decorator for functions taking one or more arguments. :param f: The function to be cached. :return: The cached value. """ cache = f.cache = {} @functools.wraps(f) def decorator(*args, **kwargs): key = str(args) + str(kwargs) if key not in cache: cache[key] = f(*args, **kwargs) return cache[key] return decorator
Cache decorator for functions taking one or more arguments. :param f: The function to be cached. :return: The cached value.
https://github.com/viniciuschiele/flask-apidoc/blob/5c3dfd9aae7780622e843bf7e95863264df3a488/flask_apidoc/utils.py#L8-L23
rdidyk/falcon-swagger-ui
falcon_swagger_ui/resources.py
register_swaggerui_app
def register_swaggerui_app(app, swagger_uri, api_url, page_title='Swagger UI',
                           favicon_url=None, config=None, uri_prefix=""):
    """:type app: falcon.API"""
    templates_folder = 'templates'
    static_folder = 'dist'

    settings = {
        'client_realm': 'null',
        'client_id': 'null',
        'client_secret': 'null',
        'app_name': 'null',
        'docExpansion': "none",
        'jsonEditor': False,
        'defaultModelRendering': 'schema',
        'showRequestHeaders': False,
        'supportedSubmitMethods': ['get', 'post', 'put', 'delete', 'patch'],
    }
    if config:
        settings.update(config)

    context = {
        'page_title': page_title,
        'favicon_url': favicon_url,
        'base_url': uri_prefix + swagger_uri,
        'api_url': api_url,
        'app_name': settings.pop('app_name'),
        'client_realm': settings.pop('client_realm'),
        'client_id': settings.pop('client_id'),
        'client_secret': settings.pop('client_secret'),
        # Whatever remains is serialized into a json string
        # for inclusion in the .js file.
        'config_json': json.dumps(settings),
    }

    app.add_sink(
        StaticSinkAdapter(static_folder),
        r'%s/(?P<filepath>.*)\Z' % swagger_uri,
    )
    app.add_route(
        swagger_uri,
        SwaggerUiResource(templates_folder, context),
    )
python
def register_swaggerui_app(app, swagger_uri, api_url, page_title='Swagger UI', favicon_url=None, config=None, uri_prefix=""): """:type app: falcon.API""" templates_folder = 'templates' static_folder = 'dist' default_config = { 'client_realm': 'null', 'client_id': 'null', 'client_secret': 'null', 'app_name': 'null', 'docExpansion': "none", 'jsonEditor': False, 'defaultModelRendering': 'schema', 'showRequestHeaders': False, 'supportedSubmitMethods': ['get', 'post', 'put', 'delete', 'patch'], } if config: default_config.update(config) default_context = { 'page_title': page_title, 'favicon_url': favicon_url, 'base_url': uri_prefix + swagger_uri, 'api_url': api_url, 'app_name': default_config.pop('app_name'), 'client_realm': default_config.pop('client_realm'), 'client_id': default_config.pop('client_id'), 'client_secret': default_config.pop('client_secret'), # Rest are just serialized into json string # for inclusion in the .js file 'config_json': json.dumps(default_config) } app.add_sink( StaticSinkAdapter(static_folder), r'%s/(?P<filepath>.*)\Z' % swagger_uri, ) app.add_route( swagger_uri, SwaggerUiResource(templates_folder, default_context) )
:type app: falcon.API
https://github.com/rdidyk/falcon-swagger-ui/blob/ea6909d78cd03178b1b0888452b2b4bb14e9d571/falcon_swagger_ui/resources.py#L62-L106
viniciuschiele/flask-apidoc
flask_apidoc/apidoc.py
ApiDoc.init_app
def init_app(self, app):
    """
    Register the flask url routes that serve the apidoc files.

    :param app: the flask application.
    """
    self.app = app
    config = app.config
    self.dynamic_url = config.get('APIDOC_DYNAMIC_URL', self.dynamic_url)
    self.allow_absolute_url = config.get('APIDOC_ALLOW_ABSOLUTE_URL',
                                         self.allow_absolute_url)

    # Normalize the base path so it always ends with a slash.
    base_url = self.url_path
    if not base_url.endswith('/'):
        base_url += '/'

    app.add_url_rule(base_url, 'docs', self.__send_static_file,
                     strict_slashes=True)
    app.add_url_rule(base_url + '<path:path>', 'docs',
                     self.__send_static_file, strict_slashes=True)
python
def init_app(self, app): """ Adds the flask url routes for the apidoc files. :param app: the flask application. """ self.app = app self.dynamic_url = self.app.config.get('APIDOC_DYNAMIC_URL', self.dynamic_url) self.allow_absolute_url = self.app.config.get('APIDOC_ALLOW_ABSOLUTE_URL', self.allow_absolute_url) url = self.url_path if not self.url_path.endswith('/'): url += '/' app.add_url_rule(url, 'docs', self.__send_static_file, strict_slashes=True) app.add_url_rule(url + '<path:path>', 'docs', self.__send_static_file, strict_slashes=True)
Adds the flask url routes for the apidoc files. :param app: the flask application.
https://github.com/viniciuschiele/flask-apidoc/blob/5c3dfd9aae7780622e843bf7e95863264df3a488/flask_apidoc/apidoc.py#L43-L60
viniciuschiele/flask-apidoc
flask_apidoc/apidoc.py
ApiDoc.__send_static_file
def __send_static_file(self, path=None):
    """
    Send an apidoc file from the apidoc folder to the browser.

    :param path: the apidoc file; defaults to the index page.
    """
    path = path or 'index.html'
    file_name = join(self.folder_path, path)

    # api_project.js hard-codes the absolute url,
    # so we rewrite it to the current url on the fly.
    if self.dynamic_url and path == 'api_project.js':
        return self.__send_api_file(file_name)

    if self.allow_absolute_url and path == 'main.js':
        return self.__send_main_file(file_name)

    # Every other apidoc file is served as a plain static file.
    return self.app.send_static_file(file_name)
python
def __send_static_file(self, path=None): """ Send apidoc files from the apidoc folder to the browser. :param path: the apidoc file. """ if not path: path = 'index.html' file_name = join(self.folder_path, path) # the api_project.js has the absolute url # hard coded so we replace them by the current url. if self.dynamic_url and path == 'api_project.js': return self.__send_api_file(file_name) if self.allow_absolute_url and path == 'main.js': return self.__send_main_file(file_name) # Any other apidoc file is treated as a normal static file return self.app.send_static_file(file_name)
Send apidoc files from the apidoc folder to the browser. :param path: the apidoc file.
https://github.com/viniciuschiele/flask-apidoc/blob/5c3dfd9aae7780622e843bf7e95863264df3a488/flask_apidoc/apidoc.py#L62-L82
viniciuschiele/flask-apidoc
flask_apidoc/apidoc.py
ApiDoc.__send_api_file
def __send_api_file(self, file_name):
    """
    Send an apidoc file from the apidoc folder to the browser,
    replacing all absolute urls in the file by the current url.

    :param file_name: the apidoc file.
    """
    file_name = join(self.app.static_folder, file_name)

    with codecs.open(file_name, 'r', 'utf-8') as file:
        data = file.read()

    # replaces the hard coded url by the current url.
    api_project = self.__read_api_project()
    old_url = api_project.get('url')

    # replaces the project's url only if it is present in the file.
    if old_url:
        new_url = request.url_root.strip('/')
        data = data.replace(old_url, new_url)

    # creates a flask response to send the file to the browser
    headers = Headers()
    # Content-Length must reflect the rewritten payload: the url
    # replacement can change the byte length, so the on-disk file
    # size would be wrong here.
    headers['Content-Length'] = len(data.encode('utf-8'))

    response = self.app.response_class(data,
                                       mimetype=mimetypes.guess_type(file_name)[0],
                                       headers=headers,
                                       direct_passthrough=True)
    response.last_modified = int(getmtime(file_name))
    return response
python
def __send_api_file(self, file_name): """ Send apidoc files from the apidoc folder to the browser. This method replaces all absolute urls in the file by the current url. :param file_name: the apidoc file. """ file_name = join(self.app.static_folder, file_name) with codecs.open(file_name, 'r', 'utf-8') as file: data = file.read() # replaces the hard coded url by the current url. api_project = self.__read_api_project() old_url = api_project.get('url') # replaces the project's url only if it is present in the file. if old_url: new_url = request.url_root.strip('/') data = data.replace(old_url, new_url) # creates a flask response to send # the file to the browser headers = Headers() headers['Content-Length'] = getsize(file_name) response = self.app.response_class(data, mimetype=mimetypes.guess_type(file_name)[0], headers=headers, direct_passthrough=True) response.last_modified = int(getmtime(file_name)) return response
Send apidoc files from the apidoc folder to the browser. This method replaces all absolute urls in the file by the current url. :param file_name: the apidoc file.
https://github.com/viniciuschiele/flask-apidoc/blob/5c3dfd9aae7780622e843bf7e95863264df3a488/flask_apidoc/apidoc.py#L85-L121
viniciuschiele/flask-apidoc
flask_apidoc/apidoc.py
ApiDoc.__send_main_file
def __send_main_file(self, file_name):
    """
    Send the apidoc main.js file to the browser, patched so that
    article urls that are already absolute are not prefixed with
    the project url.

    :param file_name: the apidoc file.
    """
    file_name = join(self.app.static_folder, file_name)

    with codecs.open(file_name, 'r', 'utf-8') as file:
        data = file.read()

    # Only prefix the project url when the article url is not absolute.
    data = data.replace(
        'fields.article.url = apiProject.url + fields.article.url;',
        '''if (fields.article.url.substr(0, 4).toLowerCase() !== \'http\') fields.article.url = apiProject.url + fields.article.url;''')

    headers = Headers()
    # The replacement above grows the payload, so the length must be
    # computed from the rewritten data, not from the file on disk.
    headers['Content-Length'] = len(data.encode('utf-8'))

    response = self.app.response_class(data,
                                       mimetype=mimetypes.guess_type(file_name)[0],
                                       headers=headers,
                                       direct_passthrough=True)
    response.last_modified = int(getmtime(file_name))
    return response
python
def __send_main_file(self, file_name): """ Send apidoc files from the apidoc folder to the browser. This method replaces all absolute urls in the file by the current url. :param file_name: the apidoc file. """ file_name = join(self.app.static_folder, file_name) with codecs.open(file_name, 'r', 'utf-8') as file: data = file.read() data = data.replace( 'fields.article.url = apiProject.url + fields.article.url;', '''if (fields.article.url.substr(0, 4).toLowerCase() !== \'http\') fields.article.url = apiProject.url + fields.article.url;''') headers = Headers() headers['Content-Length'] = getsize(file_name) response = self.app.response_class(data, mimetype=mimetypes.guess_type(file_name)[0], headers=headers, direct_passthrough=True) response.last_modified = int(getmtime(file_name)) return response
Send apidoc files from the apidoc folder to the browser. This method replaces all absolute urls in the file by the current url. :param file_name: the apidoc file.
https://github.com/viniciuschiele/flask-apidoc/blob/5c3dfd9aae7780622e843bf7e95863264df3a488/flask_apidoc/apidoc.py#L124-L152
viniciuschiele/flask-apidoc
flask_apidoc/apidoc.py
ApiDoc.__read_api_project
def __read_api_project(self):
    """
    Read and parse the api_project.json file from the apidoc folder.

    :return: the parsed json data
    """
    file_name = join(self.app.static_folder, self.folder_path,
                     'api_project.json')
    with open(file_name, 'rt') as file:
        return json.loads(file.read())
python
def __read_api_project(self): """ Reads the api_project.json file from apidoc folder as a json string. :return: a json string """ file_name = join(self.app.static_folder, self.folder_path, 'api_project.json') with open(file_name, 'rt') as file: data = file.read() return json.loads(data)
Reads and parses the api_project.json file from the apidoc folder. :return: the parsed json data
https://github.com/viniciuschiele/flask-apidoc/blob/5c3dfd9aae7780622e843bf7e95863264df3a488/flask_apidoc/apidoc.py#L155-L166
dchaplinsky/aiohttp_validate
aiohttp_validate/__init__.py
_raise_exception
def _raise_exception(cls, reason, data=None):
    """
    Raise an aiohttp exception of type ``cls`` carrying the reason
    (and optional error details) as a json payload.
    """
    payload = {"error": reason}
    if data is not None:
        payload["errors"] = data
    raise cls(
        text=json.dumps(payload),
        content_type="application/json"
    )
python
def _raise_exception(cls, reason, data=None): """ Raise aiohttp exception and pass payload/reason into it. """ text_dict = { "error": reason } if data is not None: text_dict["errors"] = data raise cls( text=json.dumps(text_dict), content_type="application/json" )
Raise aiohttp exception and pass payload/reason into it.
https://github.com/dchaplinsky/aiohttp_validate/blob/e581cf51df6fcc377c7704315a487b10c3dd6000/aiohttp_validate/__init__.py#L15-L29
dchaplinsky/aiohttp_validate
aiohttp_validate/__init__.py
_validate_data
def _validate_data(data, schema, validator_cls):
    """Validate the dict against given schema (using given validator class)."""
    validator = validator_cls(schema)
    errors_by_key = defaultdict(list)

    for err in validator.iter_errors(data):
        path = err.schema_path
        # Code courtesy: Ruslan Karalkin
        # The error's schema path points at the property that failed
        # validation.  For a schema such as
        #   {"type": "object",
        #    "properties": {"foo": {"type": "number"},
        #                   "bar": {"type": "string"}},
        #    "required": ["foo", "bar"]}
        # err.schema_path looks like ['required'] or
        # ['properties', 'foo', 'type'].
        if "properties" in path:
            path.remove("properties")
        key = path.popleft()

        # A missing required property reports the property name as the
        # first single-quoted word of the error message.
        if key == "required":
            key = err.message.split("'")[1]

        errors_by_key[key].append(str(err))

    if errors_by_key:
        _raise_exception(
            web.HTTPBadRequest,
            "Request is invalid; There are validation errors.",
            errors_by_key)
python
def _validate_data(data, schema, validator_cls): """ Validate the dict against given schema (using given validator class). """ validator = validator_cls(schema) _errors = defaultdict(list) for err in validator.iter_errors(data): path = err.schema_path # Code courtesy: Ruslan Karalkin # Looking in error schema path for # property that failed validation # Schema example: # { # "type": "object", # "properties": { # "foo": {"type": "number"}, # "bar": {"type": "string"} # } # "required": ["foo", "bar"] # } # # Related err.schema_path examples: # ['required'], # ['properties', 'foo', 'type'] if "properties" in path: path.remove("properties") key = path.popleft() # If validation failed by missing property, # then parse err.message to find property name # as it always first word enclosed in quotes if key == "required": key = err.message.split("'")[1] _errors[key].append(str(err)) if _errors: _raise_exception( web.HTTPBadRequest, "Request is invalid; There are validation errors.", _errors)
Validate the dict against given schema (using given validator class).
https://github.com/dchaplinsky/aiohttp_validate/blob/e581cf51df6fcc377c7704315a487b10c3dd6000/aiohttp_validate/__init__.py#L32-L74
dchaplinsky/aiohttp_validate
aiohttp_validate/__init__.py
validate
def validate(request_schema=None, response_schema=None):
    """
    Decorate a request handler to make it automagically validate its
    request and response.

    :param request_schema: json schema the decoded request body must match
    :param response_schema: json schema the handler's return value must match
    :return: the decorating function
    """
    def wrapper(func):
        # Validating the schemas itself.
        # Die with exception if they aren't valid
        if request_schema is not None:
            _request_schema_validator = validator_for(request_schema)
            _request_schema_validator.check_schema(request_schema)

        if response_schema is not None:
            _response_schema_validator = validator_for(response_schema)
            _response_schema_validator.check_schema(response_schema)

        @asyncio.coroutine
        @functools.wraps(func)
        def wrapped(*args):
            # Plain callables are wrapped so `yield from` works uniformly.
            if asyncio.iscoroutinefunction(func):
                coro = func
            else:
                coro = asyncio.coroutine(func)

            # Supports class based views see web.View
            if isinstance(args[0], AbstractView):
                class_based = True
                request = args[0].request
            else:
                class_based = False
                request = args[-1]

            # Strictly expect json object here
            try:
                req_body = yield from request.json()
            except (json.decoder.JSONDecodeError, TypeError):
                _raise_exception(
                    web.HTTPBadRequest,
                    "Request is malformed; could not decode JSON object.")

            # Validate request data against request schema (if given)
            if request_schema is not None:
                _validate_data(req_body, request_schema,
                               _request_schema_validator)

            # Class-based handlers receive `self` in front of (body, request).
            coro_args = req_body, request
            if class_based:
                coro_args = (args[0], ) + coro_args

            context = yield from coro(*coro_args)

            # No validation of response for websockets stream
            if isinstance(context, web.StreamResponse):
                return context

            # Validate response data against response schema (if given)
            if response_schema is not None:
                _validate_data(context, response_schema,
                               _response_schema_validator)

            try:
                return web.json_response(context)
            except (TypeError, ):
                _raise_exception(
                    web.HTTPInternalServerError,
                    "Response is malformed; could not encode JSON object.")

        # Store schemas in wrapped handlers, so it later can be reused
        setattr(wrapped, "_request_schema", request_schema)
        setattr(wrapped, "_response_schema", response_schema)
        return wrapped
    return wrapper
python
def validate(request_schema=None, response_schema=None): """ Decorate request handler to make it automagically validate it's request and response. """ def wrapper(func): # Validating the schemas itself. # Die with exception if they aren't valid if request_schema is not None: _request_schema_validator = validator_for(request_schema) _request_schema_validator.check_schema(request_schema) if response_schema is not None: _response_schema_validator = validator_for(response_schema) _response_schema_validator.check_schema(response_schema) @asyncio.coroutine @functools.wraps(func) def wrapped(*args): if asyncio.iscoroutinefunction(func): coro = func else: coro = asyncio.coroutine(func) # Supports class based views see web.View if isinstance(args[0], AbstractView): class_based = True request = args[0].request else: class_based = False request = args[-1] # Strictly expect json object here try: req_body = yield from request.json() except (json.decoder.JSONDecodeError, TypeError): _raise_exception( web.HTTPBadRequest, "Request is malformed; could not decode JSON object.") # Validate request data against request schema (if given) if request_schema is not None: _validate_data(req_body, request_schema, _request_schema_validator) coro_args = req_body, request if class_based: coro_args = (args[0], ) + coro_args context = yield from coro(*coro_args) # No validation of response for websockets stream if isinstance(context, web.StreamResponse): return context # Validate response data against response schema (if given) if response_schema is not None: _validate_data(context, response_schema, _response_schema_validator) try: return web.json_response(context) except (TypeError, ): _raise_exception( web.HTTPInternalServerError, "Response is malformed; could not encode JSON object.") # Store schemas in wrapped handlers, so it later can be reused setattr(wrapped, "_request_schema", request_schema) setattr(wrapped, "_response_schema", response_schema) return wrapped return wrapper
Decorate request handler to make it automagically validate its request and response.
https://github.com/dchaplinsky/aiohttp_validate/blob/e581cf51df6fcc377c7704315a487b10c3dd6000/aiohttp_validate/__init__.py#L77-L148
biolink/biolink-model
metamodel/generators/jsonschemagen.py
cli
def cli(yamlfile, inline, format):
    """ Generate JSON Schema representation of a biolink model """
    generator = JsonSchemaGenerator(yamlfile, format)
    print(generator.serialize(inline=inline))
python
def cli(yamlfile, inline, format): """ Generate JSON Schema representation of a biolink model """ print(JsonSchemaGenerator(yamlfile, format).serialize(inline=inline))
Generate JSON Schema representation of a biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/jsonschemagen.py#L90-L92
biolink/biolink-model
metamodel/utils/yamlutils.py
root_representer
def root_representer(dumper: yaml.Dumper, data: YAMLRoot):
    """
    YAML serialization callback that omits uninteresting attributes.

    Private attributes (leading underscore), ``None`` values and empty
    dicts/lists are filtered out before representing the mapping.

    @param dumper: data dumper
    @param data: data to be dumped
    @return: representation of the filtered mapping
    """
    kept = {
        k: v
        for k, v in data.__dict__.items()
        if not k.startswith('_')
        and v is not None
        and (v or not isinstance(v, (dict, list)))
    }
    return dumper.represent_data(kept)
python
def root_representer(dumper: yaml.Dumper, data: YAMLRoot): """ YAML callback -- used to filter out empty values (None, {}, [] and false) @param dumper: data dumper @param data: data to be dumped @return: """ rval = dict() for k, v in data.__dict__.items(): if not k.startswith('_') and v is not None and (not isinstance(v, (dict, list)) or v): rval[k] = v return dumper.represent_data(rval)
YAML callback -- used to filter out empty values (None, {}, [] and false) @param dumper: data dumper @param data: data to be dumped @return:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/yamlutils.py#L47-L58
biolink/biolink-model
metamodel/generators/markdowngen.py
cli
def cli(yamlfile, format, dir, classes, img, noimages):
    """ Generate markdown documentation of a biolink model """
    generator = MarkdownGenerator(yamlfile, format)
    generator.serialize(classes=classes, directory=dir, image_dir=img,
                        noimages=noimages)
python
def cli(yamlfile, format, dir, classes, img, noimages): """ Generate markdown documentation of a biolink model """ MarkdownGenerator(yamlfile, format).serialize(classes=classes, directory=dir, image_dir=img, noimages=noimages)
Generate markdown documentation of a biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/markdowngen.py#L316-L318
biolink/biolink-model
metamodel/generators/markdowngen.py
MarkdownGenerator.is_secondary_ref
def is_secondary_ref(self, en: str) -> bool:
    """
    Determine whether 'en' names something in the neighborhood of the
    requested classes.

    @param en: element name
    @return: True if 'en' is the name of a slot, class or type in the
        immediate neighborhood of what we are building
    """
    # Without a class restriction, everything counts as in-neighborhood.
    if not self.gen_classes:
        return True
    if en in self.schema.classes:
        return en in self.gen_classes_neighborhood.classrefs
    if en in self.schema.slots:
        return en in self.gen_classes_neighborhood.slotrefs
    if en in self.schema.types:
        return en in self.gen_classes_neighborhood.typerefs
    return True
python
def is_secondary_ref(self, en: str) -> bool: """ Determine whether 'en' is the name of something in the neighborhood of the requested classes @param en: element name @return: True if 'en' is the name of a slot, class or type in the immediate neighborhood of of what we are building """ if not self.gen_classes: return True elif en in self.schema.classes: return en in self.gen_classes_neighborhood.classrefs elif en in self.schema.slots: return en in self.gen_classes_neighborhood.slotrefs elif en in self.schema.types: return en in self.gen_classes_neighborhood.typerefs else: return True
Determine whether 'en' is the name of something in the neighborhood of the requested classes @param en: element name @return: True if 'en' is the name of a slot, class or type in the immediate neighborhood of of what we are building
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/markdowngen.py#L191-L207
biolink/biolink-model
metamodel/generators/markdowngen.py
MarkdownGenerator.bbin
def bbin(obj: Union[str, Element]) -> str:
    """
    Boldify built in types.

    @param obj: object name or id
    @return: the element's name, the bold-marked builtin name,
        or obj unchanged
    """
    if isinstance(obj, Element):
        return obj.name
    if obj in builtin_names:
        return f'**{obj}**'
    return obj
python
def bbin(obj: Union[str, Element]) -> str: """ Boldify built in types @param obj: object name or id @return: """ return obj.name if isinstance(obj, Element ) else f'**{obj}**' if obj in builtin_names else obj
Boldify built in types @param obj: object name or id @return:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/markdowngen.py#L254-L260
biolink/biolink-model
metamodel/generators/markdowngen.py
MarkdownGenerator.desc_for
def desc_for(self, obj: Element, doing_descs: bool) -> str: """ Return a description for object if it is unique (different than its parent) @param obj: object to be described @param doing_descs: If false, always return an empty string @return: text or empty string """ if obj.description and doing_descs: if isinstance(obj, SlotDefinition) and obj.is_a: parent = self.schema.slots[obj.is_a] elif isinstance(obj, ClassDefinition) and obj.is_a: parent = self.schema.classes[obj.is_a] else: parent = None return '' if parent and obj.description == parent.description else obj.description return ''
python
def desc_for(self, obj: Element, doing_descs: bool) -> str: """ Return a description for object if it is unique (different than its parent) @param obj: object to be described @param doing_descs: If false, always return an empty string @return: text or empty string """ if obj.description and doing_descs: if isinstance(obj, SlotDefinition) and obj.is_a: parent = self.schema.slots[obj.is_a] elif isinstance(obj, ClassDefinition) and obj.is_a: parent = self.schema.classes[obj.is_a] else: parent = None return '' if parent and obj.description == parent.description else obj.description return ''
Return a description for object if it is unique (different than its parent) @param obj: object to be described @param doing_descs: If false, always return an empty string @return: text or empty string
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/markdowngen.py#L262-L277
biolink/biolink-model
metamodel/generators/markdowngen.py
MarkdownGenerator.link
def link(self, ref: Optional[Union[str, Element]], *, after_link: str = None, use_desc: bool=False, add_subset: bool=True) -> str: """ Create a link to ref if appropriate. @param ref: the name or value of a class, slot, type or the name of a built in type. @param after_link: Text to put between link and description @param use_desc: True means append a description after the link if available @param add_subset: True means add any subset information that is available @return: """ obj = self.obj_for(ref) if isinstance(ref, str) else ref nl = '\n' if isinstance(obj, str) or obj is None or not self.is_secondary_ref(obj.name): return self.bbin(ref) if isinstance(obj, SlotDefinition): link_name = ((be(obj.domain) + '.') if obj.alias else '') + self.aliased_slot_name(obj) link_ref = underscore(obj.name) else: link_name = self.obj_name(obj) link_ref = link_name desc = self.desc_for(obj, use_desc) return f'[{link_name}]' \ f'({link_ref}.{self.format})' + \ (f' *subsets*: ({"| ".join(obj.in_subset)})' if add_subset and obj.in_subset else '') + \ (f' {after_link} ' if after_link else '') + (f' - {desc.split(nl)[0]}' if desc else '')
python
def link(self, ref: Optional[Union[str, Element]], *, after_link: str = None, use_desc: bool=False, add_subset: bool=True) -> str: """ Create a link to ref if appropriate. @param ref: the name or value of a class, slot, type or the name of a built in type. @param after_link: Text to put between link and description @param use_desc: True means append a description after the link if available @param add_subset: True means add any subset information that is available @return: """ obj = self.obj_for(ref) if isinstance(ref, str) else ref nl = '\n' if isinstance(obj, str) or obj is None or not self.is_secondary_ref(obj.name): return self.bbin(ref) if isinstance(obj, SlotDefinition): link_name = ((be(obj.domain) + '.') if obj.alias else '') + self.aliased_slot_name(obj) link_ref = underscore(obj.name) else: link_name = self.obj_name(obj) link_ref = link_name desc = self.desc_for(obj, use_desc) return f'[{link_name}]' \ f'({link_ref}.{self.format})' + \ (f' *subsets*: ({"| ".join(obj.in_subset)})' if add_subset and obj.in_subset else '') + \ (f' {after_link} ' if after_link else '') + (f' - {desc.split(nl)[0]}' if desc else '')
Create a link to ref if appropriate. @param ref: the name or value of a class, slot, type or the name of a built in type. @param after_link: Text to put between link and description @param use_desc: True means append a description after the link if available @param add_subset: True means add any subset information that is available @return:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/markdowngen.py#L279-L303
biolink/biolink-model
metamodel/generators/owlgen.py
cli
def cli(yamlfile, format, output): """ Generate an OWL representation of a biolink model """ print(OwlSchemaGenerator(yamlfile, format).serialize(output=output))
python
def cli(yamlfile, format, output): """ Generate an OWL representation of a biolink model """ print(OwlSchemaGenerator(yamlfile, format).serialize(output=output))
Generate an OWL representation of a biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/owlgen.py#L197-L199
biolink/biolink-model
metamodel/generators/owlgen.py
OwlSchemaGenerator.visit_slot
def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None: """ Add a slot definition per slot @param slot_name: @param slot: @return: """ # Note: We use the raw name in OWL and add a subProperty arc slot_uri = self.prop_uri(slot.name) # Parent slots if slot.is_a: self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(slot.is_a))) for mixin in slot.mixins: self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(mixin))) # Slot range if not slot.range or slot.range in builtin_names: self.graph.add((slot_uri, RDF.type, OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty)) self.graph.add((slot_uri, RDFS.range, URIRef(builtin_uri(slot.range, expand=True)))) elif slot.range in self.schema.types: self.graph.add((slot_uri, RDF.type, OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty)) self.graph.add((slot_uri, RDFS.range, self.type_uri(slot.range))) else: self.graph.add((slot_uri, RDF.type, OWL.ObjectProperty if slot.object_property else OWL.AnnotationProperty)) self.graph.add((slot_uri, RDFS.range, self.class_uri(slot.range))) # Slot domain if slot.domain: self.graph.add((slot_uri, RDFS.domain, self.class_uri(slot.domain))) # Annotations self.graph.add((slot_uri, RDFS.label, Literal(slot.name))) if slot.description: self.graph.add((slot_uri, OBO.IAO_0000115, Literal(slot.description)))
python
def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None: """ Add a slot definition per slot @param slot_name: @param slot: @return: """ # Note: We use the raw name in OWL and add a subProperty arc slot_uri = self.prop_uri(slot.name) # Parent slots if slot.is_a: self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(slot.is_a))) for mixin in slot.mixins: self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(mixin))) # Slot range if not slot.range or slot.range in builtin_names: self.graph.add((slot_uri, RDF.type, OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty)) self.graph.add((slot_uri, RDFS.range, URIRef(builtin_uri(slot.range, expand=True)))) elif slot.range in self.schema.types: self.graph.add((slot_uri, RDF.type, OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty)) self.graph.add((slot_uri, RDFS.range, self.type_uri(slot.range))) else: self.graph.add((slot_uri, RDF.type, OWL.ObjectProperty if slot.object_property else OWL.AnnotationProperty)) self.graph.add((slot_uri, RDFS.range, self.class_uri(slot.range))) # Slot domain if slot.domain: self.graph.add((slot_uri, RDFS.domain, self.class_uri(slot.domain))) # Annotations self.graph.add((slot_uri, RDFS.label, Literal(slot.name))) if slot.description: self.graph.add((slot_uri, OBO.IAO_0000115, Literal(slot.description)))
Add a slot definition per slot @param slot_name: @param slot: @return:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/owlgen.py#L153-L189
biolink/biolink-model
metamodel/utils/loadschema.py
load_raw_schema
def load_raw_schema(data: Union[str, TextIO], source_file: str=None, source_file_date: str=None, source_file_size: int=None, base_dir: Optional[str]=None) -> SchemaDefinition: """ Load and flatten SchemaDefinition from a file name, a URL or a block of text @param data: URL, file name or block of text @param source_file: Source file name for the schema @param source_file_date: timestamp of source file @param source_file_size: size of source file @param base_dir: Working directory of sources @return: Map from schema name to SchemaDefinition """ if isinstance(data, str): if '\n' in data: return load_raw_schema((cast(TextIO, StringIO(data)))) # Not sure why typing doesn't see StringIO as TextIO elif '://' in data: # TODO: complete and test URL access req = Request(data) req.add_header("Accept", "application/yaml, text/yaml;q=0.9") with urlopen(req) as response: return load_raw_schema(response) else: fname = os.path.join(base_dir if base_dir else '', data) with open(fname) as f: return load_raw_schema(f, data, time.ctime(os.path.getmtime(fname)), os.path.getsize(fname)) else: schemadefs = yaml.load(data, DupCheckYamlLoader) # Some schemas don't have an outermost identifier. Construct one if necessary if 'name' in schemadefs: schemadefs = {schemadefs.pop('name'): schemadefs} elif 'id' in schemadefs: schemadefs = {schemadefs['id']: schemadefs} elif len(schemadefs) > 1 or not isinstance(list(schemadefs.values())[0], dict): schemadefs = {'Unnamed Schema': schemadefs} schema: SchemaDefinition = None for sname, sdef in {k: SchemaDefinition(name=k, **v) for k, v in schemadefs.items()}.items(): if schema is None: schema = sdef schema.source_file = os.path.basename(source_file) if source_file else None schema.source_file_date = source_file_date schema.source_file_size = source_file_size schema.generation_date = datetime.now().strftime("%Y-%m-%d %H:%M") schema.metamodel_version = metamodel_version else: merge_schemas(schema, sdef) return schema
python
def load_raw_schema(data: Union[str, TextIO], source_file: str=None, source_file_date: str=None, source_file_size: int=None, base_dir: Optional[str]=None) -> SchemaDefinition: """ Load and flatten SchemaDefinition from a file name, a URL or a block of text @param data: URL, file name or block of text @param source_file: Source file name for the schema @param source_file_date: timestamp of source file @param source_file_size: size of source file @param base_dir: Working directory of sources @return: Map from schema name to SchemaDefinition """ if isinstance(data, str): if '\n' in data: return load_raw_schema((cast(TextIO, StringIO(data)))) # Not sure why typing doesn't see StringIO as TextIO elif '://' in data: # TODO: complete and test URL access req = Request(data) req.add_header("Accept", "application/yaml, text/yaml;q=0.9") with urlopen(req) as response: return load_raw_schema(response) else: fname = os.path.join(base_dir if base_dir else '', data) with open(fname) as f: return load_raw_schema(f, data, time.ctime(os.path.getmtime(fname)), os.path.getsize(fname)) else: schemadefs = yaml.load(data, DupCheckYamlLoader) # Some schemas don't have an outermost identifier. Construct one if necessary if 'name' in schemadefs: schemadefs = {schemadefs.pop('name'): schemadefs} elif 'id' in schemadefs: schemadefs = {schemadefs['id']: schemadefs} elif len(schemadefs) > 1 or not isinstance(list(schemadefs.values())[0], dict): schemadefs = {'Unnamed Schema': schemadefs} schema: SchemaDefinition = None for sname, sdef in {k: SchemaDefinition(name=k, **v) for k, v in schemadefs.items()}.items(): if schema is None: schema = sdef schema.source_file = os.path.basename(source_file) if source_file else None schema.source_file_date = source_file_date schema.source_file_size = source_file_size schema.generation_date = datetime.now().strftime("%Y-%m-%d %H:%M") schema.metamodel_version = metamodel_version else: merge_schemas(schema, sdef) return schema
Load and flatten SchemaDefinition from a file name, a URL or a block of text @param data: URL, file name or block of text @param source_file: Source file name for the schema @param source_file_date: timestamp of source file @param source_file_size: size of source file @param base_dir: Working directory of sources @return: Map from schema name to SchemaDefinition
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/loadschema.py#L14-L61
biolink/biolink-model
metamodel/utils/loadschema.py
DupCheckYamlLoader.map_constructor
def map_constructor(self, loader, node, deep=False): """ Walk the mapping, recording any duplicate keys. """ mapping = {} for key_node, value_node in node.value: key = loader.construct_object(key_node, deep=deep) value = loader.construct_object(value_node, deep=deep) if key in mapping: raise ValueError(f"Duplicate key: \"{key}\"") mapping[key] = value return mapping
python
def map_constructor(self, loader, node, deep=False): """ Walk the mapping, recording any duplicate keys. """ mapping = {} for key_node, value_node in node.value: key = loader.construct_object(key_node, deep=deep) value = loader.construct_object(value_node, deep=deep) if key in mapping: raise ValueError(f"Duplicate key: \"{key}\"") mapping[key] = value return mapping
Walk the mapping, recording any duplicate keys.
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/loadschema.py#L69-L81
biolink/biolink-model
metamodel/utils/comparefiles.py
cli
def cli(file1, file2, comments) -> int: """ Compare file1 to file2 using a filter """ sys.exit(compare_files(file1, file2, comments))
python
def cli(file1, file2, comments) -> int: """ Compare file1 to file2 using a filter """ sys.exit(compare_files(file1, file2, comments))
Compare file1 to file2 using a filter
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/comparefiles.py#L26-L28
biolink/biolink-model
metamodel/generators/golrgen.py
cli
def cli(file, dir, format): """ Generate GOLR representation of a biolink model """ print(GolrSchemaGenerator(file, format).serialize(dirname=dir))
python
def cli(file, dir, format): """ Generate GOLR representation of a biolink model """ print(GolrSchemaGenerator(file, format).serialize(dirname=dir))
Generate GOLR representation of a biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/golrgen.py#L87-L89
biolink/biolink-model
metamodel/generators/dotgen.py
cli
def cli(yamlfile, directory, out, classname, format): """ Generate graphviz representations of the biolink model """ DotGenerator(yamlfile, format).serialize(classname=classname, dirname=directory, filename=out)
python
def cli(yamlfile, directory, out, classname, format): """ Generate graphviz representations of the biolink model """ DotGenerator(yamlfile, format).serialize(classname=classname, dirname=directory, filename=out)
Generate graphviz representations of the biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/dotgen.py#L101-L103
biolink/biolink-model
metamodel/generators/jsonldgen.py
cli
def cli(yamlfile, format, context): """ Generate JSONLD file from biolink schema """ print(JSONLDGenerator(yamlfile, format).serialize(context=context))
python
def cli(yamlfile, format, context): """ Generate JSONLD file from biolink schema """ print(JSONLDGenerator(yamlfile, format).serialize(context=context))
Generate JSONLD file from biolink schema
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/jsonldgen.py#L102-L104
biolink/biolink-model
metamodel/generators/rdfgen.py
cli
def cli(yamlfile, format, output, context): """ Generate an RDF representation of a biolink model """ print(RDFGenerator(yamlfile, format).serialize(output=output, context=context))
python
def cli(yamlfile, format, output, context): """ Generate an RDF representation of a biolink model """ print(RDFGenerator(yamlfile, format).serialize(output=output, context=context))
Generate an RDF representation of a biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/rdfgen.py#L48-L50
biolink/biolink-model
metamodel/utils/generator.py
Generator.cls_slots
def cls_slots(self, cls: CLASS_OR_CLASSNAME) -> List[SlotDefinition]: """ Return the list of slots directly included in the class definition. Includes slots whose domain is cls -- as declared in slot.domain or class.slots Does not include slots declared in mixins, apply_to or is_a links @param cls: class name or class definition name @return: all direct class slots """ if not isinstance(cls, ClassDefinition): cls = self.schema.classes[cls] return [self.schema.slots[s] for s in cls.slots]
python
def cls_slots(self, cls: CLASS_OR_CLASSNAME) -> List[SlotDefinition]: """ Return the list of slots directly included in the class definition. Includes slots whose domain is cls -- as declared in slot.domain or class.slots Does not include slots declared in mixins, apply_to or is_a links @param cls: class name or class definition name @return: all direct class slots """ if not isinstance(cls, ClassDefinition): cls = self.schema.classes[cls] return [self.schema.slots[s] for s in cls.slots]
Return the list of slots directly included in the class definition. Includes slots whose domain is cls -- as declared in slot.domain or class.slots Does not include slots declared in mixins, apply_to or is_a links @param cls: class name or class definition name @return: all direct class slots
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L95-L106
biolink/biolink-model
metamodel/utils/generator.py
Generator.all_slots
def all_slots(self, cls: CLASS_OR_CLASSNAME, *, cls_slots_first: bool = False) \ -> List[SlotDefinition]: """ Return all slots that are part of the class definition. This includes all is_a, mixin and apply_to slots but does NOT include slot_usage targets. If class B has a slot_usage entry for slot "s", only the slot definition for the redefined slot will be included, not its base. Slots are added in the order they appear in classes, with recursive is_a's being added first followed by mixins and finally apply_tos @param cls: class definition or class definition name @param cls_slots_first: True means return class slots at the top of the list @return: ordered list of slots in the class with slot usages removed """ def merge_definitions(cls_name: Optional[ClassDefinitionName]) -> None: if cls_name: for slot in self.all_slots(cls_name): aliased_name = self.aliased_slot_name(slot) if aliased_name not in known_slots: known_slots.add(aliased_name) rval.append(slot) if not isinstance(cls, ClassDefinition): cls = self.schema.classes[cls] known_slots: Set[str] = self.aliased_slot_names(cls.slots) rval: List[SlotDefinition] = [] if cls_slots_first: rval += self.cls_slots(cls) for mixin in cls.mixins: merge_definitions(mixin) merge_definitions(cls.is_a) else: merge_definitions(cls.is_a) for mixin in cls.mixins: merge_definitions(mixin) rval += self.cls_slots(cls) return rval
python
def all_slots(self, cls: CLASS_OR_CLASSNAME, *, cls_slots_first: bool = False) \ -> List[SlotDefinition]: """ Return all slots that are part of the class definition. This includes all is_a, mixin and apply_to slots but does NOT include slot_usage targets. If class B has a slot_usage entry for slot "s", only the slot definition for the redefined slot will be included, not its base. Slots are added in the order they appear in classes, with recursive is_a's being added first followed by mixins and finally apply_tos @param cls: class definition or class definition name @param cls_slots_first: True means return class slots at the top of the list @return: ordered list of slots in the class with slot usages removed """ def merge_definitions(cls_name: Optional[ClassDefinitionName]) -> None: if cls_name: for slot in self.all_slots(cls_name): aliased_name = self.aliased_slot_name(slot) if aliased_name not in known_slots: known_slots.add(aliased_name) rval.append(slot) if not isinstance(cls, ClassDefinition): cls = self.schema.classes[cls] known_slots: Set[str] = self.aliased_slot_names(cls.slots) rval: List[SlotDefinition] = [] if cls_slots_first: rval += self.cls_slots(cls) for mixin in cls.mixins: merge_definitions(mixin) merge_definitions(cls.is_a) else: merge_definitions(cls.is_a) for mixin in cls.mixins: merge_definitions(mixin) rval += self.cls_slots(cls) return rval
Return all slots that are part of the class definition. This includes all is_a, mixin and apply_to slots but does NOT include slot_usage targets. If class B has a slot_usage entry for slot "s", only the slot definition for the redefined slot will be included, not its base. Slots are added in the order they appear in classes, with recursive is_a's being added first followed by mixins and finally apply_tos @param cls: class definition or class definition name @param cls_slots_first: True means return class slots at the top of the list @return: ordered list of slots in the class with slot usages removed
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L108-L143
biolink/biolink-model
metamodel/utils/generator.py
Generator.ancestors
def ancestors(self, definition: Union[SLOT_OR_SLOTNAME, CLASS_OR_CLASSNAME]) \ -> List[Union[SlotDefinitionName, ClassDefinitionName]]: """ Return an ordered list of ancestor names for the supplied slot or class @param definition: Slot or class name or definition @return: List of ancestor names """ definition = self.obj_for(definition) if definition is not None: return [definition.name] + self.ancestors(definition.is_a) else: return []
python
def ancestors(self, definition: Union[SLOT_OR_SLOTNAME, CLASS_OR_CLASSNAME]) \ -> List[Union[SlotDefinitionName, ClassDefinitionName]]: """ Return an ordered list of ancestor names for the supplied slot or class @param definition: Slot or class name or definition @return: List of ancestor names """ definition = self.obj_for(definition) if definition is not None: return [definition.name] + self.ancestors(definition.is_a) else: return []
Return an ordered list of ancestor names for the supplied slot or class @param definition: Slot or class name or definition @return: List of ancestor names
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L145-L157
biolink/biolink-model
metamodel/utils/generator.py
Generator.neighborhood
def neighborhood(self, elements: List[ELEMENT_NAME]) \ -> References: """ Return a list of all slots, classes and types that touch any element in elements, including the element itself @param elements: Elements to do proximity with @return: All slots and classes that touch element """ touches = References() for element in elements: if element in self.schema.classes: touches.classrefs.add(element) if None in touches.classrefs: raise ValueError("1") cls = self.schema.classes[element] if cls.is_a: touches.classrefs.add(cls.is_a) if None in touches.classrefs: raise ValueError("1") # Mixins include apply_to's touches.classrefs.update(set(cls.mixins)) for slotname in cls.slots: slot = self.schema.slots[slotname] if slot.range in self.schema.classes: touches.classrefs.add(slot.range) elif slot.range in self.schema.types: touches.typerefs.add(slot.range) if None in touches.classrefs: raise ValueError("1") if element in self.synopsis.rangerefs: for slotname in self.synopsis.rangerefs[element]: touches.slotrefs.add(slotname) if self.schema.slots[slotname].domain: touches.classrefs.add(self.schema.slots[slotname].domain) elif element in self.schema.slots: touches.slotrefs.add(element) slot = self.schema.slots[element] touches.slotrefs.update(set(slot.mixins)) if slot.is_a: touches.slotrefs.add(slot.is_a) if element in self.synopsis.inverses: touches.slotrefs.update(self.synopsis.inverses[element]) if slot.domain: touches.classrefs.add(slot.domain) if slot.range in self.schema.classes: touches.classrefs.add(slot.range) elif slot.range in self.schema.types: touches.typerefs.add(slot.range) elif element in self.schema.types: if element in self.synopsis.rangerefs: touches.slotrefs.update(self.synopsis.rangerefs[element]) return touches
python
def neighborhood(self, elements: List[ELEMENT_NAME]) \ -> References: """ Return a list of all slots, classes and types that touch any element in elements, including the element itself @param elements: Elements to do proximity with @return: All slots and classes that touch element """ touches = References() for element in elements: if element in self.schema.classes: touches.classrefs.add(element) if None in touches.classrefs: raise ValueError("1") cls = self.schema.classes[element] if cls.is_a: touches.classrefs.add(cls.is_a) if None in touches.classrefs: raise ValueError("1") # Mixins include apply_to's touches.classrefs.update(set(cls.mixins)) for slotname in cls.slots: slot = self.schema.slots[slotname] if slot.range in self.schema.classes: touches.classrefs.add(slot.range) elif slot.range in self.schema.types: touches.typerefs.add(slot.range) if None in touches.classrefs: raise ValueError("1") if element in self.synopsis.rangerefs: for slotname in self.synopsis.rangerefs[element]: touches.slotrefs.add(slotname) if self.schema.slots[slotname].domain: touches.classrefs.add(self.schema.slots[slotname].domain) elif element in self.schema.slots: touches.slotrefs.add(element) slot = self.schema.slots[element] touches.slotrefs.update(set(slot.mixins)) if slot.is_a: touches.slotrefs.add(slot.is_a) if element in self.synopsis.inverses: touches.slotrefs.update(self.synopsis.inverses[element]) if slot.domain: touches.classrefs.add(slot.domain) if slot.range in self.schema.classes: touches.classrefs.add(slot.range) elif slot.range in self.schema.types: touches.typerefs.add(slot.range) elif element in self.schema.types: if element in self.synopsis.rangerefs: touches.slotrefs.update(self.synopsis.rangerefs[element]) return touches
Return a list of all slots, classes and types that touch any element in elements, including the element itself @param elements: Elements to do proximity with @return: All slots and classes that touch element
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L159-L211
biolink/biolink-model
metamodel/utils/generator.py
Generator.grounded_slot_range
def grounded_slot_range(self, slot: Optional[Union[SlotDefinition, Optional[str]]]) -> str: """ Chase the slot range to its final form @param slot: slot to check @return: name of resolved range """ if slot is not None and not isinstance(slot, str): slot = slot.range if slot is None: return DEFAULT_BUILTIN_TYPE_NAME # Default type name elif slot in builtin_names: return slot elif slot in self.schema.types: return self.grounded_slot_range(self.schema.types[slot].typeof) else: return slot
python
def grounded_slot_range(self, slot: Optional[Union[SlotDefinition, Optional[str]]]) -> str: """ Chase the slot range to its final form @param slot: slot to check @return: name of resolved range """ if slot is not None and not isinstance(slot, str): slot = slot.range if slot is None: return DEFAULT_BUILTIN_TYPE_NAME # Default type name elif slot in builtin_names: return slot elif slot in self.schema.types: return self.grounded_slot_range(self.schema.types[slot].typeof) else: return slot
Chase the slot range to its final form @param slot: slot to check @return: name of resolved range
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L213-L228
biolink/biolink-model
metamodel/utils/generator.py
Generator.aliased_slot_name
def aliased_slot_name(self, slot: SLOT_OR_SLOTNAME) -> str: """ Return the overloaded slot name -- the alias if one exists otherwise the actual name @param slot: either a slot name or a definition @return: overloaded name """ if isinstance(slot, str): slot = self.schema.slots[slot] return slot.alias if slot.alias else slot.name
python
def aliased_slot_name(self, slot: SLOT_OR_SLOTNAME) -> str: """ Return the overloaded slot name -- the alias if one exists otherwise the actual name @param slot: either a slot name or a definition @return: overloaded name """ if isinstance(slot, str): slot = self.schema.slots[slot] return slot.alias if slot.alias else slot.name
Return the overloaded slot name -- the alias if one exists otherwise the actual name @param slot: either a slot name or a definition @return: overloaded name
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L230-L238
biolink/biolink-model
metamodel/utils/generator.py
Generator.aliased_slot_names
def aliased_slot_names(self, slot_names: List[SlotDefinitionName]) -> Set[str]: """ Return the aliased slot names for all members of the list @param slot_names: actual slot names @return: aliases w/ duplicates removed """ return {self.aliased_slot_name(sn) for sn in slot_names}
python
def aliased_slot_names(self, slot_names: List[SlotDefinitionName]) -> Set[str]: """ Return the aliased slot names for all members of the list @param slot_names: actual slot names @return: aliases w/ duplicates removed """ return {self.aliased_slot_name(sn) for sn in slot_names}
Return the aliased slot names for all members of the list @param slot_names: actual slot names @return: aliases w/ duplicates removed
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L240-L246
biolink/biolink-model
metamodel/utils/generator.py
Generator.obj_for
def obj_for(self, obj_or_name: Union[str, Element]) -> Optional[Union[str, Element]]: """ Return the class, slot or type that represents name or name itself if it is a builtin @param obj_or_name: Object or name @return: Corresponding element or None if not found (most likely cause is that it is a builtin type) """ name = obj_or_name.name if isinstance(obj_or_name, Element) else obj_or_name return self.schema.classes[name] if name in self.schema.classes \ else self.schema.slots[name] if name in self.schema.slots \ else self.schema.types[name] if name in self.schema.types else name if name in builtin_names \ else None
python
def obj_for(self, obj_or_name: Union[str, Element]) -> Optional[Union[str, Element]]: """ Return the class, slot or type that represents name or name itself if it is a builtin @param obj_or_name: Object or name @return: Corresponding element or None if not found (most likely cause is that it is a builtin type) """ name = obj_or_name.name if isinstance(obj_or_name, Element) else obj_or_name return self.schema.classes[name] if name in self.schema.classes \ else self.schema.slots[name] if name in self.schema.slots \ else self.schema.types[name] if name in self.schema.types else name if name in builtin_names \ else None
Return the class, slot or type that represents name or name itself if it is a builtin @param obj_or_name: Object or name @return: Corresponding element or None if not found (most likely cause is that it is a builtin type)
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L248-L258
biolink/biolink-model
metamodel/utils/generator.py
Generator.obj_name
def obj_name(self, obj: Union[str, Element]) -> str: """ Return the formatted name used for the supplied definition """ if isinstance(obj, str): obj = self.obj_for(obj) if isinstance(obj, SlotDefinition): return underscore(self.aliased_slot_name(obj)) else: return camelcase(obj if isinstance(obj, str) else obj.name)
python
def obj_name(self, obj: Union[str, Element]) -> str: """ Return the formatted name used for the supplied definition """ if isinstance(obj, str): obj = self.obj_for(obj) if isinstance(obj, SlotDefinition): return underscore(self.aliased_slot_name(obj)) else: return camelcase(obj if isinstance(obj, str) else obj.name)
Return the formatted name used for the supplied definition
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/generator.py#L260-L267
biolink/biolink-model
metamodel/generators/csvgen.py
cli
def cli(yamlfile, root, format): """ Generate CSV/TSV file from biolink model """ print(CsvGenerator(yamlfile, format).serialize(classes=root))
python
def cli(yamlfile, root, format): """ Generate CSV/TSV file from biolink model """ print(CsvGenerator(yamlfile, format).serialize(classes=root))
Generate CSV/TSV file from biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/csvgen.py#L52-L54
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_inherited
def gen_inherited(self) -> str: """ Generate the list of slot properties that are inherited across slot_usage or is_a paths """ inherited_head = 'inherited_slots: List[str] = [' inherited_slots = ', '.join([f'"{underscore(slot.name)}"' for slot in self.schema.slots.values() if slot.inherited]) is_rows = split_line(inherited_slots, 120 - len(inherited_head)) return inherited_head + ('\n' + len(inherited_head) * ' ').join([r.strip() for r in is_rows]) + ']'
python
def gen_inherited(self) -> str: """ Generate the list of slot properties that are inherited across slot_usage or is_a paths """ inherited_head = 'inherited_slots: List[str] = [' inherited_slots = ', '.join([f'"{underscore(slot.name)}"' for slot in self.schema.slots.values() if slot.inherited]) is_rows = split_line(inherited_slots, 120 - len(inherited_head)) return inherited_head + ('\n' + len(inherited_head) * ' ').join([r.strip() for r in is_rows]) + ']'
Generate the list of slot properties that are inherited across slot_usage or is_a paths
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L58-L64
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_references
def gen_references(self) -> str: """ Generate python type declarations for all identifiers (primary keys) """ rval = [] for cls in self.schema.classes.values(): pkeys = self.primary_keys_for(cls) for pk in pkeys: pk_slot = self.schema.slots[pk] classname = camelcase(cls.name) + camelcase(pk) if cls.is_a and getattr(self.schema.classes[cls.is_a], pk, None): parent = self.range_type_name(pk_slot, cls.is_a) else: parent = self.python_name_for(pk_slot.range) rval.append(f'class {classname}({parent}):\n\tpass') return '\n\n\n'.join(rval)
python
def gen_references(self) -> str: """ Generate python type declarations for all identifiers (primary keys) """ rval = [] for cls in self.schema.classes.values(): pkeys = self.primary_keys_for(cls) for pk in pkeys: pk_slot = self.schema.slots[pk] classname = camelcase(cls.name) + camelcase(pk) if cls.is_a and getattr(self.schema.classes[cls.is_a], pk, None): parent = self.range_type_name(pk_slot, cls.is_a) else: parent = self.python_name_for(pk_slot.range) rval.append(f'class {classname}({parent}):\n\tpass') return '\n\n\n'.join(rval)
Generate python type declarations for all identifiers (primary keys)
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L66-L81
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_typedefs
def gen_typedefs(self) -> str: """ Generate python type declarations for all defined types """ rval = [] for typ in self.schema.types.values(): typname = self.python_name_for(typ.name) parent = self.python_name_for(typ.typeof) rval.append(f'class {typname}({parent}):\n\tpass') return '\n\n\n'.join(rval) + ('\n' if rval else '')
python
def gen_typedefs(self) -> str: """ Generate python type declarations for all defined types """ rval = [] for typ in self.schema.types.values(): typname = self.python_name_for(typ.name) parent = self.python_name_for(typ.typeof) rval.append(f'class {typname}({parent}):\n\tpass') return '\n\n\n'.join(rval) + ('\n' if rval else '')
Generate python type declarations for all defined types
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L83-L90
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_classdefs
def gen_classdefs(self) -> str: """ Create class definitions for all non-mixin classes in the model Note that apply_to classes are transformed to mixins """ return '\n'.join([self.gen_classdef(k, v) for k, v in self.schema.classes.items() if not v.mixin])
python
def gen_classdefs(self) -> str: """ Create class definitions for all non-mixin classes in the model Note that apply_to classes are transformed to mixins """ return '\n'.join([self.gen_classdef(k, v) for k, v in self.schema.classes.items() if not v.mixin])
Create class definitions for all non-mixin classes in the model Note that apply_to classes are transformed to mixins
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L92-L96
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_classdef
def gen_classdef(self, clsname: str, cls: ClassDefinition) -> str: """ Generate python definition for class clsname """ parentref = f'({self.python_name_for(cls.is_a) if cls.is_a else "YAMLRoot"})' slotdefs = self.gen_slot_variables(cls) postinits = self.gen_postinits(cls) if not slotdefs: slotdefs = 'pass' wrapped_description = f''' """ {wrapped_annotation(be(cls.description))} """''' if be(cls.description) else '' return f''' @dataclass class {camelcase(clsname)}{parentref}:{wrapped_description} {slotdefs} {postinits}'''
python
def gen_classdef(self, clsname: str, cls: ClassDefinition) -> str: """ Generate python definition for class clsname """ parentref = f'({self.python_name_for(cls.is_a) if cls.is_a else "YAMLRoot"})' slotdefs = self.gen_slot_variables(cls) postinits = self.gen_postinits(cls) if not slotdefs: slotdefs = 'pass' wrapped_description = f''' """ {wrapped_annotation(be(cls.description))} """''' if be(cls.description) else '' return f''' @dataclass class {camelcase(clsname)}{parentref}:{wrapped_description} {slotdefs} {postinits}'''
Generate python definition for class clsname
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L98-L113
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_slot_variables
def gen_slot_variables(self, cls: ClassDefinition) -> str: """ Generate python definition for class cls, generating primary keys first followed by the rest of the slots """ return '\n\t'.join([self.gen_slot_variable(cls, pk) for pk in self.primary_keys_for(cls)] + [self.gen_slot_variable(cls, slot) for slot in cls.slots if not self.schema.slots[slot].primary_key and not self.schema.slots[slot].identifier])
python
def gen_slot_variables(self, cls: ClassDefinition) -> str: """ Generate python definition for class cls, generating primary keys first followed by the rest of the slots """ return '\n\t'.join([self.gen_slot_variable(cls, pk) for pk in self.primary_keys_for(cls)] + [self.gen_slot_variable(cls, slot) for slot in cls.slots if not self.schema.slots[slot].primary_key and not self.schema.slots[slot].identifier])
Generate python definition for class cls, generating primary keys first followed by the rest of the slots
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L115-L121
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_slot_variable
def gen_slot_variable(self, cls: ClassDefinition, slotname: str) -> str: """ Generate a slot variable for slotname as defined in class """ slot = self.schema.slots[slotname] # Alias allows re-use of slot names in different contexts if slot.alias: slotname = slot.alias range_type = self.range_type_name(slot, cls.name) # Python version < 3.7 -- forward references have to be quoted if slot.inlined and slot.range in self.schema.classes and self.forward_reference(slot.range, cls.name): range_type = f'"{range_type}"' slot_range, default_val = self.range_cardinality(range_type, slot, cls) default = f'= {default_val}' if default_val else '' return f'''{underscore(slotname)}: {slot_range} {default}'''
python
def gen_slot_variable(self, cls: ClassDefinition, slotname: str) -> str: """ Generate a slot variable for slotname as defined in class """ slot = self.schema.slots[slotname] # Alias allows re-use of slot names in different contexts if slot.alias: slotname = slot.alias range_type = self.range_type_name(slot, cls.name) # Python version < 3.7 -- forward references have to be quoted if slot.inlined and slot.range in self.schema.classes and self.forward_reference(slot.range, cls.name): range_type = f'"{range_type}"' slot_range, default_val = self.range_cardinality(range_type, slot, cls) default = f'= {default_val}' if default_val else '' return f'''{underscore(slotname)}: {slot_range} {default}'''
Generate a slot variable for slotname as defined in class
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L123-L138
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_postinits
def gen_postinits(self, cls: ClassDefinition) -> str: """ Generate all the typing and existence checks post initialize """ post_inits = [] if not cls.abstract: pkeys = self.primary_keys_for(cls) for pkey in pkeys: post_inits.append(self.gen_postinit(cls, pkey)) for slotname in cls.slots: slot = self.schema.slots[slotname] if not (slot.primary_key or slot.identifier): post_inits.append(self.gen_postinit(cls, slotname)) post_inits_line = '\n\t\t'.join([p for p in post_inits if p]) return (f''' def _fix_elements(self): super()._fix_elements() {post_inits_line}''' + '\n') if post_inits_line else ''
python
def gen_postinits(self, cls: ClassDefinition) -> str: """ Generate all the typing and existence checks post initialize """ post_inits = [] if not cls.abstract: pkeys = self.primary_keys_for(cls) for pkey in pkeys: post_inits.append(self.gen_postinit(cls, pkey)) for slotname in cls.slots: slot = self.schema.slots[slotname] if not (slot.primary_key or slot.identifier): post_inits.append(self.gen_postinit(cls, slotname)) post_inits_line = '\n\t\t'.join([p for p in post_inits if p]) return (f''' def _fix_elements(self): super()._fix_elements() {post_inits_line}''' + '\n') if post_inits_line else ''
Generate all the typing and existence checks post initialize
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L140-L156
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.gen_postinit
def gen_postinit(self, cls: ClassDefinition, slotname: str) -> Optional[str]: """ Generate python post init rules for slot in class """ rlines: List[str] = [] slot = self.schema.slots[slotname] if slot.alias: slotname = slot.alias slotname = self.python_name_for(slotname) range_type_name = self.range_type_name(slot, cls.name) # Generate existence check for required slots. Note that inherited classes have to check post-init because # named variables can't be mixed in the class signature if slot.primary_key or slot.identifier or slot.required: if cls.is_a: rlines.append(f'if self.{slotname} is None:') rlines.append(f'\traise ValueError(f"{slotname} must be supplied")') rlines.append(f'if not isinstance(self.{slotname}, {range_type_name}):') rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})') elif slot.range in self.schema.classes or slot.range in self.schema.types: if not slot.multivalued: rlines.append(f'if self.{slotname} and not isinstance(self.{slotname}, {range_type_name}):') # Another really wierd case -- a class that has no properties if slot.range in self.schema.classes and not self.all_slots_for(self.schema.classes[slot.range]): rlines.append(f'\tself.{slotname} = {range_type_name}()') else: rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})') elif slot.inlined: slot_range_cls = self.schema.classes[slot.range] pkeys = self.primary_keys_for(slot_range_cls) if pkeys: # Special situation -- if there are only two values: primary key and value, # we load it is a list, not a dictionary if len(self.all_slots_for(slot_range_cls)) - len(pkeys) == 1: class_init = '(k, v)' else: pkey_name = self.python_name_for(pkeys[0]) class_init = f'({pkey_name}=k, **({{}} if v is None else v))' rlines.append(f'for k, v in self.{slotname}.items():') rlines.append(f'\tif not isinstance(v, {range_type_name}):') rlines.append(f'\t\tself.{slotname}[k] = {range_type_name}{class_init}') else: rlines.append(f'self.{slotname} = [v if isinstance(v, 
{range_type_name})') indent = len(f'self.{slotname} = [') * ' ' rlines.append(f'{indent}else {range_type_name}(v) for v in self.{slotname}]') return '\n\t\t'.join(rlines)
python
def gen_postinit(self, cls: ClassDefinition, slotname: str) -> Optional[str]: """ Generate python post init rules for slot in class """ rlines: List[str] = [] slot = self.schema.slots[slotname] if slot.alias: slotname = slot.alias slotname = self.python_name_for(slotname) range_type_name = self.range_type_name(slot, cls.name) # Generate existence check for required slots. Note that inherited classes have to check post-init because # named variables can't be mixed in the class signature if slot.primary_key or slot.identifier or slot.required: if cls.is_a: rlines.append(f'if self.{slotname} is None:') rlines.append(f'\traise ValueError(f"{slotname} must be supplied")') rlines.append(f'if not isinstance(self.{slotname}, {range_type_name}):') rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})') elif slot.range in self.schema.classes or slot.range in self.schema.types: if not slot.multivalued: rlines.append(f'if self.{slotname} and not isinstance(self.{slotname}, {range_type_name}):') # Another really wierd case -- a class that has no properties if slot.range in self.schema.classes and not self.all_slots_for(self.schema.classes[slot.range]): rlines.append(f'\tself.{slotname} = {range_type_name}()') else: rlines.append(f'\tself.{slotname} = {range_type_name}(self.{slotname})') elif slot.inlined: slot_range_cls = self.schema.classes[slot.range] pkeys = self.primary_keys_for(slot_range_cls) if pkeys: # Special situation -- if there are only two values: primary key and value, # we load it is a list, not a dictionary if len(self.all_slots_for(slot_range_cls)) - len(pkeys) == 1: class_init = '(k, v)' else: pkey_name = self.python_name_for(pkeys[0]) class_init = f'({pkey_name}=k, **({{}} if v is None else v))' rlines.append(f'for k, v in self.{slotname}.items():') rlines.append(f'\tif not isinstance(v, {range_type_name}):') rlines.append(f'\t\tself.{slotname}[k] = {range_type_name}{class_init}') else: rlines.append(f'self.{slotname} = [v if isinstance(v, 
{range_type_name})') indent = len(f'self.{slotname} = [') * ' ' rlines.append(f'{indent}else {range_type_name}(v) for v in self.{slotname}]') return '\n\t\t'.join(rlines)
Generate python post init rules for slot in class
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L158-L202
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.primary_keys_for
def primary_keys_for(self, cls: ClassDefinition) -> List[SlotDefinitionName]: """ Return all primary keys / identifiers for cls @param cls: class to get keys for @return: List of primary keys """ return [slot_name for slot_name in self.all_slots_for(cls) if self.schema.slots[slot_name].primary_key or self.schema.slots[slot_name].identifier]
python
def primary_keys_for(self, cls: ClassDefinition) -> List[SlotDefinitionName]: """ Return all primary keys / identifiers for cls @param cls: class to get keys for @return: List of primary keys """ return [slot_name for slot_name in self.all_slots_for(cls) if self.schema.slots[slot_name].primary_key or self.schema.slots[slot_name].identifier]
Return all primary keys / identifiers for cls @param cls: class to get keys for @return: List of primary keys
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L233-L240
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.all_slots_for
def all_slots_for(self, cls: ClassDefinition) -> List[SlotDefinitionName]: """ Return all slots for class cls """ if not cls.is_a: return cls.slots else: return [sn for sn in self.all_slots_for(self.schema.classes[cls.is_a]) if sn not in cls.slot_usage] \ + cls.slots
python
def all_slots_for(self, cls: ClassDefinition) -> List[SlotDefinitionName]: """ Return all slots for class cls """ if not cls.is_a: return cls.slots else: return [sn for sn in self.all_slots_for(self.schema.classes[cls.is_a]) if sn not in cls.slot_usage] \ + cls.slots
Return all slots for class cls
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L242-L248
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.range_type_name
def range_type_name(self, slot: SlotDefinition, containing_class_name: ClassDefinitionName) -> str: """ Generate the type name for the slot """ if slot.primary_key or slot.identifier: return self.python_name_for(containing_class_name) + camelcase(slot.name) if slot.range in self.schema.classes and not slot.inlined: class_key = self.key_name_for(cast(ClassDefinitionName, slot.range)) if class_key: return class_key return self.python_name_for(slot.range)
python
def range_type_name(self, slot: SlotDefinition, containing_class_name: ClassDefinitionName) -> str: """ Generate the type name for the slot """ if slot.primary_key or slot.identifier: return self.python_name_for(containing_class_name) + camelcase(slot.name) if slot.range in self.schema.classes and not slot.inlined: class_key = self.key_name_for(cast(ClassDefinitionName, slot.range)) if class_key: return class_key return self.python_name_for(slot.range)
Generate the type name for the slot
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L255-L264
biolink/biolink-model
metamodel/generators/pythongen.py
PythonGenerator.forward_reference
def forward_reference(self, slot_range: str, owning_class: str) -> bool: """ Determine whether slot_range is a forward reference """ for cname in self.schema.classes: if cname == owning_class: return True # Occurs on or after elif cname == slot_range: return False # Occurs before return True
python
def forward_reference(self, slot_range: str, owning_class: str) -> bool: """ Determine whether slot_range is a forward reference """ for cname in self.schema.classes: if cname == owning_class: return True # Occurs on or after elif cname == slot_range: return False # Occurs before return True
Determine whether slot_range is a forward reference
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/pythongen.py#L266-L273
biolink/biolink-model
metamodel/utils/schemaloader.py
SchemaLoader.resolve
def resolve(self) -> SchemaDefinition: """ Return a fully resolved schema """ if not isinstance(self.schema.slots, dict): raise ValueError(f"File: {self.schema.source_file} Slots are not a dictionary") if not isinstance(self.schema.classes, dict): raise ValueError(f"File: {self.schema.source_file} Classes are not a dictionary") # Process imports for sname in self.schema.imports: if sname not in self.loaded: self.loaded.add(sname) merge_schemas(self.schema, load_raw_schema(sname + '.yaml', base_dir=self.base_dir)) # slot.domain --> class.slots for slot in self.schema.slots.values(): if slot.domain in self.schema.classes and slot.name not in self.schema.classes[slot.domain].slots: self.schema.classes[slot.domain].slots.append(slot.name) # class.slots --> slot.domain for cls in self.schema.classes.values(): if not isinstance(cls, ClassDefinition): raise ValueError( f'File: {self.schema.source_file} Class "{cls} (type: {type(cls)})" definition is peculiar') if isinstance(cls.slots, str): print(f"File: {self.schema.source_file} Class: {cls.name} Slots are not an array", file=sys.stderr) cls.slots = [cls.slots] for slotname in cls.slots: if slotname in self.schema.slots: if self.schema.slots[slotname].domain is None: self.schema.slots[slotname].domain = cls.name # apply to --> mixins for cls in self.schema.classes.values(): if cls.apply_to in self.schema.classes: self.schema.classes[cls.apply_to].mixins.append(cls.name) # Override class slots with slot usage definitions for cls in self.schema.classes.values(): for slot_name, slot_usage in cls.slot_usage.items(): # Construct a new slot # Follow the ancestry of the class to get the most proximal parent parent_slot = self.slot_definition_for(slot_name, cls) if not parent_slot and slot_name in self.schema.slots: parent_slot = self.schema.slots[slot_name] # If parent slot is still not defined, it means that we introduced a NEW slot in the slot usages child_name = SlotDefinitionName(cls.name + ' ' + slot_name) if parent_slot 
else slot_name new_slot = SlotDefinition(name=child_name, alias=slot_name, domain=cls.name) merge_slots(new_slot, slot_usage) # Copy the parent definition. If there is no parent definition, the slot is being defined # locally as a slot_usage if parent_slot is not None: new_slot.is_a = parent_slot.name merge_slots(new_slot, parent_slot) # Add the slot usage overrides merge_slots(new_slot, slot_usage) self.schema.slots[child_name] = new_slot # Add or replace the slot in the class definition append = True for i, s in enumerate(cls.slots): if s == slot_name: cls.slots[i] = SlotDefinitionName(child_name) append = False break if append: cls.slots.append(SlotDefinitionName(child_name)) # Update slots with parental information merged_slots: List[SlotDefinition] = [] for slot in self.schema.slots.values(): self.merge_slot(slot, merged_slots) # Clean up the slot range defaults for slot in self.schema.slots.values(): if not slot.range: slot.range = 'string' return self.schema
python
def resolve(self) -> SchemaDefinition: """ Return a fully resolved schema """ if not isinstance(self.schema.slots, dict): raise ValueError(f"File: {self.schema.source_file} Slots are not a dictionary") if not isinstance(self.schema.classes, dict): raise ValueError(f"File: {self.schema.source_file} Classes are not a dictionary") # Process imports for sname in self.schema.imports: if sname not in self.loaded: self.loaded.add(sname) merge_schemas(self.schema, load_raw_schema(sname + '.yaml', base_dir=self.base_dir)) # slot.domain --> class.slots for slot in self.schema.slots.values(): if slot.domain in self.schema.classes and slot.name not in self.schema.classes[slot.domain].slots: self.schema.classes[slot.domain].slots.append(slot.name) # class.slots --> slot.domain for cls in self.schema.classes.values(): if not isinstance(cls, ClassDefinition): raise ValueError( f'File: {self.schema.source_file} Class "{cls} (type: {type(cls)})" definition is peculiar') if isinstance(cls.slots, str): print(f"File: {self.schema.source_file} Class: {cls.name} Slots are not an array", file=sys.stderr) cls.slots = [cls.slots] for slotname in cls.slots: if slotname in self.schema.slots: if self.schema.slots[slotname].domain is None: self.schema.slots[slotname].domain = cls.name # apply to --> mixins for cls in self.schema.classes.values(): if cls.apply_to in self.schema.classes: self.schema.classes[cls.apply_to].mixins.append(cls.name) # Override class slots with slot usage definitions for cls in self.schema.classes.values(): for slot_name, slot_usage in cls.slot_usage.items(): # Construct a new slot # Follow the ancestry of the class to get the most proximal parent parent_slot = self.slot_definition_for(slot_name, cls) if not parent_slot and slot_name in self.schema.slots: parent_slot = self.schema.slots[slot_name] # If parent slot is still not defined, it means that we introduced a NEW slot in the slot usages child_name = SlotDefinitionName(cls.name + ' ' + slot_name) if parent_slot 
else slot_name new_slot = SlotDefinition(name=child_name, alias=slot_name, domain=cls.name) merge_slots(new_slot, slot_usage) # Copy the parent definition. If there is no parent definition, the slot is being defined # locally as a slot_usage if parent_slot is not None: new_slot.is_a = parent_slot.name merge_slots(new_slot, parent_slot) # Add the slot usage overrides merge_slots(new_slot, slot_usage) self.schema.slots[child_name] = new_slot # Add or replace the slot in the class definition append = True for i, s in enumerate(cls.slots): if s == slot_name: cls.slots[i] = SlotDefinitionName(child_name) append = False break if append: cls.slots.append(SlotDefinitionName(child_name)) # Update slots with parental information merged_slots: List[SlotDefinition] = [] for slot in self.schema.slots.values(): self.merge_slot(slot, merged_slots) # Clean up the slot range defaults for slot in self.schema.slots.values(): if not slot.range: slot.range = 'string' return self.schema
Return a fully resolved schema
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/schemaloader.py#L17-L99
biolink/biolink-model
metamodel/utils/schemaloader.py
SchemaLoader.slot_definition_for
def slot_definition_for(self, slotname: SlotDefinitionName, cls: ClassDefinition) -> Optional[SlotDefinition]: """ Find the most proximal definition for slotname in the context of cls""" if cls.is_a: for sn in self.schema.classes[cls.is_a].slots: slot = self.schema.slots[sn] if slot.alias and slotname == slot.alias or slotname == slot.name: return slot for mixin in cls.mixins: for sn in self.schema.classes[mixin].slots: slot = self.schema.slots[sn] if slot.alias and slotname == slot.alias or slotname == slot.name: return slot if cls.is_a: defn = self.slot_definition_for(slotname, self.schema.classes[cls.is_a]) if defn: return defn for mixin in cls.mixins: defn = self.slot_definition_for(slotname, self.schema.classes[mixin]) if defn: return defn return None
python
def slot_definition_for(self, slotname: SlotDefinitionName, cls: ClassDefinition) -> Optional[SlotDefinition]: """ Find the most proximal definition for slotname in the context of cls""" if cls.is_a: for sn in self.schema.classes[cls.is_a].slots: slot = self.schema.slots[sn] if slot.alias and slotname == slot.alias or slotname == slot.name: return slot for mixin in cls.mixins: for sn in self.schema.classes[mixin].slots: slot = self.schema.slots[sn] if slot.alias and slotname == slot.alias or slotname == slot.name: return slot if cls.is_a: defn = self.slot_definition_for(slotname, self.schema.classes[cls.is_a]) if defn: return defn for mixin in cls.mixins: defn = self.slot_definition_for(slotname, self.schema.classes[mixin]) if defn: return defn return None
Find the most proximal definition for slotname in the context of cls
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/utils/schemaloader.py#L111-L131
biolink/biolink-model
metamodel/generators/yumlgen.py
cli
def cli(yamlfile, format, classes, directory): """ Generate a UML representation of a biolink model """ print(YumlGenerator(yamlfile, format).serialize(classes=classes, directory=directory), end="")
python
def cli(yamlfile, format, classes, directory): """ Generate a UML representation of a biolink model """ print(YumlGenerator(yamlfile, format).serialize(classes=classes, directory=directory), end="")
Generate a UML representation of a biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/yumlgen.py#L219-L221
biolink/biolink-model
metamodel/generators/yumlgen.py
YumlGenerator.class_box
def class_box(self, cn: ClassDefinitionName) -> str: """ Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and (b) it appears in the gen_classes list @param cn: @param inherited: @return: """ slot_defs: List[str] = [] if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slotname in self.filtered_cls_slots(cn, all_slots=True): slot = self.schema.slots[slotname] if not slot.range or slot.range in builtin_names or slot.range in self.schema.types: mod = self.prop_modifier(cls, slot) slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) self.box_generated.add(cn) self.referenced.add(cn) return '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']'
python
def class_box(self, cn: ClassDefinitionName) -> str: """ Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and (b) it appears in the gen_classes list @param cn: @param inherited: @return: """ slot_defs: List[str] = [] if cn not in self.box_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] for slotname in self.filtered_cls_slots(cn, all_slots=True): slot = self.schema.slots[slotname] if not slot.range or slot.range in builtin_names or slot.range in self.schema.types: mod = self.prop_modifier(cls, slot) slot_defs.append(underscore(self.aliased_slot_name(slot)) + mod + ':' + underscore(slot.range) + self.cardinality(slot)) self.box_generated.add(cn) self.referenced.add(cn) return '[' + camelcase(cn) + ('|' + ';'.join(slot_defs) if slot_defs else '') + ']'
Generate a box for the class. Populate its interior only if (a) it hasn't previously been generated and (b) it appears in the gen_classes list @param cn: @param inherited: @return:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/yumlgen.py#L89-L109
biolink/biolink-model
metamodel/generators/yumlgen.py
YumlGenerator.class_associations
def class_associations(self, cn: ClassDefinitionName, must_render: bool=False) -> str: """ Emit all associations for a focus class. If none are specified, all classes are generated @param cn: Name of class to be emitted @param must_render: True means render even if this is a target (class is specifically requested) @return: YUML representation of the association """ # NOTE: YUML diagrams draw in the opposite order in which they are created, so we work from bottom to top and # from right to left assocs: List[str] = [] if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] # Slots for slotname in self.filtered_cls_slots(cn, False)[::-1]: slot = self.schema.slots[slotname] if slot.range in self.schema.classes: assocs.append(self.class_box(cn) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + '>' + self.class_box(slot.range)) # Referencing slots if cn in self.synopsis.rangerefs: for slotname in sorted(self.synopsis.rangerefs[cn]): slot = self.schema.slots[slotname] if slot.domain in self.schema.classes and (slot.range != cls.name or must_render): assocs.append(self.class_box(slot.domain) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + '>' + self.class_box(cn)) # Mixins used in the class for mixin in cls.mixins: assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin)) # Classes that use the class as a mixin if cls.name in self.synopsis.mixinrefs: for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True): assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn)) # Classes that inject information if cn in self.synopsis.applytos: for injector in sorted(self.synopsis.applytos[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_injected + 
self.class_box(ClassDefinitionName(injector))) self.associations_generated.add(cn) # Children if cn in self.synopsis.isarefs: for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls))) # Parent if cls.is_a: assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn)) return ', '.join(assocs)
python
def class_associations(self, cn: ClassDefinitionName, must_render: bool=False) -> str: """ Emit all associations for a focus class. If none are specified, all classes are generated @param cn: Name of class to be emitted @param must_render: True means render even if this is a target (class is specifically requested) @return: YUML representation of the association """ # NOTE: YUML diagrams draw in the opposite order in which they are created, so we work from bottom to top and # from right to left assocs: List[str] = [] if cn not in self.associations_generated and (not self.focus_classes or cn in self.focus_classes): cls = self.schema.classes[cn] # Slots for slotname in self.filtered_cls_slots(cn, False)[::-1]: slot = self.schema.slots[slotname] if slot.range in self.schema.classes: assocs.append(self.class_box(cn) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + '>' + self.class_box(slot.range)) # Referencing slots if cn in self.synopsis.rangerefs: for slotname in sorted(self.synopsis.rangerefs[cn]): slot = self.schema.slots[slotname] if slot.domain in self.schema.classes and (slot.range != cls.name or must_render): assocs.append(self.class_box(slot.domain) + (yuml_inline if slot.inlined else yuml_ref) + self.aliased_slot_name(slot) + self.prop_modifier(cls, slot) + self.cardinality(slot) + '>' + self.class_box(cn)) # Mixins used in the class for mixin in cls.mixins: assocs.append(self.class_box(cn) + yuml_uses + self.class_box(mixin)) # Classes that use the class as a mixin if cls.name in self.synopsis.mixinrefs: for mixin in sorted(self.synopsis.mixinrefs[cls.name].classrefs, reverse=True): assocs.append(self.class_box(ClassDefinitionName(mixin)) + yuml_uses + self.class_box(cn)) # Classes that inject information if cn in self.synopsis.applytos: for injector in sorted(self.synopsis.applytos[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_injected + 
self.class_box(ClassDefinitionName(injector))) self.associations_generated.add(cn) # Children if cn in self.synopsis.isarefs: for is_a_cls in sorted(self.synopsis.isarefs[cn].classrefs, reverse=True): assocs.append(self.class_box(cn) + yuml_is_a + self.class_box(ClassDefinitionName(is_a_cls))) # Parent if cls.is_a: assocs.append(self.class_box(cls.is_a) + yuml_is_a + self.class_box(cn)) return ', '.join(assocs)
Emit all associations for a focus class. If none are specified, all classes are generated @param cn: Name of class to be emitted @param must_render: True means render even if this is a target (class is specifically requested) @return: YUML representation of the association
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/yumlgen.py#L111-L165
biolink/biolink-model
metamodel/generators/yumlgen.py
YumlGenerator.filtered_cls_slots
def filtered_cls_slots(self, cn: ClassDefinitionName, all_slots: bool=True) \ -> List[SlotDefinitionName]: """ Return the set of slots associated with the class that meet the filter criteria. Slots will be returned in defining order, with class slots returned last @param cn: name of class to filter @param all_slots: True means include attributes @return: List of slot definitions """ rval = [] cls = self.schema.classes[cn] cls_slots = self.all_slots(cls, cls_slots_first=True) for slot in cls_slots: if all_slots or slot.range in self.schema.classes: rval.append(slot.name) return rval
python
def filtered_cls_slots(self, cn: ClassDefinitionName, all_slots: bool=True) \ -> List[SlotDefinitionName]: """ Return the set of slots associated with the class that meet the filter criteria. Slots will be returned in defining order, with class slots returned last @param cn: name of class to filter @param all_slots: True means include attributes @return: List of slot definitions """ rval = [] cls = self.schema.classes[cn] cls_slots = self.all_slots(cls, cls_slots_first=True) for slot in cls_slots: if all_slots or slot.range in self.schema.classes: rval.append(slot.name) return rval
Return the set of slots associated with the class that meet the filter criteria. Slots will be returned in defining order, with class slots returned last @param cn: name of class to filter @param all_slots: True means include attributes @return: List of slot definitions
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/yumlgen.py#L174-L190
biolink/biolink-model
metamodel/generators/yumlgen.py
YumlGenerator.prop_modifier
def prop_modifier(self, cls: ClassDefinition, slot: SlotDefinition) -> str: """ Return the modifiers for the slot: (i) - inherited (m) - inherited through mixin (a) - injected (pk) - primary ckey @param cls: @param slot: @return: """ pk = '(pk)' if slot.primary_key else '' inherited = slot.name not in cls.slots mixin = inherited and slot.name in \ [mslot.name for mslot in [self.schema.classes[m] for m in cls.mixins]] injected = cls.name in self.synopsis.applytos and \ slot.name in [aslot.name for aslot in [self.schema.classes[a] for a in sorted(self.synopsis.applytos[cls.name].classrefs)]] return pk + '(a)' if injected else '(m)' if mixin else '(i)' if inherited else ''
python
def prop_modifier(self, cls: ClassDefinition, slot: SlotDefinition) -> str: """ Return the modifiers for the slot: (i) - inherited (m) - inherited through mixin (a) - injected (pk) - primary ckey @param cls: @param slot: @return: """ pk = '(pk)' if slot.primary_key else '' inherited = slot.name not in cls.slots mixin = inherited and slot.name in \ [mslot.name for mslot in [self.schema.classes[m] for m in cls.mixins]] injected = cls.name in self.synopsis.applytos and \ slot.name in [aslot.name for aslot in [self.schema.classes[a] for a in sorted(self.synopsis.applytos[cls.name].classrefs)]] return pk + '(a)' if injected else '(m)' if mixin else '(i)' if inherited else ''
Return the modifiers for the slot: (i) - inherited (m) - inherited through mixin (a) - injected (pk) - primary ckey @param cls: @param slot: @return:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/yumlgen.py#L192-L210
biolink/biolink-model
metamodel/generators/shexgen.py
cli
def cli(yamlfile, format, output, collections): """ Generate a ShEx Schema for a biolink model """ print(ShExGenerator(yamlfile, format).serialize(output=output, collections=collections))
python
def cli(yamlfile, format, output, collections): """ Generate a ShEx Schema for a biolink model """ print(ShExGenerator(yamlfile, format).serialize(output=output, collections=collections))
Generate a ShEx Schema for a biolink model
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/shexgen.py#L174-L176
biolink/biolink-model
metamodel/generators/shexgen.py
ShExGenerator.gen_multivalued_slot
def gen_multivalued_slot(self, target_name_base: str, target_type: IRIREF) -> IRIREF: """ Generate a shape that represents an RDF list of target_type @param target_name_base: @param target_type: @return: """ list_shape_id = IRIREF(target_name_base + "__List") if list_shape_id not in self.list_shapes: list_shape = Shape(id=list_shape_id, closed=True) list_shape.expression = EachOf() expressions = [TripleConstraint(predicate=RDF.first, valueExpr=target_type, min=0, max=1)] targets = ShapeOr() targets.shapeExprs = [(NodeConstraint(values=[RDF.nil])), list_shape_id] expressions.append(TripleConstraint(predicate=RDF.rest, valueExpr=targets)) list_shape.expression.expressions = expressions self.shapes.append(list_shape) self.list_shapes.append(list_shape_id) return list_shape_id
python
def gen_multivalued_slot(self, target_name_base: str, target_type: IRIREF) -> IRIREF: """ Generate a shape that represents an RDF list of target_type @param target_name_base: @param target_type: @return: """ list_shape_id = IRIREF(target_name_base + "__List") if list_shape_id not in self.list_shapes: list_shape = Shape(id=list_shape_id, closed=True) list_shape.expression = EachOf() expressions = [TripleConstraint(predicate=RDF.first, valueExpr=target_type, min=0, max=1)] targets = ShapeOr() targets.shapeExprs = [(NodeConstraint(values=[RDF.nil])), list_shape_id] expressions.append(TripleConstraint(predicate=RDF.rest, valueExpr=targets)) list_shape.expression.expressions = expressions self.shapes.append(list_shape) self.list_shapes.append(list_shape_id) return list_shape_id
Generate a shape that represents an RDF list of target_type @param target_name_base: @param target_type: @return:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/shexgen.py#L119-L137
biolink/biolink-model
metamodel/generators/contextgen.py
ContextGenerator.add_prefix
def add_prefix(self, ncname: str) -> None: """ Look up ncname and add it to the prefix map if necessary @param ncname: name to add """ if ncname not in self.prefixmap: uri = cu.expand_uri(ncname + ':', self.curi_maps) if uri and '://' in uri: self.prefixmap[ncname] = uri else: print(f"Unrecognized prefix: {ncname}", file=sys.stderr) self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
python
def add_prefix(self, ncname: str) -> None: """ Look up ncname and add it to the prefix map if necessary @param ncname: name to add """ if ncname not in self.prefixmap: uri = cu.expand_uri(ncname + ':', self.curi_maps) if uri and '://' in uri: self.prefixmap[ncname] = uri else: print(f"Unrecognized prefix: {ncname}", file=sys.stderr) self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
Look up ncname and add it to the prefix map if necessary @param ncname: name to add
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/contextgen.py#L92-L103
biolink/biolink-model
metamodel/generators/contextgen.py
ContextGenerator.get_uri
def get_uri(self, ncname: str) -> Optional[str]: """ Get the URI associated with ncname @param ncname: """ uri = cu.expand_uri(ncname + ':', self.curi_maps) return uri if uri and uri.startswith('http') else None
python
def get_uri(self, ncname: str) -> Optional[str]: """ Get the URI associated with ncname @param ncname: """ uri = cu.expand_uri(ncname + ':', self.curi_maps) return uri if uri and uri.startswith('http') else None
Get the URI associated with ncname @param ncname:
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/contextgen.py#L105-L111
biolink/biolink-model
metamodel/generators/contextgen.py
ContextGenerator.add_mappings
def add_mappings(self, defn: Definition, target: Dict) -> None: """ Process any mappings in defn, adding all of the mappings prefixes to the namespace map and add a link to the first mapping to the target @param defn: Class or Slot definition @param target: context target """ self.add_id_prefixes(defn) for mapping in defn.mappings: if '://' in mapping: target['@id'] = mapping else: if ':' not in mapping or len(mapping.split(':')) != 2: raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}") ns = mapping.split(':')[0] self.add_prefix(ns) target['@id'] = defn.mappings[0]
python
def add_mappings(self, defn: Definition, target: Dict) -> None: """ Process any mappings in defn, adding all of the mappings prefixes to the namespace map and add a link to the first mapping to the target @param defn: Class or Slot definition @param target: context target """ self.add_id_prefixes(defn) for mapping in defn.mappings: if '://' in mapping: target['@id'] = mapping else: if ':' not in mapping or len(mapping.split(':')) != 2: raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}") ns = mapping.split(':')[0] self.add_prefix(ns) target['@id'] = defn.mappings[0]
Process any mappings in defn, adding all of the mappings prefixes to the namespace map and add a link to the first mapping to the target @param defn: Class or Slot definition @param target: context target
https://github.com/biolink/biolink-model/blob/f379e28d5d4085e1115798c6cb28e5acc4dba8b4/metamodel/generators/contextgen.py#L117-L133
Dfenestrator/GooPyCharts
gpcharts.py
figure.plot
def plot(self,xdata,ydata=[],logScale=False,disp=True,**kwargs): '''Graphs a line plot. xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well. logScale: set to True to set the y axis to log scale. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format #check if only 1 vector was sent, then plot against a count if ydata: data = combineData(xdata,ydata,self.xlabel) else: data = combineData(range(len(xdata)),xdata,self.xlabel) #determine log scale parameter if logScale: logScaleStr = 'true' else: logScaleStr = 'false' #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data': str(data), 'title':self.title, 'functionName':slugify(self.title), 'height': self.height, 'width': self.width, 'logScaleFlag': logScaleStr, 'ylabel': self.ylabel, 'plotType': 'LineChart', 'numFig': self.numFig, 'other': other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile()
python
def plot(self,xdata,ydata=[],logScale=False,disp=True,**kwargs): '''Graphs a line plot. xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well. logScale: set to True to set the y axis to log scale. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format #check if only 1 vector was sent, then plot against a count if ydata: data = combineData(xdata,ydata,self.xlabel) else: data = combineData(range(len(xdata)),xdata,self.xlabel) #determine log scale parameter if logScale: logScaleStr = 'true' else: logScaleStr = 'false' #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data': str(data), 'title':self.title, 'functionName':slugify(self.title), 'height': self.height, 'width': self.width, 'logScaleFlag': logScaleStr, 'ylabel': self.ylabel, 'plotType': 'LineChart', 'numFig': self.numFig, 'other': other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile()
Graphs a line plot. xdata: list of independent variable data. Can optionally include a header, see testGraph.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of dependent variable data. Can be multidimensional. If xdata includes a header, include a header list on ydata as well. logScale: set to True to set the y axis to log scale. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
https://github.com/Dfenestrator/GooPyCharts/blob/57117f213111dfe0401b1dc9720cdba8a23c3028/gpcharts.py#L358-L402
Dfenestrator/GooPyCharts
gpcharts.py
figure.bar
def bar(self,xdata,ydata,disp=True,**kwargs): '''Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format data = combineData(xdata,ydata,self.xlabel) #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data':str(data), 'title':self.title, 'functionName':slugify(self.title), 'height':self.height, 'width':self.width, 'logScaleFlag':'false', 'ylabel':self.ylabel, 'plotType':'BarChart', 'numFig':self.numFig, 'other':other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile()
python
def bar(self,xdata,ydata,disp=True,**kwargs): '''Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format data = combineData(xdata,ydata,self.xlabel) #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data':str(data), 'title':self.title, 'functionName':slugify(self.title), 'height':self.height, 'width':self.width, 'logScaleFlag':'false', 'ylabel':self.ylabel, 'plotType':'BarChart', 'numFig':self.numFig, 'other':other} self.javascript = templateType(xdata) % argDict if disp: self.dispFile()
Displays a bar graph. xdata: list of bar graph categories/bins. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. ydata: list of values associated with categories in xdata. If xdata includes a header, include a header list on ydata as well. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
https://github.com/Dfenestrator/GooPyCharts/blob/57117f213111dfe0401b1dc9720cdba8a23c3028/gpcharts.py#L450-L483
Dfenestrator/GooPyCharts
gpcharts.py
figure.hist
def hist(self,xdata,disp=True,**kwargs): '''Graphs a histogram. xdata: List of values to bin. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format data = [self.xlabel]+xdata #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data':str(data), 'title':self.title, 'functionName':slugify(self.title), 'height':self.height, 'width':self.width, 'logScaleFlag':'false', 'ylabel':self.ylabel, 'plotType':'Histogram', 'numFig':self.numFig, 'other':other} self.javascript = (graphPgTemplateStart+graphPgTemplate_hist+graphPgTemplateEnd) % argDict if disp: self.dispFile()
python
def hist(self,xdata,disp=True,**kwargs): '''Graphs a histogram. xdata: List of values to bin. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code. ''' #combine data into proper format data = [self.xlabel]+xdata #Include other options, supplied by **kwargs other = '' for option in kwargs: other += option + ': ' + kwargs[option] + ',\n' #input argument format to template is in dictionary format (see template for where variables are inserted) argDict = { 'data':str(data), 'title':self.title, 'functionName':slugify(self.title), 'height':self.height, 'width':self.width, 'logScaleFlag':'false', 'ylabel':self.ylabel, 'plotType':'Histogram', 'numFig':self.numFig, 'other':other} self.javascript = (graphPgTemplateStart+graphPgTemplate_hist+graphPgTemplateEnd) % argDict if disp: self.dispFile()
Graphs a histogram. xdata: List of values to bin. Can optionally include a header, see testGraph_barAndHist.py in https://github.com/Dfenestrator/GooPyCharts for an example. disp: for displaying plots immediately. Set to True by default. Set to False for other operations, then use show() to display the plot. **kwargs: Access to other Google Charts API options. The key is the option name, the value is the option's full JS code.
https://github.com/Dfenestrator/GooPyCharts/blob/57117f213111dfe0401b1dc9720cdba8a23c3028/gpcharts.py#L522-L554
Dfenestrator/GooPyCharts
gpcharts.py
figure.plot_nb
def plot_nb(self,xdata,ydata=[],logScale=False): '''Graphs a line plot and embeds it in a Jupyter notebook. See 'help(figure.plot)' for more info.''' self.plot(xdata,ydata,logScale)
python
def plot_nb(self,xdata,ydata=[],logScale=False): '''Graphs a line plot and embeds it in a Jupyter notebook. See 'help(figure.plot)' for more info.''' self.plot(xdata,ydata,logScale)
Graphs a line plot and embeds it in a Jupyter notebook. See 'help(figure.plot)' for more info.
https://github.com/Dfenestrator/GooPyCharts/blob/57117f213111dfe0401b1dc9720cdba8a23c3028/gpcharts.py#L557-L559
Dfenestrator/GooPyCharts
gpcharts.py
figure.scatter_nb
def scatter_nb(self,xdata,ydata=[],trendline=False): '''Graphs a scatter plot and embeds it in a Jupyter notebook. See 'help(figure.scatter)' for more info.''' self.scatter(xdata,ydata,trendline)
python
def scatter_nb(self,xdata,ydata=[],trendline=False): '''Graphs a scatter plot and embeds it in a Jupyter notebook. See 'help(figure.scatter)' for more info.''' self.scatter(xdata,ydata,trendline)
Graphs a scatter plot and embeds it in a Jupyter notebook. See 'help(figure.scatter)' for more info.
https://github.com/Dfenestrator/GooPyCharts/blob/57117f213111dfe0401b1dc9720cdba8a23c3028/gpcharts.py#L561-L563
lepture/mistune-contrib
mistune_contrib/meta.py
parse
def parse(text): """Parse the given text into metadata and strip it for a Markdown parser. :param text: text to be parsed """ rv = {} m = META.match(text) while m: key = m.group(1) value = m.group(2) value = INDENTATION.sub('\n', value.strip()) rv[key] = value text = text[len(m.group(0)):] m = META.match(text) return rv, text
python
def parse(text): """Parse the given text into metadata and strip it for a Markdown parser. :param text: text to be parsed """ rv = {} m = META.match(text) while m: key = m.group(1) value = m.group(2) value = INDENTATION.sub('\n', value.strip()) rv[key] = value text = text[len(m.group(0)):] m = META.match(text) return rv, text
Parse the given text into metadata and strip it for a Markdown parser. :param text: text to be parsed
https://github.com/lepture/mistune-contrib/blob/3180edfc6b4477ead5ef7754a57907ae94080c24/mistune_contrib/meta.py#L24-L40
gorakhargosh/pathtools
pathtools/path.py
get_dir_walker
def get_dir_walker(recursive, topdown=True, followlinks=False): """ Returns a recursive or a non-recursive directory walker. :param recursive: ``True`` produces a recursive walker; ``False`` produces a non-recursive walker. :returns: A walker function. """ if recursive: walk = partial(os.walk, topdown=topdown, followlinks=followlinks) else: def walk(path, topdown=topdown, followlinks=followlinks): try: yield next(os.walk(path, topdown=topdown, followlinks=followlinks)) except NameError: yield os.walk(path, topdown=topdown, followlinks=followlinks).next() #IGNORE:E1101 return walk
python
def get_dir_walker(recursive, topdown=True, followlinks=False): """ Returns a recursive or a non-recursive directory walker. :param recursive: ``True`` produces a recursive walker; ``False`` produces a non-recursive walker. :returns: A walker function. """ if recursive: walk = partial(os.walk, topdown=topdown, followlinks=followlinks) else: def walk(path, topdown=topdown, followlinks=followlinks): try: yield next(os.walk(path, topdown=topdown, followlinks=followlinks)) except NameError: yield os.walk(path, topdown=topdown, followlinks=followlinks).next() #IGNORE:E1101 return walk
Returns a recursive or a non-recursive directory walker. :param recursive: ``True`` produces a recursive walker; ``False`` produces a non-recursive walker. :returns: A walker function.
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/path.py#L58-L76
gorakhargosh/pathtools
pathtools/path.py
walk
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Walks a directory tree optionally recursively. Works exactly like :func:`os.walk` only adding the `recursive` argument. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ walk_func = get_dir_walker(recursive, topdown, followlinks) for root, dirnames, filenames in walk_func(dir_pathname): yield (root, dirnames, filenames)
python
def walk(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Walks a directory tree optionally recursively. Works exactly like :func:`os.walk` only adding the `recursive` argument. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ walk_func = get_dir_walker(recursive, topdown, followlinks) for root, dirnames, filenames in walk_func(dir_pathname): yield (root, dirnames, filenames)
Walks a directory tree optionally recursively. Works exactly like :func:`os.walk` only adding the `recursive` argument. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/path.py#L79-L96
gorakhargosh/pathtools
pathtools/path.py
listdir
def listdir(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Enlists all items using their absolute paths in a directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ for root, dirnames, filenames\ in walk(dir_pathname, recursive, topdown, followlinks): for dirname in dirnames: yield absolute_path(os.path.join(root, dirname)) for filename in filenames: yield absolute_path(os.path.join(root, filename))
python
def listdir(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Enlists all items using their absolute paths in a directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ for root, dirnames, filenames\ in walk(dir_pathname, recursive, topdown, followlinks): for dirname in dirnames: yield absolute_path(os.path.join(root, dirname)) for filename in filenames: yield absolute_path(os.path.join(root, filename))
Enlists all items using their absolute paths in a directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/path.py#L99-L122
gorakhargosh/pathtools
pathtools/path.py
list_directories
def list_directories(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Enlists all the directories using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ for root, dirnames, filenames\ in walk(dir_pathname, recursive, topdown, followlinks): for dirname in dirnames: yield absolute_path(os.path.join(root, dirname))
python
def list_directories(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Enlists all the directories using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ for root, dirnames, filenames\ in walk(dir_pathname, recursive, topdown, followlinks): for dirname in dirnames: yield absolute_path(os.path.join(root, dirname))
Enlists all the directories using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/path.py#L125-L146
gorakhargosh/pathtools
pathtools/path.py
list_files
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Enlists all the files using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ for root, dirnames, filenames\ in walk(dir_pathname, recursive, topdown, followlinks): for filename in filenames: yield absolute_path(os.path.join(root, filename))
python
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False): """ Enlists all the files using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk` """ for root, dirnames, filenames\ in walk(dir_pathname, recursive, topdown, followlinks): for filename in filenames: yield absolute_path(os.path.join(root, filename))
Enlists all the files using their absolute paths within the specified directory, optionally recursively. :param dir_pathname: The directory to traverse. :param recursive: ``True`` for walking recursively through the directory tree; ``False`` otherwise. :param topdown: Please see the documentation for :func:`os.walk` :param followlinks: Please see the documentation for :func:`os.walk`
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/path.py#L149-L170
gorakhargosh/pathtools
pathtools/patterns.py
match_path_against
def match_path_against(pathname, patterns, case_sensitive=True): """ Determines whether the pathname matches any of the given wildcard patterns, optionally ignoring the case of the pathname and patterns. :param pathname: A path name that will be matched against a wildcard pattern. :param patterns: A list of wildcard patterns to match_path the filename against. :param case_sensitive: ``True`` if the matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pattern matches; ``False`` otherwise. Doctests:: >>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False) True >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True) False >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False) True >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], True) False >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], False) True """ if case_sensitive: match_func = fnmatchcase pattern_transform_func = (lambda w: w) else: match_func = fnmatch pathname = pathname.lower() pattern_transform_func = _string_lower for pattern in set(patterns): pattern = pattern_transform_func(pattern) if match_func(pathname, pattern): return True return False
python
def match_path_against(pathname, patterns, case_sensitive=True): """ Determines whether the pathname matches any of the given wildcard patterns, optionally ignoring the case of the pathname and patterns. :param pathname: A path name that will be matched against a wildcard pattern. :param patterns: A list of wildcard patterns to match_path the filename against. :param case_sensitive: ``True`` if the matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pattern matches; ``False`` otherwise. Doctests:: >>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False) True >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True) False >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False) True >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], True) False >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], False) True """ if case_sensitive: match_func = fnmatchcase pattern_transform_func = (lambda w: w) else: match_func = fnmatch pathname = pathname.lower() pattern_transform_func = _string_lower for pattern in set(patterns): pattern = pattern_transform_func(pattern) if match_func(pathname, pattern): return True return False
Determines whether the pathname matches any of the given wildcard patterns, optionally ignoring the case of the pathname and patterns. :param pathname: A path name that will be matched against a wildcard pattern. :param patterns: A list of wildcard patterns to match_path the filename against. :param case_sensitive: ``True`` if the matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pattern matches; ``False`` otherwise. Doctests:: >>> match_path_against("/home/username/foobar/blah.py", ["*.py", "*.txt"], False) True >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], True) False >>> match_path_against("/home/username/foobar/blah.py", ["*.PY", "*.txt"], False) True >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], True) False >>> match_path_against("C:\\windows\\blah\\BLAH.PY", ["*.py", "*.txt"], False) True
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/patterns.py#L58-L95
gorakhargosh/pathtools
pathtools/patterns.py
_match_path
def _match_path(pathname, included_patterns, excluded_patterns, case_sensitive=True): """Internal function same as :func:`match_path` but does not check arguments. Doctests:: >>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded """ if not case_sensitive: included_patterns = set(map(_string_lower, included_patterns)) excluded_patterns = set(map(_string_lower, excluded_patterns)) else: included_patterns = set(included_patterns) excluded_patterns = set(excluded_patterns) common_patterns = included_patterns & excluded_patterns if common_patterns: raise ValueError('conflicting patterns `%s` included and excluded'\ % common_patterns) return (match_path_against(pathname, included_patterns, case_sensitive)\ and not match_path_against(pathname, excluded_patterns, case_sensitive))
python
def _match_path(pathname, included_patterns, excluded_patterns, case_sensitive=True): """Internal function same as :func:`match_path` but does not check arguments. Doctests:: >>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded """ if not case_sensitive: included_patterns = set(map(_string_lower, included_patterns)) excluded_patterns = set(map(_string_lower, excluded_patterns)) else: included_patterns = set(included_patterns) excluded_patterns = set(excluded_patterns) common_patterns = included_patterns & excluded_patterns if common_patterns: raise ValueError('conflicting patterns `%s` included and excluded'\ % common_patterns) return (match_path_against(pathname, included_patterns, case_sensitive)\ and not match_path_against(pathname, excluded_patterns, case_sensitive))
Internal function same as :func:`match_path` but does not check arguments. Doctests:: >>> _match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> _match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> _match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/patterns.py#L98-L128
gorakhargosh/pathtools
pathtools/patterns.py
match_path
def match_path(pathname, included_patterns=None, excluded_patterns=None, case_sensitive=True): """ Matches a pathname against a set of acceptable and ignored patterns. :param pathname: A pathname which will be matched against a pattern. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pathname matches; ``False`` otherwise. :raises: ValueError if included patterns and excluded patterns contain the same pattern. Doctests:: >>> match_path("/Users/gorakhargosh/foobar.py") True >>> match_path("/Users/gorakhargosh/foobar.py", case_sensitive=False) True >>> match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded """ included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns return _match_path(pathname, included, excluded, case_sensitive)
python
def match_path(pathname, included_patterns=None, excluded_patterns=None, case_sensitive=True): """ Matches a pathname against a set of acceptable and ignored patterns. :param pathname: A pathname which will be matched against a pattern. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pathname matches; ``False`` otherwise. :raises: ValueError if included patterns and excluded patterns contain the same pattern. Doctests:: >>> match_path("/Users/gorakhargosh/foobar.py") True >>> match_path("/Users/gorakhargosh/foobar.py", case_sensitive=False) True >>> match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded """ included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns return _match_path(pathname, included, excluded, case_sensitive)
Matches a pathname against a set of acceptable and ignored patterns. :param pathname: A pathname which will be matched against a pattern. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern is specified, the function treats the pathname as a match_path. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if the pathname matches; ``False`` otherwise. :raises: ValueError if included patterns and excluded patterns contain the same pattern. Doctests:: >>> match_path("/Users/gorakhargosh/foobar.py") True >>> match_path("/Users/gorakhargosh/foobar.py", case_sensitive=False) True >>> match_path("/users/gorakhargosh/foobar.py", ["*.py"], ["*.PY"], True) True >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], True) False >>> match_path("/users/gorakhargosh/foobar/", ["*.py"], ["*.txt"], False) False >>> match_path("/users/gorakhargosh/FOOBAR.PY", ["*.py"], ["*.PY"], False) Traceback (most recent call last): ... ValueError: conflicting patterns `set(['*.py'])` included and excluded
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/patterns.py#L131-L174
gorakhargosh/pathtools
pathtools/patterns.py
filter_paths
def filter_paths(pathnames, included_patterns=None, excluded_patterns=None, case_sensitive=True): """ Filters from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: A list of pathnames that matched the allowable patterns and passed through the ignored patterns. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> set(filter_paths(pathnames)) == pathnames True >>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames True >>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"]) True """ included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for pathname in pathnames: # We don't call the public match_path because it checks arguments # and sets default values if none are found. We're already doing that # above. if _match_path(pathname, included, excluded, case_sensitive): yield pathname
python
def filter_paths(pathnames, included_patterns=None, excluded_patterns=None, case_sensitive=True): """ Filters from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: A list of pathnames that matched the allowable patterns and passed through the ignored patterns. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> set(filter_paths(pathnames)) == pathnames True >>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames True >>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"]) True """ included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for pathname in pathnames: # We don't call the public match_path because it checks arguments # and sets default values if none are found. We're already doing that # above. if _match_path(pathname, included, excluded, case_sensitive): yield pathname
Filters from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: A list of pathnames that matched the allowable patterns and passed through the ignored patterns. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> set(filter_paths(pathnames)) == pathnames True >>> set(filter_paths(pathnames, case_sensitive=False)) == pathnames True >>> set(filter_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True)) == set(["/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"]) True
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/patterns.py#L177-L218
gorakhargosh/pathtools
pathtools/patterns.py
match_any_paths
def match_any_paths(pathnames, included_patterns=None, excluded_patterns=None, case_sensitive=True): """ Matches from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if any of the paths matches; ``False`` otherwise. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> match_any_paths(pathnames) True >>> match_any_paths(pathnames, case_sensitive=False) True >>> match_any_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True) True >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=False) False >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=True) False """ included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for pathname in pathnames: # We don't call the public match_path because it checks arguments # and sets default values if none are found. We're already doing that # above. if _match_path(pathname, included, excluded, case_sensitive): return True return False
python
def match_any_paths(pathnames, included_patterns=None, excluded_patterns=None, case_sensitive=True): """ Matches from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if any of the paths matches; ``False`` otherwise. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> match_any_paths(pathnames) True >>> match_any_paths(pathnames, case_sensitive=False) True >>> match_any_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True) True >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=False) False >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=True) False """ included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for pathname in pathnames: # We don't call the public match_path because it checks arguments # and sets default values if none are found. We're already doing that # above. if _match_path(pathname, included, excluded, case_sensitive): return True return False
Matches from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if any of the paths matches; ``False`` otherwise. Doctests:: >>> pathnames = set(["/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python"]) >>> match_any_paths(pathnames) True >>> match_any_paths(pathnames, case_sensitive=False) True >>> match_any_paths(pathnames, ["*.py", "*.conf"], ["*.status"], case_sensitive=True) True >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=False) False >>> match_any_paths(pathnames, ["*.txt"], case_sensitive=True) False
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/pathtools/patterns.py#L220-L265
gorakhargosh/pathtools
scripts/nosy.py
match_patterns
def match_patterns(pathname, patterns): """Returns ``True`` if the pathname matches any of the given patterns.""" for pattern in patterns: if fnmatch(pathname, pattern): return True return False
python
def match_patterns(pathname, patterns): """Returns ``True`` if the pathname matches any of the given patterns.""" for pattern in patterns: if fnmatch(pathname, pattern): return True return False
Returns ``True`` if the pathname matches any of the given patterns.
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/scripts/nosy.py#L39-L44
gorakhargosh/pathtools
scripts/nosy.py
filter_paths
def filter_paths(pathnames, patterns=None, ignore_patterns=None): """Filters from a set of paths based on acceptable patterns and ignorable patterns.""" result = [] if patterns is None: patterns = ['*'] if ignore_patterns is None: ignore_patterns = [] for pathname in pathnames: if match_patterns(pathname, patterns) and not match_patterns(pathname, ignore_patterns): result.append(pathname) return result
python
def filter_paths(pathnames, patterns=None, ignore_patterns=None): """Filters from a set of paths based on acceptable patterns and ignorable patterns.""" result = [] if patterns is None: patterns = ['*'] if ignore_patterns is None: ignore_patterns = [] for pathname in pathnames: if match_patterns(pathname, patterns) and not match_patterns(pathname, ignore_patterns): result.append(pathname) return result
Filters from a set of paths based on acceptable patterns and ignorable patterns.
https://github.com/gorakhargosh/pathtools/blob/a3522fc61b00ee2d992ca375c600513bfb9020e9/scripts/nosy.py#L47-L58
evansloan/sports.py
sports/teams.py
get_team
def get_team(sport, team): """ Get extra info that pertains to a certain team. Info available to all teams: - name: Name of the team - seasons: Number of seasons played - record: Overall record - champs: Number of championships won - leaders: Statistical leaders Info specific to baseball teams: - pennants: Number of times a team has won AL/NL league Info specific to football teams: - super_bowls: Number of Super Bowls won Info specific to hockey teams: - points: Number of overall points gained throughout all seasons played Info specific to baseball/hockey teams: - playoff_app: Total number of playoff appearances Info specific to football/hockey teams: - playoff_record: Overall record in the playoffs :param sport: The sport of the team to look for (baseball, football, hockey) :param team: The name/city of the team to look for :return: Team object containing information described above """ team_pattern = re.compile(team, re.IGNORECASE) supported_sports = ['baseball', 'football', 'hockey', 'basketball'] if sport not in supported_sports: raise errors.StatsNotFound(sport) elif sport == constants.FOOTBALL: sport = 'pro-football' base_url = 'https://www.{}-reference.com/teams/'.format(sport) table_id = 'active_franchises' if sport == 'hockey' else 'teams_active' links = SoupStrainer('table', {'id': table_id}) soup = BeautifulSoup(requests.get(base_url).content, 'html.parser', parse_only=links) team_info_raw = _get_team_info_raw(soup, base_url, team_pattern, team, sport) if sport == constants.BASEBALL: team_info = { 'name': team_info_raw[0], 'record': team_info_raw[9], 'seasons': team_info_raw[6:7][0], 'playoff_app': team_info_raw[11], 'pennants': team_info_raw[13], 'champs': team_info_raw[15], 'leaders': ' '.join(team_info_raw[16:18]) } return Team(team_info) elif sport == constants.BASKETBALL: team_info = { 'name': team_info_raw[0], 'record': team_info_raw[11].split(',')[0], 'seasons': team_info_raw[7].replace(';', ''), 'playoff_app': team_info_raw[14], 'champs': 
team_info_raw[17] } return Team(team_info) elif sport == 'pro-football': team_info = { 'name': team_info_raw[0], 'seasons': team_info_raw[2].split()[1], 'record': team_info_raw[4], 'playoff_record': team_info_raw[5].split()[2], 'super_bowls': team_info_raw[7], 'champs': team_info_raw[10], 'leaders': team_info_raw[11:17] } return Team(team_info) elif sport == constants.HOCKEY: team_info = { 'name': team_info_raw[0], 'record': team_info_raw[9], 'points': team_info_raw[10][1:-1], 'seasons': team_info_raw[2].split()[1], 'playoff_app': team_info_raw[3].split()[3], 'playoff_record': team_info_raw[7].split()[2], 'champs': team_info_raw[5], 'leaders': [ team_info_raw[11:13], ' '.join(team_info_raw[13:15]), ' '.join(team_info_raw[15:17]) ] } return Team(team_info)
python
def get_team(sport, team):
    """
    Get extra info that pertains to a certain team.

    Info available to all teams:
    - name: Name of the team
    - seasons: Number of seasons played
    - record: Overall record
    - champs: Number of championships won
    - leaders: Statistical leaders

    Info specific to baseball teams:
    - pennants: Number of times a team has won AL/NL league

    Info specific to football teams:
    - super_bowls: Number of Super Bowls won

    Info specific to hockey teams:
    - points: Number of overall points gained throughout all seasons played

    Info specific to baseball/hockey teams:
    - playoff_app: Total number of playoff appearances

    Info specific to football/hockey teams:
    - playoff_record: Overall record in the playoffs

    :param sport: The sport of the team to look for (baseball, football, hockey, basketball)
    :param team: The name/city of the team to look for
    :return: Team object containing information described above
    :raises errors.StatsNotFound: if ``sport`` is not one of the supported sports
    """
    # Case-insensitive pattern used to locate the team link on the index page.
    team_pattern = re.compile(team, re.IGNORECASE)
    supported_sports = ['baseball', 'football', 'hockey', 'basketball']
    if sport not in supported_sports:
        raise errors.StatsNotFound(sport)
    elif sport == constants.FOOTBALL:
        # The football reference site lives at pro-football-reference.com.
        sport = 'pro-football'
    base_url = 'https://www.{}-reference.com/teams/'.format(sport)
    # The hockey site names its franchise table differently from the others.
    table_id = 'active_franchises' if sport == 'hockey' else 'teams_active'
    links = SoupStrainer('table', {'id': table_id})
    soup = BeautifulSoup(requests.get(base_url).content, 'html.parser', parse_only=links)
    # Raw text fields scraped from the team's page.  The numeric indices used
    # below are positions within that scraped text and depend entirely on the
    # current *-reference.com page layout -- NOTE(review): fragile; breaks
    # silently if the site changes its markup.  Verify per site.
    team_info_raw = _get_team_info_raw(soup, base_url, team_pattern, team, sport)
    if sport == constants.BASEBALL:
        team_info = {
            'name': team_info_raw[0],
            'record': team_info_raw[9],
            'seasons': team_info_raw[6:7][0],
            'playoff_app': team_info_raw[11],
            'pennants': team_info_raw[13],
            'champs': team_info_raw[15],
            'leaders': ' '.join(team_info_raw[16:18])
        }
        return Team(team_info)
    elif sport == constants.BASKETBALL:
        team_info = {
            'name': team_info_raw[0],
            'record': team_info_raw[11].split(',')[0],
            'seasons': team_info_raw[7].replace(';', ''),
            'playoff_app': team_info_raw[14],
            'champs': team_info_raw[17]
        }
        return Team(team_info)
    elif sport == 'pro-football':
        team_info = {
            'name': team_info_raw[0],
            'seasons': team_info_raw[2].split()[1],
            'record': team_info_raw[4],
            'playoff_record': team_info_raw[5].split()[2],
            'super_bowls': team_info_raw[7],
            'champs': team_info_raw[10],
            'leaders': team_info_raw[11:17]
        }
        return Team(team_info)
    elif sport == constants.HOCKEY:
        team_info = {
            'name': team_info_raw[0],
            'record': team_info_raw[9],
            # [1:-1] drops the first and last character -- presumably the
            # wrapping parentheses around the points figure; confirm on site.
            'points': team_info_raw[10][1:-1],
            'seasons': team_info_raw[2].split()[1],
            'playoff_app': team_info_raw[3].split()[3],
            'playoff_record': team_info_raw[7].split()[2],
            'champs': team_info_raw[5],
            'leaders': [
                # NOTE(review): first entry stays a raw list slice while the
                # others are joined strings -- looks inconsistent; confirm
                # whether callers expect this mixed shape.
                team_info_raw[11:13],
                ' '.join(team_info_raw[13:15]),
                ' '.join(team_info_raw[15:17])
            ]
        }
        return Team(team_info)
Get extra info that pertains to a certain team. Info available to all teams: - name: Name of the team - seasons: Number of seasons played - record: Overall record - champs: Number of championships won - leaders: Statistical leaders Info specific to baseball teams: - pennants: Number of times a team has won AL/NL league Info specific to football teams: - super_bowls: Number of Super Bowls won Info specific to hockey teams: - points: Number of overall points gained throughout all seasons played Info specific to baseball/hockey teams: - playoff_app: Total number of playoff appearances Info specific to football/hockey teams: - playoff_record: Overall record in the playoffs :param sport: The sport of the team to look for (baseball, football, hockey) :param team: The name/city of the team to look for :return: Team object containing information described above
https://github.com/evansloan/sports.py/blob/852cd1e439f2de46ef68357e43f5f55b2e16d2b3/sports/teams.py#L23-L116
evansloan/sports.py
sports/teams.py
_get_team_info_raw
def _get_team_info_raw(soup, base_url, team_pattern, team, sport):
    """
    Parse the franchise-index page to find the requested team and scrape
    its raw metadata from the team's own page.

    :param soup: BeautifulSoup object containing html to be parsed
    :param base_url: Pre-formatted url that is formatted depending on sport
    :param team_pattern: Compiled regex pattern of team name/city
    :param team: Name of the team that is being searched for
    :param sport: Sport that is being searched for
    :return: List containing raw data of team
    :raises errors.TeamNotFoundError: if no link text matches the team
    """
    team_url = None
    team_name = None
    for link in soup.find_all('a'):
        # link.string is None for tags with nested markup; re.search raises
        # TypeError on None, so skip those links.  If several links match,
        # the last one wins (original behavior, preserved).
        if link.string and re.search(team_pattern, link.string):
            team_name = link.string
            team_url = base_url.replace('/teams/', link['href'])

    if team_url is None or team_name is None:
        raise errors.TeamNotFoundError(sport, team)

    team_soup = BeautifulSoup(requests.get(team_url).content, 'html.parser')
    # The team page keeps its summary text inside <div id="meta">.
    team_info_raw = team_soup.find('div', id='meta').contents[3].get_text().split('\n')
    team_info_raw = [x.replace('\t', '') for x in team_info_raw]
    team_info_raw = [x.strip() for x in team_info_raw if x != '']
    # Replace the scraped heading with the exact link text we matched on.
    team_info_raw[0] = team_name
    return team_info_raw
python
def _get_team_info_raw(soup, base_url, team_pattern, team, sport):
    """
    Parse the franchise-index page to find the requested team and scrape
    its raw metadata from the team's own page.

    :param soup: BeautifulSoup object containing html to be parsed
    :param base_url: Pre-formatted url that is formatted depending on sport
    :param team_pattern: Compiled regex pattern of team name/city
    :param team: Name of the team that is being searched for
    :param sport: Sport that is being searched for
    :return: List containing raw data of team
    :raises errors.TeamNotFoundError: if no link text matches the team
    """
    team_url = None
    team_name = None
    for link in soup.find_all('a'):
        # link.string is None for tags with nested markup; re.search raises
        # TypeError on None, so skip those links.  If several links match,
        # the last one wins (original behavior, preserved).
        if link.string and re.search(team_pattern, link.string):
            team_name = link.string
            team_url = base_url.replace('/teams/', link['href'])

    if team_url is None or team_name is None:
        raise errors.TeamNotFoundError(sport, team)

    team_soup = BeautifulSoup(requests.get(team_url).content, 'html.parser')
    # The team page keeps its summary text inside <div id="meta">.
    team_info_raw = team_soup.find('div', id='meta').contents[3].get_text().split('\n')
    team_info_raw = [x.replace('\t', '') for x in team_info_raw]
    team_info_raw = [x.strip() for x in team_info_raw if x != '']
    # Replace the scraped heading with the exact link text we matched on.
    team_info_raw[0] = team_name
    return team_info_raw
Parses through html page to gather raw data about team :param soup: BeautifulSoup object containing html to be parsed :param base_url: Pre-formatted url that is formatted depending on sport :param team_pattern: Compiled regex pattern of team name/city :param team: Name of the team that is being searched for :param sport: Sport that is being searched for :return: List containing raw data of team
https://github.com/evansloan/sports.py/blob/852cd1e439f2de46ef68357e43f5f55b2e16d2b3/sports/teams.py#L124-L150
evansloan/sports.py
sports/scores.py
_request_xml
def _request_xml(sport):
    """
    Fetch the live-score RSS feed for a sport from scorespro.com.

    :param sport: sport being played
    :type sport: string
    :return: XML data
    :rtype: string
    :raises errors.SportError: if the HTTP request does not succeed
    """
    url = 'http://www.scorespro.com/rss2/live-{}.xml'.format(sport)
    response = requests.get(url)
    # Guard clause: a non-2xx/3xx status means the sport feed is unavailable.
    if not response.ok:
        raise errors.SportError(sport)
    return _load_xml(response.content)
python
def _request_xml(sport):
    """
    Fetch the live-score RSS feed for a sport from scorespro.com.

    :param sport: sport being played
    :type sport: string
    :return: XML data
    :rtype: string
    :raises errors.SportError: if the HTTP request does not succeed
    """
    url = 'http://www.scorespro.com/rss2/live-{}.xml'.format(sport)
    response = requests.get(url)
    # Guard clause: a non-2xx/3xx status means the sport feed is unavailable.
    if not response.ok:
        raise errors.SportError(sport)
    return _load_xml(response.content)
Request XML data from scorespro.com :param sport: sport being played :type sport: string :return: XML data :rtype: string
https://github.com/evansloan/sports.py/blob/852cd1e439f2de46ef68357e43f5f55b2e16d2b3/sports/scores.py#L38-L52
evansloan/sports.py
sports/scores.py
_parse_match_info
def _parse_match_info(match, soccer=False): """ Parse string containing info of a specific match :param match: Match data :type match: string :param soccer: Set to true if match contains soccer data, defaults to False :type soccer: bool, optional :return: Dictionary containing match information :rtype: dict """ match_info = {} i_open = match.index('(') i_close = match.index(')') match_info['league'] = match[i_open + 1:i_close].strip() match = match[i_close + 1:] i_vs = match.index('vs') i_colon = match.index(':') match_info['home_team'] = match[0:i_vs].replace('#', ' ').strip() match_info['away_team'] = match[i_vs + 2:i_colon].replace('#', ' ').strip() match = match[i_colon:] if soccer: i_hyph = match.index('-') match_info['match_score'] = match[1:i_hyph + 2].strip() match = match[i_hyph + 1:] i_hyph = match.index('-') match_info['match_time'] = match[i_hyph + 1:].strip() else: match_info['match_score'] = match[1:].strip() return match_info
python
def _parse_match_info(match, soccer=False): """ Parse string containing info of a specific match :param match: Match data :type match: string :param soccer: Set to true if match contains soccer data, defaults to False :type soccer: bool, optional :return: Dictionary containing match information :rtype: dict """ match_info = {} i_open = match.index('(') i_close = match.index(')') match_info['league'] = match[i_open + 1:i_close].strip() match = match[i_close + 1:] i_vs = match.index('vs') i_colon = match.index(':') match_info['home_team'] = match[0:i_vs].replace('#', ' ').strip() match_info['away_team'] = match[i_vs + 2:i_colon].replace('#', ' ').strip() match = match[i_colon:] if soccer: i_hyph = match.index('-') match_info['match_score'] = match[1:i_hyph + 2].strip() match = match[i_hyph + 1:] i_hyph = match.index('-') match_info['match_time'] = match[i_hyph + 1:].strip() else: match_info['match_score'] = match[1:].strip() return match_info
Parse string containing info of a specific match :param match: Match data :type match: string :param soccer: Set to true if match contains soccer data, defaults to False :type soccer: bool, optional :return: Dictionary containing match information :rtype: dict
https://github.com/evansloan/sports.py/blob/852cd1e439f2de46ef68357e43f5f55b2e16d2b3/sports/scores.py#L67-L100
evansloan/sports.py
sports/scores.py
get_sport
def get_sport(sport):
    """
    Get live scores for all matches in a particular sport.

    :param sport: the sport being played
    :type sport: string
    :return: List containing Match objects
    :rtype: list
    """
    sport = sport.lower()
    matches = []
    for entry in _request_xml(sport):
        if sport == constants.SOCCER:
            # Soccer feeds put the parseable line in <description>.
            match_info = _parse_match_info(entry.find('description').text, soccer=True)
        else:
            match_info = _parse_match_info(entry.find('title').text)
            match_info['match_time'] = entry.find('description').text
        match_info['match_date'] = entry.find('pubDate').text
        match_info['match_link'] = entry.find('guid').text
        matches.append(Match(sport, match_info))
    return matches
python
def get_sport(sport):
    """
    Get live scores for all matches in a particular sport.

    :param sport: the sport being played
    :type sport: string
    :return: List containing Match objects
    :rtype: list
    """
    sport = sport.lower()
    matches = []
    for entry in _request_xml(sport):
        if sport == constants.SOCCER:
            # Soccer feeds put the parseable line in <description>.
            match_info = _parse_match_info(entry.find('description').text, soccer=True)
        else:
            match_info = _parse_match_info(entry.find('title').text)
            match_info['match_time'] = entry.find('description').text
        match_info['match_date'] = entry.find('pubDate').text
        match_info['match_link'] = entry.find('guid').text
        matches.append(Match(sport, match_info))
    return matches
Get live scores for all matches in a particular sport :param sport: the sport being played :type sport: string :return: List containing Match objects :rtype: list
https://github.com/evansloan/sports.py/blob/852cd1e439f2de46ef68357e43f5f55b2e16d2b3/sports/scores.py#L103-L130
evansloan/sports.py
sports/scores.py
get_match
def get_match(sport, team1, team2):
    """
    Get live scores for a single match.

    :param sport: the sport being played
    :type sport: string
    :param team1: first team participating in the match
    :type team1: string
    :param team2: second team participating in the match
    :type team2: string
    :return: A specific match
    :rtype: Match
    :raises errors.MatchError: if no live match involves both teams
    """
    sport = sport.lower()
    team1_pattern = re.compile(team1, re.I)
    team2_pattern = re.compile(team2, re.I)

    for match in get_sport(sport):
        # Each team may appear on either side of the match-up, so test both
        # combinations.  Parentheses matter here: the original chained
        # ``A or B and C or D`` parsed as ``A or (B and C) or D``, so a lone
        # team1 hit on the home team was enough to (wrongly) return a match.
        team1_found = (re.search(team1_pattern, match.home_team)
                       or re.search(team1_pattern, match.away_team))
        team2_found = (re.search(team2_pattern, match.home_team)
                       or re.search(team2_pattern, match.away_team))
        if team1_found and team2_found:
            return match

    raise errors.MatchError(sport, [team1, team2])
python
def get_match(sport, team1, team2):
    """
    Get live scores for a single match.

    :param sport: the sport being played
    :type sport: string
    :param team1: first team participating in the match
    :type team1: string
    :param team2: second team participating in the match
    :type team2: string
    :return: A specific match
    :rtype: Match
    :raises errors.MatchError: if no live match involves both teams
    """
    sport = sport.lower()
    team1_pattern = re.compile(team1, re.I)
    team2_pattern = re.compile(team2, re.I)

    for match in get_sport(sport):
        # Each team may appear on either side of the match-up, so test both
        # combinations.  Parentheses matter here: the original chained
        # ``A or B and C or D`` parsed as ``A or (B and C) or D``, so a lone
        # team1 hit on the home team was enough to (wrongly) return a match.
        team1_found = (re.search(team1_pattern, match.home_team)
                       or re.search(team1_pattern, match.away_team))
        team2_found = (re.search(team2_pattern, match.home_team)
                       or re.search(team2_pattern, match.away_team))
        if team1_found and team2_found:
            return match

    raise errors.MatchError(sport, [team1, team2])
Get live scores for a single match :param sport: the sport being played :type sport: string :param team1: first team participating in the match :type team1: string :param team2: second team participating in the match :type team2: string :return: A specific match :rtype: Match
https://github.com/evansloan/sports.py/blob/852cd1e439f2de46ef68357e43f5f55b2e16d2b3/sports/scores.py#L133-L156
bitlabstudio/django-influxdb-metrics
influxdb_metrics/models.py
user_post_delete_handler
def user_post_delete_handler(sender, **kwargs):
    """Sends a metric to InfluxDB when a User object is deleted."""
    total = get_user_model().objects.all().count()
    # Emit one event point for the deletion, then a gauge with the new total.
    for measurement, value in (('django_auth_user_delete', 1),
                               ('django_auth_user_count', total)):
        write_points([{
            'measurement': measurement,
            'tags': {'host': settings.INFLUXDB_TAGS_HOST},
            'fields': {'value': value},
            'time': timezone.now().isoformat(),
        }])
python
def user_post_delete_handler(sender, **kwargs):
    """Sends a metric to InfluxDB when a User object is deleted."""
    total = get_user_model().objects.all().count()
    # Emit one event point for the deletion, then a gauge with the new total.
    for measurement, value in (('django_auth_user_delete', 1),
                               ('django_auth_user_count', total)):
        write_points([{
            'measurement': measurement,
            'tags': {'host': settings.INFLUXDB_TAGS_HOST},
            'fields': {'value': value},
            'time': timezone.now().isoformat(),
        }])
Sends a metric to InfluxDB when a User object is deleted.
https://github.com/bitlabstudio/django-influxdb-metrics/blob/c9f368e28a6072813454b6b549b4afa64aad778a/influxdb_metrics/models.py#L23-L40
bitlabstudio/django-influxdb-metrics
influxdb_metrics/models.py
user_post_save_handler
def user_post_save_handler(**kwargs):
    """Sends a metric to InfluxDB when a new User object is created."""
    # Only react to creations, not updates of existing users.
    if not kwargs.get('created'):
        return
    total = get_user_model().objects.all().count()
    # Emit one event point for the creation, then a gauge with the new total.
    for measurement, value in (('django_auth_user_create', 1),
                               ('django_auth_user_count', total)):
        write_points([{
            'measurement': measurement,
            'tags': {'host': settings.INFLUXDB_TAGS_HOST},
            'fields': {'value': value},
            'time': timezone.now().isoformat(),
        }])
python
def user_post_save_handler(**kwargs):
    """Sends a metric to InfluxDB when a new User object is created."""
    # Only react to creations, not updates of existing users.
    if not kwargs.get('created'):
        return
    total = get_user_model().objects.all().count()
    # Emit one event point for the creation, then a gauge with the new total.
    for measurement, value in (('django_auth_user_create', 1),
                               ('django_auth_user_count', total)):
        write_points([{
            'measurement': measurement,
            'tags': {'host': settings.INFLUXDB_TAGS_HOST},
            'fields': {'value': value},
            'time': timezone.now().isoformat(),
        }])
Sends a metric to InfluxDB when a new User object is created.
https://github.com/bitlabstudio/django-influxdb-metrics/blob/c9f368e28a6072813454b6b549b4afa64aad778a/influxdb_metrics/models.py#L46-L64
marcocor/tagme-python
tagme/__init__.py
annotate
def annotate(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_TAG_API, long_text=DEFAULT_LONG_TEXT):
    '''
    Annotate a text, linking it to Wikipedia entities.
    :param text: the text to annotate.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    :param long_text: long_text parameter (see TagMe documentation).
    '''
    request_params = [
        ("text", text.encode("utf-8")),
        ("long_text", long_text),
        ("lang", lang),
    ]
    response = _issue_request(api, request_params, gcube_token)
    if not response:
        # Request failed or returned an empty body.
        return None
    return AnnotateResponse(response)
python
def annotate(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_TAG_API, long_text=DEFAULT_LONG_TEXT):
    '''
    Annotate a text, linking it to Wikipedia entities.
    :param text: the text to annotate.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    :param long_text: long_text parameter (see TagMe documentation).
    '''
    request_params = [
        ("text", text.encode("utf-8")),
        ("long_text", long_text),
        ("lang", lang),
    ]
    response = _issue_request(api, request_params, gcube_token)
    if not response:
        # Request failed or returned an empty body.
        return None
    return AnnotateResponse(response)
Annotate a text, linking it to Wikipedia entities. :param text: the text to annotate. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint. :param long_text: long_text parameter (see TagMe documentation).
https://github.com/marcocor/tagme-python/blob/e3a2fcd5a7081b00cd7edcad5d4fc3542a7eaccb/tagme/__init__.py#L188-L202
marcocor/tagme-python
tagme/__init__.py
mentions
def mentions(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_SPOT_API):
    '''
    Find possible mentions in a text, do not link them to any entity.
    :param text: the text where to find mentions.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    '''
    request_params = [
        ("text", text.encode("utf-8")),
        ("lang", lang.encode("utf-8")),
    ]
    response = _issue_request(api, request_params, gcube_token)
    if not response:
        # Request failed or returned an empty body.
        return None
    return MentionsResponse(response)
python
def mentions(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_SPOT_API):
    '''
    Find possible mentions in a text, do not link them to any entity.
    :param text: the text where to find mentions.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    '''
    request_params = [
        ("text", text.encode("utf-8")),
        ("lang", lang.encode("utf-8")),
    ]
    response = _issue_request(api, request_params, gcube_token)
    if not response:
        # Request failed or returned an empty body.
        return None
    return MentionsResponse(response)
Find possible mentions in a text, do not link them to any entity. :param text: the text where to find mentions. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint.
https://github.com/marcocor/tagme-python/blob/e3a2fcd5a7081b00cd7edcad5d4fc3542a7eaccb/tagme/__init__.py#L205-L216
marcocor/tagme-python
tagme/__init__.py
relatedness_wid
def relatedness_wid(wid_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API):
    '''
    Compute the semantic relatedness between pairs of entities, where each
    entity is given as its Wikipedia ID (an integer).

    :param wid_pairs: either one pair or a list of pairs of Wikipedia IDs.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    '''
    # Delegate to the shared relatedness helper in "id" mode.
    return _relatedness("id", wid_pairs, gcube_token, lang, api)
python
def relatedness_wid(wid_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API):
    '''
    Compute the semantic relatedness between pairs of entities, where each
    entity is given as its Wikipedia ID (an integer).

    :param wid_pairs: either one pair or a list of pairs of Wikipedia IDs.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    '''
    # Delegate to the shared relatedness helper in "id" mode.
    return _relatedness("id", wid_pairs, gcube_token, lang, api)
Get the semantic relatedness among pairs of entities. Entities are indicated by their Wikipedia ID (an integer). :param wid_pairs: either one pair or a list of pairs of Wikipedia IDs. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint.
https://github.com/marcocor/tagme-python/blob/e3a2fcd5a7081b00cd7edcad5d4fc3542a7eaccb/tagme/__init__.py#L219-L228
marcocor/tagme-python
tagme/__init__.py
relatedness_title
def relatedness_title(tt_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API):
    '''
    Get the semantic relatedness among pairs of entities. Entities are
    indicated by their title (a string), not by Wikipedia ID -- the original
    docstring was copy-pasted from relatedness_wid.

    :param tt_pairs: either one pair or a list of pairs of entity titles.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    '''
    # Delegate to the shared relatedness helper in "tt" (title/title) mode.
    return _relatedness("tt", tt_pairs, gcube_token, lang, api)
python
def relatedness_title(tt_pairs, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_REL_API):
    '''
    Get the semantic relatedness among pairs of entities. Entities are
    indicated by their title (a string), not by Wikipedia ID -- the original
    docstring was copy-pasted from relatedness_wid.

    :param tt_pairs: either one pair or a list of pairs of entity titles.
    :param gcube_token: the authentication token provided by the D4Science infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    '''
    # Delegate to the shared relatedness helper in "tt" (title/title) mode.
    return _relatedness("tt", tt_pairs, gcube_token, lang, api)
Get the semantic relatedness among pairs of entities. Entities are indicated by their title (a string). :param tt_pairs: either one pair or a list of pairs of entity titles. :param gcube_token: the authentication token provided by the D4Science infrastructure. :param lang: the Wikipedia language. :param api: the API endpoint.
https://github.com/marcocor/tagme-python/blob/e3a2fcd5a7081b00cd7edcad5d4fc3542a7eaccb/tagme/__init__.py#L231-L240
marcocor/tagme-python
tagme/__init__.py
AnnotateResponse.get_annotations
def get_annotations(self, min_rho=None):
    '''
    Lazily iterate over the annotations found.
    :param min_rho: if set, only yield annotations whose rho-score (confidence) exceeds this value.
    '''
    return (annotation for annotation in self.annotations
            if min_rho is None or annotation.score > min_rho)
python
def get_annotations(self, min_rho=None):
    '''
    Lazily iterate over the annotations found.
    :param min_rho: if set, only yield annotations whose rho-score (confidence) exceeds this value.
    '''
    return (annotation for annotation in self.annotations
            if min_rho is None or annotation.score > min_rho)
Get the list of annotations found. :param min_rho: if set, only get entities with a rho-score (confidence) higher than this.
https://github.com/marcocor/tagme-python/blob/e3a2fcd5a7081b00cd7edcad5d4fc3542a7eaccb/tagme/__init__.py#L67-L72
marcocor/tagme-python
tagme/__init__.py
MentionsResponse.get_mentions
def get_mentions(self, min_lp=None):
    '''
    Lazily iterate over the mentions found.
    :param min_lp: if set, only yield mentions with a link probability higher than this.
    '''
    return (mention for mention in self.mentions
            if min_lp is None or mention.linkprob > min_lp)
python
def get_mentions(self, min_lp=None):
    '''
    Lazily iterate over the mentions found.
    :param min_lp: if set, only yield mentions with a link probability higher than this.
    '''
    return (mention for mention in self.mentions
            if min_lp is None or mention.linkprob > min_lp)
Get the list of mentions found. :param min_lp: if set, only get mentions with a link probability higher than this.
https://github.com/marcocor/tagme-python/blob/e3a2fcd5a7081b00cd7edcad5d4fc3542a7eaccb/tagme/__init__.py#L103-L108