Columns:
index: int64 (values 0 to 731k)
package: string (lengths 2 to 98)
name: string (lengths 1 to 76)
docstring: string (lengths 0 to 281k)
code: string (lengths 4 to 1.07M)
signature: string (lengths 2 to 42.8k)
23,223
flask_cognito_auth.cognito_auth_manager
__init__
Create the CognitoAuthManager instance. You can either pass a flask application in directly to register the extension with the flask app, or call init_app (lazy initialization) after creating the object (in a factory pattern). :param app: A flask application
def __init__(self, app=None): """ Create the CognitoAuthManager instance. You can either pass a flask application in directly to register the extension with the flask app, or call init_app (lazy initialization) after creating the object (in a factory pattern). :param app: A flask application """ if app is not None: self.init(app) self.jwt_key = None
(self, app=None)
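A minimal sketch of the two construction styles the docstring above describes; the Flask wiring is illustrative and the import path follows the package column:

from flask import Flask
from flask_cognito_auth.cognito_auth_manager import CognitoAuthManager

# Style 1: direct registration at construction time.
app = Flask(__name__)
auth = CognitoAuthManager(app)

# Style 2: factory pattern, register later via init().
lazy_auth = CognitoAuthManager()

def create_app():
    app = Flask(__name__)
    lazy_auth.init(app)  # lazy registration, as implemented above
    return app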
23,224
flask_cognito_auth.cognito_auth_manager
init
Register this extension with the flask app. :param app: A flask application
def init(self, app): """ Register this extension with the flask app. :param app: A flask application """ # Save this so we can use it later in the extension if not hasattr(app, 'extensions'): # pragma: no cover app.extensions = {} app.extensions['cognito-flask-auth'] = self
(self, app)
23,225
flask_cognito_auth.decorators
callback_handler
A decorator to handle redirects from AWS Cognito login and signup. It verifies the response and exchanges the code for tokens. This decorator also pushes the basic information into the Flask session. Basic information includes: * username * groups (list of Cognito groups, if any) * id * email * expires * refresh_token * access_token * roles (list of AWS Cognito assume roles) * all SAML assertions. Use this decorator on the redirect endpoint of your application.
def callback_handler(fn): """ A decorator to handle redirects from AWS Cognito login and signup. It verifies the response and exchanges the code for tokens. This decorator also pushes the basic information into the Flask session. Basic information includes: * username * groups (list of Cognito groups, if any) * id * email * expires * refresh_token * access_token * roles (list of AWS Cognito assume roles) * all SAML assertions. Use this decorator on the redirect endpoint of your application. """ @wraps(fn) def wrapper(*args, **kwargs): auth_success = False logger.info("Login is successful from AWS Cognito.") logger.info( "Authenticating AWS Cognito application / client, with code exchange.") csrf_token = config.state csrf_state = None if csrf_token: csrf_state = request.args.get('state') code = request.args.get('code') request_parameters = {'grant_type': 'authorization_code', 'client_id': config.client_id, 'code': code, "redirect_uri": config.redirect_uri} response = requests.post(config.jwt_code_exchange_uri, data=request_parameters, auth=HTTPBasicAuth(config.client_id, config.client_secret)) # the response: # http://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html if response.status_code == requests.codes.ok: logger.info("Code exchange is successful.") logger.info("Validating CSRF state exchange of AWS Cognito") if csrf_state == csrf_token: auth_success = True if csrf_token: logger.info( "CSRF state validation successful. Login is successful for AWS Cognito") logger.info("Decode the access token from response.") verify(response.json()["access_token"]) id_token = verify( response.json()["id_token"], response.json()["access_token"]) username = None email = None provider_type = "cognito" if "identities" in id_token: logger.info( "Identities are present in authentication token. Will use that as priority.") for identity in id_token['identities']: if 'userId' in identity and not email: email = identity['userId'] if "providerType" in identity and "cognito" in provider_type: provider_type = identity["providerType"] if 'primary' in identity and identity['primary']: if 'userId' in identity: email = identity['userId'] if "providerType" in identity: provider_type = identity["providerType"] if not username: username = id_token["cognito:username"] if not email and 'email' in id_token: email = id_token["email"] groups = [] if "cognito:groups" in id_token: groups = id_token['cognito:groups'] roles = [] # Check if claim has preferred_role then set it. if 'cognito:preferred_role' in id_token: roles.append(id_token['cognito:preferred_role']) # If preferred_role is not part of claim and list of # assume roles part of claim, then set it.
if 'cognito:roles' in id_token and not roles: roles = id_token['cognito:roles'] skip_tokens = ["cognito:preferred_role", "cognito:roles", "cognito:username", "cognito:groups", "email", "identities", "at_hash", "sub", "email_verified", "iss", "nonce", "aud", "token_use", "auth_time", "iat", "exp"] saml_assertions = [] for token, val in id_token.items(): if token not in skip_tokens: token_vals = val.replace("[", "") token_vals = token_vals.replace("]", "") token_vals = token_vals.split(",") vals = [] for token_val in token_vals: vals.append(token_val.strip()) saml_assertions.append({token: vals}) update_session(username=username, id=id_token["sub"], groups=groups, email=email, expires=id_token["exp"], refresh_token=response.json()["refresh_token"], access_token=response.json()["access_token"], roles=roles, provider_type=provider_type, saml_assertions=saml_assertions) if not auth_success: error_uri = config.redirect_error_uri if error_uri: resp = redirect(url_for(error_uri)) return resp else: msg = "Something went wrong during authentication" return json.dumps({'Error': msg}), 500 return fn(*args, **kwargs) return wrapper
(fn)
23,229
flask_cognito_auth.decorators
login_handler
A decorator to redirect users to the AWS Cognito login if they aren't already logged in. If already logged in, the user is redirected to the redirect URI. Use this decorator on the login endpoint. This handler will not return a response of its own; rather, it redirects to the redirect URI.
def login_handler(fn): """ A decorator to redirect users to the AWS Cognito login if they aren't already logged in. If already logged in, the user is redirected to the redirect URI. Use this decorator on the login endpoint. This handler will not return a response of its own; rather, it redirects to the redirect URI. """ @wraps(fn) def wrapper(*args, **kwargs): aws_cognito_login = config.login_uri # https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html res = redirect(aws_cognito_login) logger.info("Got Cognito Login, redirecting to AWS Cognito for Auth") return res return wrapper
(fn)
23,230
flask_cognito_auth.decorators
logout_handler
A decorator to log out from AWS Cognito and return to the signout URI. Use this decorator on the Cognito logout endpoint. This handler will not return a response of its own; rather, it redirects to the signout URI. This decorator also clears the basic information from the Flask session. Basic information includes: * username * groups (list of Cognito groups, if any) * id * email * expires * refresh_token * access_token * roles * provider_type * saml_assertions
def logout_handler(fn): """ A decorator to log out from AWS Cognito and return to the signout URI. Use this decorator on the Cognito logout endpoint. This handler will not return a response of its own; rather, it redirects to the signout URI. This decorator also clears the basic information from the Flask session. Basic information includes: * username * groups (list of Cognito groups, if any) * id * email * expires * refresh_token * access_token * roles * provider_type * saml_assertions """ @wraps(fn) def wrapper(*args, **kwargs): update_session(username=None, id=None, groups=[], email=None, expires=None, refresh_token=None, access_token=None, roles=[], provider_type=None, saml_assertions=[]) logger.info( "AWS Cognito Login, redirecting to AWS Cognito for logout and terminating sessions") aws_cognito_logout = config.logout_uri # https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html res = redirect(aws_cognito_logout) return res return wrapper
(fn)
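Pulling the three decorators together, a sketch of the endpoints they are meant to wrap (route paths and the post-login view are illustrative; the Cognito URIs and client settings come from the extension's config):

from flask import Flask, redirect, url_for
from flask_cognito_auth.decorators import callback_handler, login_handler, logout_handler

app = Flask(__name__)

@app.route('/login')
@login_handler
def login():
    pass  # never reached: the decorator redirects to the Cognito login URI

@app.route('/cognito/callback')
@callback_handler
def callback():
    # The session now holds username, groups, email, tokens, roles, etc.
    return redirect(url_for('home'))

@app.route('/logout')
@logout_handler
def logout():
    pass  # never reached: the decorator redirects to the Cognito logout URI

@app.route('/')
def home():
    return 'Logged in'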
23,231
binary.core
convert_units
Converts between and within binary and decimal units. If no ``unit`` is specified, ``n`` is assumed to already be in bytes. If no ``to`` is specified, ``n`` will be converted to the highest unit possible. If neither ``unit`` nor ``to`` is specified, the output will be binary units unless ``si`` is ``True``. If ``exact`` is ``True``, the calculations will use ``decimal.Decimal``. Binary units conform to IEC standards, see: https://en.wikipedia.org/wiki/Binary_prefix https://en.wikipedia.org/wiki/IEC_80000-13 https://www.iso.org/standard/31898.html (paywalled) Decimal units conform to SI standards, see: https://en.wikipedia.org/wiki/International_System_of_Units :param n: The number of ``unit``\ s. :type n: ``int`` or ``float`` :param unit: The unit ``n`` represents. :type unit: one of the global constants :param to: The unit to convert to. :type to: one of the global constants :param si: Assume SI units when neither ``unit`` nor ``to`` is specified. :type si: ``bool`` :param exact: Use decimal.Decimal for calculations. :type exact: ``bool`` :returns: The unit pair: a numeric quantity and the unit's string. :rtype: tuple(quantity, string)
def convert_units(n, unit=BYTE, to=None, si=False, exact=False): """Converts between and within binary and decimal units. If no ``unit`` is specified, ``n`` is assumed to already be in bytes. If no ``to`` is specified, ``n`` will be converted to the highest unit possible. If neither ``unit`` nor ``to`` is specified, the output will be binary units unless ``si`` is ``True``. If ``exact`` is ``True``, the calculations will use ``decimal.Decimal``. Binary units conform to IEC standards, see: https://en.wikipedia.org/wiki/Binary_prefix https://en.wikipedia.org/wiki/IEC_80000-13 https://www.iso.org/standard/31898.html (paywalled) Decimal units conform to SI standards, see: https://en.wikipedia.org/wiki/International_System_of_Units :param n: The number of ``unit``\ s. :type n: ``int`` or ``float`` :param unit: The unit ``n`` represents. :type unit: one of the global constants :param to: The unit to convert to. :type to: one of the global constants :param si: Assume SI units when neither ``unit`` nor ``to`` is specified. :type si: ``bool`` :param exact: Use decimal.Decimal for calculations. :type exact: ``bool`` :returns: The unit pair: a numeric quantity and the unit's string. :rtype: tuple(quantity, string) """ if unit not in PREFIXES: raise ValueError('{} is not a valid binary unit.'.format(unit)) # Always work with bytes to simplify logic. n *= Decimal(unit) if exact else unit if to: try: return n / to, PREFIXES[to] except KeyError: raise ValueError('{} is not a valid binary unit.'.format(to)) if unit in BINARY_PREFIXES and not si: if n < KIBIBYTE: return n, 'B' elif n < MEBIBYTE: return n / KIBIBYTE, 'KiB' elif n < GIBIBYTE: return n / MEBIBYTE, 'MiB' elif n < TEBIBYTE: return n / GIBIBYTE, 'GiB' elif n < PEBIBYTE: return n / TEBIBYTE, 'TiB' elif n < EXBIBYTE: return n / PEBIBYTE, 'PiB' elif n < ZEBIBYTE: return n / EXBIBYTE, 'EiB' elif n < YOBIBYTE: return n / ZEBIBYTE, 'ZiB' else: return n / YOBIBYTE, 'YiB' else: if n < KILOBYTE: return n, 'B' elif n < MEGABYTE: return n / KILOBYTE, 'KB' elif n < GIGABYTE: return n / MEGABYTE, 'MB' elif n < TERABYTE: return n / GIGABYTE, 'GB' elif n < PETABYTE: return n / TERABYTE, 'TB' elif n < EXABYTE: return n / PETABYTE, 'PB' elif n < ZETTABYTE: return n / EXABYTE, 'EB' elif n < YOTTABYTE: return n / ZETTABYTE, 'ZB' else: return n / YOTTABYTE, 'YB'
(n, unit=1, to=None, si=False, exact=False)
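A short sketch of the conversions described above; the unit constants are the module-level values the code references (the import path assumes they live alongside convert_units in binary.core):

from binary.core import convert_units, KIBIBYTE, MEBIBYTE

convert_units(1536)                             # -> (1.5, 'KiB'): auto-scale, binary units
convert_units(1536, si=True)                    # -> (1.536, 'KB'): auto-scale, decimal units
convert_units(1.5, unit=MEBIBYTE, to=KIBIBYTE)  # -> (1536.0, 'KiB'): explicit target unit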
23,235
yamlfix.services
fix_code
Fix yaml source code to correct the format. It corrects these errors: * Add --- at the beginning of the file. * Correct truthy strings: 'True' -> true, 'no' -> false * Remove unnecessary apostrophes: `title: 'Why we sleep'` -> `title: Why we sleep`. Args: source_code: Source code to be corrected. config: Small set of user provided configuration options for yamlfix. Returns: Corrected source code.
def fix_code(source_code: str, config: Optional[YamlfixConfig] = None) -> str: """Fix yaml source code to correct the format. It corrects these errors: * Add --- at the beginning of the file. * Correct truthy strings: 'True' -> true, 'no' -> false * Remove unnecessary apostrophes: `title: 'Why we sleep'` -> `title: Why we sleep`. Args: source_code: Source code to be corrected. config: Small set of user provided configuration options for yamlfix. Returns: Corrected source code. """ # Leave Ansible vaults unmodified if source_code.startswith("$ANSIBLE_VAULT;"): return source_code if source_code.startswith("#!"): # Skip the shebang line if present, leaving it unmodified eolpos = source_code.find("\n") + 1 shebang = source_code[:eolpos] source_code = source_code[eolpos:] else: shebang = "" if source_code.startswith("#jinja2:") or source_code.startswith("# jinja2:"): eolpos = source_code.find("\n") + 1 jinja2 = source_code[:eolpos] source_code = source_code[eolpos:] else: jinja2 = "" yaml = Yaml(config=config) fixer = SourceCodeFixer(yaml=yaml, config=config) source_code = fixer.fix(source_code=source_code) return jinja2 + shebang + source_code
(source_code: str, config: Optional[yamlfix.model.YamlfixConfig] = None) -> str
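A quick usage sketch of fix_code, importing it from the module named in the package column:

from yamlfix.services import fix_code

print(fix_code("key: 'True'\n"))
# Expected, per the rules listed above: a leading '---' is added and the
# quoted truthy string is normalized, e.g.
# ---
# key: true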
23,236
yamlfix.services
fix_files
Fix the yaml source code of a list of files. If the input is taken from stdin, it will return the fixed value. Args: files: List of files to fix. dry_run: Whether to write changes or not. config: Small set of user provided configuration options for yamlfix. Returns: A tuple with the following items: * Fixed code or None. * A bool to indicate whether at least one file has been changed.
def fix_files( # pylint: disable=too-many-branches files: Files, dry_run: Optional[bool] = None, config: Optional[YamlfixConfig] = None ) -> Union[Optional[str], Tuple[Optional[str], bool]]: # noqa: TAE002 """Fix the yaml source code of a list of files. If the input is taken from stdin, it will return the fixed value. Args: files: List of files to fix. dry_run: Whether to write changes or not. config: Small set of user provided configuration options for yamlfix. Returns: A tuple with the following items: * Fixed code or None. * A bool to indicate whether at least one file has been changed. """ changed = False if dry_run is None: warnings.warn( """ From 2023-01-12 fix_files will change the return type from `Optional[str]` to Tuple[Optional[str], bool], where the first element of the Tuple is the fixed source and the second a bool that returns whether the source has changed. For more information check https://github.com/lyz-code/yamlfix/pull/182 """, UserWarning, stacklevel=2, ) total_fixed = 0 for file_ in files: if isinstance(file_, str): with open(file_, "r", encoding="utf-8") as file_descriptor: source = file_descriptor.read() file_name = file_ else: source = file_.read() file_name = file_.name log.debug("Fixing file %s...", file_name) fixed_source = fix_code(source, config) if fixed_source != source: changed = True if dry_run: log.info("Would fix %s", file_name) else: log.info("Fixed %s", file_name) total_fixed += 1 else: log.log(15, "%s is already well formatted", file_name) if file_name == "<stdin>": if dry_run is None: return fixed_source return fixed_source, changed if fixed_source != source: if dry_run: continue if isinstance(file_, str): with open(file_, "w", encoding="utf-8") as file_descriptor: file_descriptor.write(fixed_source) else: file_.seek(0) file_.write(fixed_source) file_.truncate() log.info( "Checked %d files: %d fixed, %d left unchanged", len(files), total_fixed, len(files) - total_fixed, ) if dry_run is None: return None return None, changed
(files: Union[Tuple[_io.TextIOWrapper], List[str]], dry_run: Optional[bool] = None, config: Optional[yamlfix.model.YamlfixConfig] = None) -> Union[str, NoneType, Tuple[Optional[str], bool]]
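Given the dry_run semantics above, a sketch of a check-only run (the tuple return applies because dry_run is not None; the file name is illustrative):

from yamlfix.services import fix_files

_, changed = fix_files(['config.yaml'], dry_run=True)
if changed:
    print('config.yaml needs reformatting')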
23,239
cov_core
Central
Implementation for centralised operation.
class Central(CovController): """Implementation for centralised operation.""" def start(self): """Erase any previous coverage data and start coverage.""" self.cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, config_file=self.cov_config) self.cov.erase() self.cov.start() self.set_env() def finish(self): """Stop coverage, save data to file and set the list of coverage objects to report on.""" self.unset_env() self.cov.stop() self.cov.combine() self.cov.save() node_desc = self.get_node_desc(sys.platform, sys.version_info) self.node_descs.add(node_desc) def summary(self, stream): """Produce coverage reports.""" CovController.summary(self, stream)
(cov_source, cov_report, cov_config, config=None, nodeid=None)
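A sketch of the controller lifecycle implied by the methods above; the argument values are illustrative:

import sys
from cov_core import Central

controller = Central(cov_source=['mypkg'],
                     cov_report=['term-missing'],
                     cov_config='.coveragerc')
controller.start()              # erase old data, start coverage, export env vars
# ... run the test session here ...
controller.finish()             # stop, combine and save the coverage data
controller.summary(sys.stdout)  # write the terminal report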
23,240
cov_core
__init__
Get some common config used by multiple derived classes.
def __init__(self, cov_source, cov_report, cov_config, config=None, nodeid=None): """Get some common config used by multiple derived classes.""" self.cov_source = cov_source self.cov_report = cov_report self.cov_config = cov_config self.config = config self.nodeid = nodeid self.cov = None self.node_descs = set() self.failed_slaves = [] self.topdir = os.getcwd() self.cov_data_file = '.coverage'
(self, cov_source, cov_report, cov_config, config=None, nodeid=None)
23,241
cov_core
finish
Stop coverage, save data to file and set the list of coverage objects to report on.
def finish(self): """Stop coverage, save data to file and set the list of coverage objects to report on.""" self.unset_env() self.cov.stop() self.cov.combine() self.cov.save() node_desc = self.get_node_desc(sys.platform, sys.version_info) self.node_descs.add(node_desc)
(self)
23,242
cov_core
get_node_desc
Return a description of this node.
@staticmethod def get_node_desc(platform, version_info): """Return a description of this node.""" return 'platform %s, python %s' % (platform, '%s.%s.%s-%s-%s' % version_info[:5])
(platform, version_info)
23,243
cov_core
sep
null
@staticmethod def sep(stream, s, txt): if hasattr(stream, 'sep'): stream.sep(s, txt) else: sep_total = max((70 - 2 - len(txt)), 2) sep_len = sep_total // 2 sep_extra = sep_total % 2 out = '%s %s %s\n' % (s * sep_len, txt, s * (sep_len + sep_extra)) stream.write(out)
(stream, s, txt)
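A minimal sketch of the fallback branch above, using an in-memory stream that lacks a .sep method (assuming CovController is importable from cov_core):

import io
from cov_core import CovController

buf = io.StringIO()
CovController.sep(buf, '-', 'coverage')
print(buf.getvalue())
# 70-character banner: 30 dashes, ' coverage ', 30 dashes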
23,244
cov_core
set_env
Put info about coverage into the env so that subprocesses can activate coverage.
def set_env(self): """Put info about coverage into the env so that subprocesses can activate coverage.""" if self.cov_source is None: os.environ['COV_CORE_SOURCE'] = '' else: os.environ['COV_CORE_SOURCE'] = UNIQUE_SEP.join(self.cov_source) os.environ['COV_CORE_DATA_FILE'] = self.cov_data_file os.environ['COV_CORE_CONFIG'] = self.cov_config
(self)
23,245
cov_core
start
Erase any previous coverage data and start coverage.
def start(self): """Erase any previous coverage data and start coverage.""" self.cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, config_file=self.cov_config) self.cov.erase() self.cov.start() self.set_env()
(self)
23,246
cov_core
summary
Produce coverage reports.
def summary(self, stream): """Produce coverage reports.""" CovController.summary(self, stream)
(self, stream)
23,247
cov_core
unset_env
Remove coverage info from env.
@staticmethod def unset_env(): """Remove coverage info from env.""" os.environ.pop('COV_CORE_SOURCE', None) os.environ.pop('COV_CORE_DATA_FILE', None) os.environ.pop('COV_CORE_CONFIG', None)
()
23,248
cov_core
CovController
Base class for different plugin implementations.
class CovController(object): """Base class for different plugin implementations.""" def __init__(self, cov_source, cov_report, cov_config, config=None, nodeid=None): """Get some common config used by multiple derived classes.""" self.cov_source = cov_source self.cov_report = cov_report self.cov_config = cov_config self.config = config self.nodeid = nodeid self.cov = None self.node_descs = set() self.failed_slaves = [] self.topdir = os.getcwd() self.cov_data_file = '.coverage' def set_env(self): """Put info about coverage into the env so that subprocesses can activate coverage.""" if self.cov_source is None: os.environ['COV_CORE_SOURCE'] = '' else: os.environ['COV_CORE_SOURCE'] = UNIQUE_SEP.join(self.cov_source) os.environ['COV_CORE_DATA_FILE'] = self.cov_data_file os.environ['COV_CORE_CONFIG'] = self.cov_config @staticmethod def unset_env(): """Remove coverage info from env.""" os.environ.pop('COV_CORE_SOURCE', None) os.environ.pop('COV_CORE_DATA_FILE', None) os.environ.pop('COV_CORE_CONFIG', None) @staticmethod def get_node_desc(platform, version_info): """Return a description of this node.""" return 'platform %s, python %s' % (platform, '%s.%s.%s-%s-%s' % version_info[:5]) @staticmethod def sep(stream, s, txt): if hasattr(stream, 'sep'): stream.sep(s, txt) else: sep_total = max((70 - 2 - len(txt)), 2) sep_len = sep_total // 2 sep_extra = sep_total % 2 out = '%s %s %s\n' % (s * sep_len, txt, s * (sep_len + sep_extra)) stream.write(out) def summary(self, stream): """Produce coverage reports.""" # Output coverage section header. if len(self.node_descs) == 1: self.sep(stream, '-', 'coverage: %s' % ''.join(self.node_descs)) else: self.sep(stream, '-', 'coverage') for node_desc in sorted(self.node_descs): self.sep(stream, ' ', '%s' % node_desc) # Produce terminal report if wanted. if 'term' in self.cov_report or 'term-missing' in self.cov_report: show_missing = 'term-missing' in self.cov_report self.cov.report(show_missing=show_missing, ignore_errors=True, file=stream) # Produce annotated source code report if wanted. if 'annotate' in self.cov_report: self.cov.annotate(ignore_errors=True) stream.write('Coverage annotated source written next to source\n') # Produce html report if wanted. if 'html' in self.cov_report: self.cov.html_report(ignore_errors=True) stream.write('Coverage HTML written to dir %s\n' % self.cov.config.html_dir) # Produce xml report if wanted. if 'xml' in self.cov_report: self.cov.xml_report(ignore_errors=True) stream.write('Coverage XML written to file %s\n' % self.cov.config.xml_output) # Report on any failed slaves. if self.failed_slaves: self.sep(stream, '-', 'coverage: failed slaves') stream.write('The following slaves failed to return coverage data, ' 'ensure that pytest-cov is installed on these slaves.\n') for node in self.failed_slaves: stream.write('%s\n' % node.gateway.id)
(cov_source, cov_report, cov_config, config=None, nodeid=None)
23,253
cov_core
summary
Produce coverage reports.
def summary(self, stream): """Produce coverage reports.""" # Output coverage section header. if len(self.node_descs) == 1: self.sep(stream, '-', 'coverage: %s' % ''.join(self.node_descs)) else: self.sep(stream, '-', 'coverage') for node_desc in sorted(self.node_descs): self.sep(stream, ' ', '%s' % node_desc) # Produce terminal report if wanted. if 'term' in self.cov_report or 'term-missing' in self.cov_report: show_missing = 'term-missing' in self.cov_report self.cov.report(show_missing=show_missing, ignore_errors=True, file=stream) # Produce annotated source code report if wanted. if 'annotate' in self.cov_report: self.cov.annotate(ignore_errors=True) stream.write('Coverage annotated source written next to source\n') # Produce html report if wanted. if 'html' in self.cov_report: self.cov.html_report(ignore_errors=True) stream.write('Coverage HTML written to dir %s\n' % self.cov.config.html_dir) # Produce xml report if wanted. if 'xml' in self.cov_report: self.cov.xml_report(ignore_errors=True) stream.write('Coverage XML written to file %s\n' % self.cov.config.xml_output) # Report on any failed slaves. if self.failed_slaves: self.sep(stream, '-', 'coverage: failed slaves') stream.write('The following slaves failed to return coverage data, ' 'ensure that pytest-cov is installed on these slaves.\n') for node in self.failed_slaves: stream.write('%s\n' % node.gateway.id)
(self, stream)
23,255
cov_core
DistMaster
Implementation for distributed master.
class DistMaster(CovController): """Implementation for distributed master.""" def start(self): """Ensure coverage rc file rsynced if appropriate.""" if self.cov_config and os.path.exists(self.cov_config): self.config.option.rsyncdir.append(self.cov_config) self.cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, config_file=self.cov_config) self.cov.erase() self.cov.start() self.cov.config.paths['source'] = [self.topdir] def configure_node(self, node): """Slaves need to know if they are collocated and what files have moved.""" node.slaveinput['cov_master_host'] = socket.gethostname() node.slaveinput['cov_master_topdir'] = self.topdir node.slaveinput['cov_master_rsync_roots'] = [str(root) for root in node.nodemanager.roots] def testnodedown(self, node, error): """Collect data file name from slave. Also save data to file if slave not collocated.""" # If slave doesn't return any data then it is likely that this # plugin didn't get activated on the slave side. if not (hasattr(node, 'slaveoutput') and 'cov_slave_node_id' in node.slaveoutput): self.failed_slaves.append(node) return # If slave is not collocated then we must save the data file # that it returns to us. if 'cov_slave_lines' in node.slaveoutput: cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, data_suffix=node.slaveoutput['cov_slave_node_id'], config_file=self.cov_config) cov.start() cov.data.lines = node.slaveoutput['cov_slave_lines'] cov.data.arcs = node.slaveoutput['cov_slave_arcs'] cov.stop() cov.save() path = node.slaveoutput['cov_slave_path'] self.cov.config.paths['source'].append(path) # Record the slave types that contribute to the data file. rinfo = node.gateway._rinfo() node_desc = self.get_node_desc(rinfo.platform, rinfo.version_info) self.node_descs.add(node_desc) def finish(self): """Combines coverage data and sets the list of coverage objects to report on.""" # Combine all the suffix files into the data file. self.cov.stop() self.cov.combine() self.cov.save() def summary(self, stream): """Produce coverage reports.""" CovController.summary(self, stream)
(cov_source, cov_report, cov_config, config=None, nodeid=None)
23,257
cov_core
configure_node
Slaves need to know if they are collocated and what files have moved.
def configure_node(self, node): """Slaves need to know if they are collocated and what files have moved.""" node.slaveinput['cov_master_host'] = socket.gethostname() node.slaveinput['cov_master_topdir'] = self.topdir node.slaveinput['cov_master_rsync_roots'] = [str(root) for root in node.nodemanager.roots]
(self, node)
23,258
cov_core
finish
Combines coverage data and sets the list of coverage objects to report on.
def finish(self): """Combines coverage data and sets the list of coverage objects to report on.""" # Combine all the suffix files into the data file. self.cov.stop() self.cov.combine() self.cov.save()
(self)
23,262
cov_core
start
Ensure coverage rc file rsynced if appropriate.
def start(self): """Ensure coverage rc file rsynced if appropriate.""" if self.cov_config and os.path.exists(self.cov_config): self.config.option.rsyncdir.append(self.cov_config) self.cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, config_file=self.cov_config) self.cov.erase() self.cov.start() self.cov.config.paths['source'] = [self.topdir]
(self)
23,264
cov_core
testnodedown
Collect data file name from slave. Also save data to file if slave not collocated.
def testnodedown(self, node, error): """Collect data file name from slave. Also save data to file if slave not collocated.""" # If slave doesn't return any data then it is likely that this # plugin didn't get activated on the slave side. if not (hasattr(node, 'slaveoutput') and 'cov_slave_node_id' in node.slaveoutput): self.failed_slaves.append(node) return # If slave is not collocated then we must save the data file # that it returns to us. if 'cov_slave_lines' in node.slaveoutput: cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, data_suffix=node.slaveoutput['cov_slave_node_id'], config_file=self.cov_config) cov.start() cov.data.lines = node.slaveoutput['cov_slave_lines'] cov.data.arcs = node.slaveoutput['cov_slave_arcs'] cov.stop() cov.save() path = node.slaveoutput['cov_slave_path'] self.cov.config.paths['source'].append(path) # Record the slave types that contribute to the data file. rinfo = node.gateway._rinfo() node_desc = self.get_node_desc(rinfo.platform, rinfo.version_info) self.node_descs.add(node_desc)
(self, node, error)
23,266
cov_core
DistSlave
Implementation for distributed slaves.
class DistSlave(CovController): """Implementation for distributed slaves.""" def start(self): """Determine what data file and suffix to contribute to and start coverage.""" # Determine whether we are collocated with master. self.is_collocated = bool(socket.gethostname() == self.config.slaveinput['cov_master_host'] and self.topdir == self.config.slaveinput['cov_master_topdir']) # If we are not collocated then rewrite master paths to slave paths. if not self.is_collocated: master_topdir = self.config.slaveinput['cov_master_topdir'] slave_topdir = self.topdir self.cov_source = [source.replace(master_topdir, slave_topdir) for source in self.cov_source] self.cov_data_file = self.cov_data_file.replace(master_topdir, slave_topdir) self.cov_config = self.cov_config.replace(master_topdir, slave_topdir) # Our slave node id makes us unique from all other slaves so # adjust the data file that we contribute to and the master # will combine our data with other slaves later. self.cov_data_file += '.%s' % self.nodeid # Erase any previous data and start coverage. self.cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, config_file=self.cov_config) self.cov.erase() self.cov.start() self.set_env() def finish(self): """Stop coverage and send relevant info back to the master.""" self.unset_env() self.cov.stop() self.cov.combine() self.cov.save() if self.is_collocated: # If we are collocated then just inform the master of our # data file to indicate that we have finished. self.config.slaveoutput['cov_slave_node_id'] = self.nodeid else: # If we are not collocated then add the current path # and coverage data to the output so we can combine # it on the master node. # Send all the data to the master over the channel. self.config.slaveoutput['cov_slave_path'] = self.topdir self.config.slaveoutput['cov_slave_node_id'] = self.nodeid self.config.slaveoutput['cov_slave_lines'] = self.cov.data.lines self.config.slaveoutput['cov_slave_arcs'] = self.cov.data.arcs def summary(self, stream): """Only the master reports so do nothing.""" pass
(cov_source, cov_report, cov_config, config=None, nodeid=None)
23,268
cov_core
finish
Stop coverage and send relevant info back to the master.
def finish(self): """Stop coverage and send relevant info back to the master.""" self.unset_env() self.cov.stop() self.cov.combine() self.cov.save() if self.is_collocated: # If we are collocated then just inform the master of our # data file to indicate that we have finished. self.config.slaveoutput['cov_slave_node_id'] = self.nodeid else: # If we are not collocated then add the current path # and coverage data to the output so we can combine # it on the master node. # Send all the data to the master over the channel. self.config.slaveoutput['cov_slave_path'] = self.topdir self.config.slaveoutput['cov_slave_node_id'] = self.nodeid self.config.slaveoutput['cov_slave_lines'] = self.cov.data.lines self.config.slaveoutput['cov_slave_arcs'] = self.cov.data.arcs
(self)
23,272
cov_core
start
Determine what data file and suffix to contribute to and start coverage.
def start(self): """Determine what data file and suffix to contribute to and start coverage.""" # Determine whether we are collocated with master. self.is_collocated = bool(socket.gethostname() == self.config.slaveinput['cov_master_host'] and self.topdir == self.config.slaveinput['cov_master_topdir']) # If we are not collocated then rewrite master paths to slave paths. if not self.is_collocated: master_topdir = self.config.slaveinput['cov_master_topdir'] slave_topdir = self.topdir self.cov_source = [source.replace(master_topdir, slave_topdir) for source in self.cov_source] self.cov_data_file = self.cov_data_file.replace(master_topdir, slave_topdir) self.cov_config = self.cov_config.replace(master_topdir, slave_topdir) # Our slave node id makes us unique from all other slaves so # adjust the data file that we contribute to and the master # will combine our data with other slaves later. self.cov_data_file += '.%s' % self.nodeid # Erase any previous data and start coverage. self.cov = coverage.coverage(source=self.cov_source, data_file=self.cov_data_file, config_file=self.cov_config) self.cov.erase() self.cov.start() self.set_env()
(self)
23,273
cov_core
summary
Only the master reports so do nothing.
def summary(self, stream): """Only the master reports so do nothing.""" pass
(self, stream)
23,278
cov_core
multiprocessing_finish
null
def multiprocessing_finish(cov): cov.stop() cov.save()
(cov)
23,279
cov_core
multiprocessing_start
null
def multiprocessing_start(obj): cov = cov_core_init.init() import multiprocessing.util multiprocessing.util.Finalize( None, multiprocessing_finish, args=(cov,), exitpriority=1000)
(obj)
23,283
cronex
CronExpression
null
class CronExpression(object): def __init__(self, line, epoch=DEFAULT_EPOCH, epoch_utc_offset=0): """ Instantiates a CronExpression object with an optionally defined epoch. If the epoch is defined, the UTC offset can be specified one of two ways: as the sixth element in 'epoch' or supplied in epoch_utc_offset. The epoch should be defined down to the minute sorted by descending significance. """ for key, value in SUBSTITUTIONS.items(): if line.startswith(key): line = line.replace(key, value) break fields = line.split(None, 5) if len(fields) == 5: fields.append('') minutes, hours, dom, months, dow, self.comment = fields dow = dow.replace('7', '0').replace('?', '*') dom = dom.replace('?', '*') for monthstr, monthnum in MONTH_NAMES: months = months.upper().replace(monthstr, str(monthnum)) for dowstr, downum in DAY_NAMES: dow = dow.upper().replace(dowstr, str(downum)) self.string_tab = [minutes, hours, dom, months, dow] self.compute_numtab() if len(epoch) == 5: y, mo, d, h, m = epoch self.epoch = (y, mo, d, h, m, epoch_utc_offset) else: self.epoch = epoch def __repr__(self): base = self.__class__.__name__ + "(%s)" cron_line = self.string_tab + [str(self.comment)] if not self.comment: cron_line.pop() arguments = '"' + ' '.join(cron_line) + '"' if self.epoch != DEFAULT_EPOCH: return base % (arguments + ", epoch=" + repr(self.epoch)) else: return base % arguments def __str__(self): return repr(self) def compute_numtab(self): """ Recomputes the sets for the static ranges of the trigger time. This method should only be called by the user if the string_tab member is modified. """ self.numerical_tab = [] for field_str, span in zip(self.string_tab, FIELD_RANGES): split_field_str = field_str.split(',') if len(split_field_str) > 1 and "*" in split_field_str: raise ValueError("\"*\" must be alone in a field.") unified = set() for cron_atom in split_field_str: # parse_atom only handles static cases if not(is_special_atom(cron_atom, span)): unified.update(parse_atom(cron_atom, span)) self.numerical_tab.append(unified) if self.string_tab[2] == "*" and self.string_tab[4] != "*": self.numerical_tab[2] = set() elif self.string_tab[4] == "*" and self.string_tab[2] != "*": self.numerical_tab[4] = set() def check_trigger(self, date_tuple, utc_offset=0): """ Returns boolean indicating if the trigger is active at the given time. The date tuple should be in the local time. Unless periodicities are used, utc_offset does not need to be specified. If periodicities are used, specifically in the hour and minutes fields, it is crucial that the utc_offset is specified. """ year, month, day, hour, mins = date_tuple given_date = datetime.date(year, month, day) zeroday = datetime.date(*self.epoch[:3]) last_dom = calendar.monthrange(year, month)[-1] dom_matched = True # In calendar and datetime.date.weekday, Monday = 0 given_dow = (datetime.date.weekday(given_date) + 1) % 7 first_dow = (given_dow + 1 - day) % 7 # Figure out how much time has passed from the epoch to the given date utc_diff = utc_offset - self.epoch[5] mod_delta_yrs = year - self.epoch[0] mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12 mod_delta_day = (given_date - zeroday).days mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60 # Makes iterating through like components easier. 
quintuple = zip( (mins, hour, day, month, given_dow), self.numerical_tab, self.string_tab, (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon, mod_delta_day), FIELD_RANGES) for value, valid_values, field_str, delta_t, field_type in quintuple: # All valid, static values for the fields are stored in sets if value in valid_values: continue # The following for loop implements the logic for context # sensitive and epoch sensitive constraints. break statements, # which are executed when a match is found, lead to a continue # in the outer loop. If there are no matches found, the given date # does not match expression constraints, so the function returns # False as seen at the end of this for...else... construct. for cron_atom in field_str.split(','): if cron_atom[0] == '%': if not(delta_t % int(cron_atom[1:])): break elif '#' in cron_atom: D, N = int(cron_atom[0]), int(cron_atom[2]) # Computes Nth occurrence of D day of the week if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day: break elif cron_atom[-1] == 'W': target = min(int(cron_atom[:-1]), last_dom) lands_on = (first_dow + target - 1) % 7 if lands_on == 0: # Shift from Sun. to Mon. unless Mon. is next month if target < last_dom: target += 1 else: target -= 2 elif lands_on == 6: # Shift from Sat. to Fri. unless Fri. in prior month if target > 1: target -= 1 else: target += 2 # Break if the day is correct, and target is a weekday if target == day and (first_dow + target) % 7 > 1: break elif cron_atom[-1] == 'L': # In dom field, L means the last day of the month target = last_dom if field_type == DAYS_OF_WEEK: # Calculates the last occurrence of given day of week desired_dow = int(cron_atom[:-1]) target = (((desired_dow - first_dow) % 7) + 29) if target > last_dom: target -= 7 if target == day: break else: # See 2010.11.15 of CHANGELOG if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*': dom_matched = False continue elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*': # If we got here, then days of months validated so it does # not matter that days of the week failed. return dom_matched # None of the expressions matched which means this field fails return False # Arriving at this point means the date landed within the constraints # of all fields; the associated trigger should be fired. return True
(line, epoch=(1970, 1, 1, 0, 0, 0), epoch_utc_offset=0)
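A short usage sketch of the class above; the dates are chosen so the day-of-week logic is visible, and the trailing text on the cron line becomes the comment field:

from cronex import CronExpression

job = CronExpression('0 0 * * 1-5 nightly-weekday-job')
job.check_trigger((2023, 6, 5, 0, 0))  # Monday 2023-06-05 00:00 -> True
job.check_trigger((2023, 6, 4, 0, 0))  # Sunday 2023-06-04 00:00 -> False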
23,284
cronex
__init__
Instantiates a CronExpression object with an optionally defined epoch. If the epoch is defined, the UTC offset can be specified one of two ways: as the sixth element in 'epoch' or supplied in epoch_utc_offset. The epoch should be defined down to the minute sorted by descending significance.
def __init__(self, line, epoch=DEFAULT_EPOCH, epoch_utc_offset=0): """ Instantiates a CronExpression object with an optionally defined epoch. If the epoch is defined, the UTC offset can be specified one of two ways: as the sixth element in 'epoch' or supplied in epoch_utc_offset. The epoch should be defined down to the minute sorted by descending significance. """ for key, value in SUBSTITUTIONS.items(): if line.startswith(key): line = line.replace(key, value) break fields = line.split(None, 5) if len(fields) == 5: fields.append('') minutes, hours, dom, months, dow, self.comment = fields dow = dow.replace('7', '0').replace('?', '*') dom = dom.replace('?', '*') for monthstr, monthnum in MONTH_NAMES: months = months.upper().replace(monthstr, str(monthnum)) for dowstr, downum in DAY_NAMES: dow = dow.upper().replace(dowstr, str(downum)) self.string_tab = [minutes, hours, dom, months, dow] self.compute_numtab() if len(epoch) == 5: y, mo, d, h, m = epoch self.epoch = (y, mo, d, h, m, epoch_utc_offset) else: self.epoch = epoch
(self, line, epoch=(1970, 1, 1, 0, 0, 0), epoch_utc_offset=0)
23,285
cronex
__repr__
null
def __repr__(self): base = self.__class__.__name__ + "(%s)" cron_line = self.string_tab + [str(self.comment)] if not self.comment: cron_line.pop() arguments = '"' + ' '.join(cron_line) + '"' if self.epoch != DEFAULT_EPOCH: return base % (arguments + ", epoch=" + repr(self.epoch)) else: return base % arguments
(self)
23,286
cronex
__str__
null
def __str__(self): return repr(self)
(self)
23,287
cronex
check_trigger
Returns boolean indicating if the trigger is active at the given time. The date tuple should be in the local time. Unless periodicities are used, utc_offset does not need to be specified. If periodicities are used, specifically in the hour and minutes fields, it is crucial that the utc_offset is specified.
def check_trigger(self, date_tuple, utc_offset=0): """ Returns boolean indicating if the trigger is active at the given time. The date tuple should be in the local time. Unless periodicities are used, utc_offset does not need to be specified. If periodicities are used, specifically in the hour and minutes fields, it is crucial that the utc_offset is specified. """ year, month, day, hour, mins = date_tuple given_date = datetime.date(year, month, day) zeroday = datetime.date(*self.epoch[:3]) last_dom = calendar.monthrange(year, month)[-1] dom_matched = True # In calendar and datetime.date.weekday, Monday = 0 given_dow = (datetime.date.weekday(given_date) + 1) % 7 first_dow = (given_dow + 1 - day) % 7 # Figure out how much time has passed from the epoch to the given date utc_diff = utc_offset - self.epoch[5] mod_delta_yrs = year - self.epoch[0] mod_delta_mon = month - self.epoch[1] + mod_delta_yrs * 12 mod_delta_day = (given_date - zeroday).days mod_delta_hrs = hour - self.epoch[3] + mod_delta_day * 24 + utc_diff mod_delta_min = mins - self.epoch[4] + mod_delta_hrs * 60 # Makes iterating through like components easier. quintuple = zip( (mins, hour, day, month, given_dow), self.numerical_tab, self.string_tab, (mod_delta_min, mod_delta_hrs, mod_delta_day, mod_delta_mon, mod_delta_day), FIELD_RANGES) for value, valid_values, field_str, delta_t, field_type in quintuple: # All valid, static values for the fields are stored in sets if value in valid_values: continue # The following for loop implements the logic for context # sensitive and epoch sensitive constraints. break statements, # which are executed when a match is found, lead to a continue # in the outer loop. If there are no matches found, the given date # does not match expression constraints, so the function returns # False as seen at the end of this for...else... construct. for cron_atom in field_str.split(','): if cron_atom[0] == '%': if not(delta_t % int(cron_atom[1:])): break elif '#' in cron_atom: D, N = int(cron_atom[0]), int(cron_atom[2]) # Computes Nth occurrence of D day of the week if (((D - first_dow) % 7) + 1 + 7 * (N - 1)) == day: break elif cron_atom[-1] == 'W': target = min(int(cron_atom[:-1]), last_dom) lands_on = (first_dow + target - 1) % 7 if lands_on == 0: # Shift from Sun. to Mon. unless Mon. is next month if target < last_dom: target += 1 else: target -= 2 elif lands_on == 6: # Shift from Sat. to Fri. unless Fri. in prior month if target > 1: target -= 1 else: target += 2 # Break if the day is correct, and target is a weekday if target == day and (first_dow + target) % 7 > 1: break elif cron_atom[-1] == 'L': # In dom field, L means the last day of the month target = last_dom if field_type == DAYS_OF_WEEK: # Calculates the last occurrence of given day of week desired_dow = int(cron_atom[:-1]) target = (((desired_dow - first_dow) % 7) + 29) if target > last_dom: target -= 7 if target == day: break else: # See 2010.11.15 of CHANGELOG if field_type == DAYS_OF_MONTH and self.string_tab[4] != '*': dom_matched = False continue elif field_type == DAYS_OF_WEEK and self.string_tab[2] != '*': # If we got here, then days of months validated so it does # not matter that days of the week failed. return dom_matched # None of the expressions matched which means this field fails return False # Arriving at this point means the date landed within the constraints # of all fields; the associated trigger should be fired. return True
(self, date_tuple, utc_offset=0)
23,288
cronex
compute_numtab
Recomputes the sets for the static ranges of the trigger time. This method should only be called by the user if the string_tab member is modified.
def compute_numtab(self): """ Recomputes the sets for the static ranges of the trigger time. This method should only be called by the user if the string_tab member is modified. """ self.numerical_tab = [] for field_str, span in zip(self.string_tab, FIELD_RANGES): split_field_str = field_str.split(',') if len(split_field_str) > 1 and "*" in split_field_str: raise ValueError("\"*\" must be alone in a field.") unified = set() for cron_atom in split_field_str: # parse_atom only handles static cases if not(is_special_atom(cron_atom, span)): unified.update(parse_atom(cron_atom, span)) self.numerical_tab.append(unified) if self.string_tab[2] == "*" and self.string_tab[4] != "*": self.numerical_tab[2] = set() elif self.string_tab[4] == "*" and self.string_tab[2] != "*": self.numerical_tab[4] = set()
(self)
23,291
cronex
is_special_atom
Returns a boolean indicating whether or not the string can be parsed by parse_atom to produce a static set. In the process of examining the string, the syntax of any special characters used is also checked.
def is_special_atom(cron_atom, span): """ Returns a boolean indicating whether or not the string can be parsed by parse_atom to produce a static set. In the process of examining the string, the syntax of any special characters used is also checked. """ for special_char in ('%', '#', 'L', 'W'): if special_char not in cron_atom: continue if special_char == '#': if span != DAYS_OF_WEEK: raise ValueError("\"#\" invalid where used.") elif not VALIDATE_POUND.match(cron_atom): raise ValueError("\"#\" syntax incorrect.") elif special_char == "W": if span != DAYS_OF_MONTH: raise ValueError("\"W\" invalid where used.") elif not(VALIDATE_W.match(cron_atom) and int(cron_atom[:-1]) > 0): raise ValueError("\"W\" syntax incorrect.") elif special_char == "L": if span not in L_FIELDS: raise ValueError("\"L\" invalid where used.") elif span == DAYS_OF_MONTH: if cron_atom != "L": raise ValueError("\"L\" must be alone in days of month.") elif span == DAYS_OF_WEEK: if not VALIDATE_L_IN_DOW.match(cron_atom): raise ValueError("\"L\" syntax incorrect.") elif special_char == "%": if not(cron_atom[1:].isdigit() and int(cron_atom[1:]) > 1): raise ValueError("\"%\" syntax incorrect.") return True else: return False
(cron_atom, span)
23,292
cronex
map
null
def map(*args): return list(__builtins__['map'](*args))
(*args)
23,293
cronex
parse_atom
Returns a set containing valid values for a given cron-style range of numbers. The 'minmax' argument is a two-element iterable containing the inclusive lower and upper limits of the expression. Examples: >>> parse_atom("1-5",(0,6)) set([1, 2, 3, 4, 5]) >>> parse_atom("*/6",(0,23)) set([0, 6, 12, 18]) >>> parse_atom("18-6/4",(0,23)) set([18, 22, 0, 4]) >>> parse_atom("*/9",(0,23)) set([0, 9, 18])
def parse_atom(parse, minmax): """ Returns a set containing valid values for a given cron-style range of numbers. The 'minmax' argument is a two-element iterable containing the inclusive lower and upper limits of the expression. Examples: >>> parse_atom("1-5",(0,6)) set([1, 2, 3, 4, 5]) >>> parse_atom("*/6",(0,23)) set([0, 6, 12, 18]) >>> parse_atom("18-6/4",(0,23)) set([18, 22, 0, 4]) >>> parse_atom("*/9",(0,23)) set([0, 9, 18]) """ parse = parse.strip() increment = 1 if parse == '*': return set(xrange(minmax[0], minmax[1] + 1)) elif parse.isdigit(): # A single number still needs to be returned as a set value = int(parse) if value >= minmax[0] and value <= minmax[1]: return set((value,)) else: raise ValueError("\"%s\" is not within valid range." % parse) elif '-' in parse or '/' in parse: divide = parse.split('/') subrange = divide[0] if len(divide) == 2: # Example: 1-3/5 or */7 increment should be 5 and 7 respectively increment = int(divide[1]) if '-' in subrange: # Example: a-b prefix, suffix = [int(n) for n in subrange.split('-')] if prefix < minmax[0] or suffix > minmax[1]: raise ValueError("\"%s\" is not within valid range." % parse) elif subrange.isdigit(): # Handle offset increments e.g. 5/15 to run at :05, :20, :35, and :50 return set(xrange(int(subrange), minmax[1] + 1, increment)) elif subrange == '*': # Include all values within the given range prefix, suffix = minmax else: raise ValueError("Unrecognized symbol \"%s\"" % subrange) if prefix < suffix: # Example: 7-10 return set(xrange(prefix, suffix + 1, increment)) else: # Example: 12-4/2; (12, 12 + n, ..., 12 + m*n) U (n_0, ..., 4) noskips = list(xrange(prefix, minmax[1] + 1)) noskips += list(xrange(minmax[0], suffix + 1)) return set(noskips[::increment]) else: raise ValueError("Atom \"%s\" not in a recognized format." % parse)
(parse, minmax)
23,296
cronex
zip
null
def zip(*args): return list(__builtins__['zip'](*args))
(*args)
23,297
parutils.logging.logger
Logger
null
class Logger: def __init__( self, file_label='', force_new_logger=False, level=None, log_format=None, file_write=True, dir=None, file_format=None, ) -> None: from . import g if g.logger and g.logger.file_write and not force_new_logger: self = g.logger return self.logs = [] self.buffer = '' self.err_count = 0 self.level = level if level else const.DEFAULT_LEVEL self.log_format = log_format if log_format else const.DEFAULT_LOG_FORMAT self.file_write = file_write self.start_time = time() if not file_write: return self.file_label = file_label self.dir = dir if dir else const.DEFAULT_DIR self.file_format = file_format if file_format else const.DEFAULT_FILE_FORMAT file_base_name = datetime.now().strftime(self.file_format) if self.file_label: file_base_name += '_' + self.file_label file_name = file_base_name + '.txt' self.log_path = p.abspath(p.join(self.dir, file_name)) u.mkdirs(self.dir) with open(self.log_path, 'w', encoding='utf-8') as in_file: in_file.write('') s = (f"Log file initialised ({self.log_path})\n" f"CWD: {os.getcwd()}\n" f"Python interpreter path: {sys.executable}\n" f"Python version: {sys.version }\n" f"ParUtils version: {u.__VERSION__}\n") self.log_print(s) g.logger = self @staticmethod def close(): from . import g g.logger = None def log(self, *args, level=0, c_out=True): if self.level < level: return args = [str(e) for e in args] msg = ' '.join(args) fdate = datetime.now().strftime(self.log_format) s = f"{fdate}{msg}" self.log_print(s, c_out=c_out) def log_print(self, *args, level=0, c_out=True, nb_tab=0, dashes=0, tab_char=' '): if self.level < level: return args = [str(e) for e in args] s = ' '.join(args) if nb_tab != 0: for i in range(0, nb_tab): s = tab_char + s if dashes > 0: s = u.extend_str(s, '-', dashes) with lock: self._write_log(s, c_out) def _write_log(self, str_in, c_out): s = str(str_in) if not self.file_write: self._append_and_print(s, c_out) return try: with open(self.log_path, 'a', encoding='utf-8') as in_file: in_file.write(self.buffer + s + '\n') self.buffer = '' self.err_count = 0 except Exception as e: s = self._handle_e(str_in, e) self._append_and_print(s, c_out) def _append_and_print(self, s, c_out): u.g.logs.append(s) self.logs.append(s) if c_out: print(s) def _handle_e(self, str_in, e): s = f"Warning: the following message couldn't be logged because of {e}: {u.truncate(str_in, 256)}" self.buffer += s + '\n' self.err_count += 1 if self.err_count > const.MAX_ERR_COUNT: s += f"\nThe number of logging errors in a row reached the maximum set limit of {const.MAX_ERR_COUNT}. Disabling file_write." self.buffer += s + '\n' self.file_write = False return s
(file_label='', force_new_logger=False, level=None, log_format=None, file_write=True, dir=None, file_format=None) -> None
23,298
parutils.logging.logger
__init__
null
def __init__( self, file_label='', force_new_logger=False, level=None, log_format=None, file_write=True, dir=None, file_format=None, ) -> None: from . import g if g.logger and g.logger.file_write and not force_new_logger: self = g.logger return self.logs = [] self.buffer = '' self.err_count = 0 self.level = level if level else const.DEFAULT_LEVEL self.log_format = log_format if log_format else const.DEFAULT_LOG_FORMAT self.file_write = file_write self.start_time = time() if not file_write: return self.file_label = file_label self.dir = dir if dir else const.DEFAULT_DIR self.file_format = file_format if file_format else const.DEFAULT_FILE_FORMAT file_base_name = datetime.now().strftime(self.file_format) if self.file_label: file_base_name += '_' + self.file_label file_name = file_base_name + '.txt' self.log_path = p.abspath(p.join(self.dir, file_name)) u.mkdirs(self.dir) with open(self.log_path, 'w', encoding='utf-8') as in_file: in_file.write('') s = (f"Log file initialised ({self.log_path})\n" f"CWD: {os.getcwd()}\n" f"Python interpreter path: {sys.executable}\n" f"Python version: {sys.version }\n" f"ParUtils version: {u.__VERSION__}\n") self.log_print(s) g.logger = self
(self, file_label='', force_new_logger=False, level=None, log_format=None, file_write=True, dir=None, file_format=None) -> NoneType
23,299
parutils.logging.logger
_append_and_print
null
def _append_and_print(self, s, c_out): u.g.logs.append(s) self.logs.append(s) if c_out: print(s)
(self, s, c_out)
23,300
parutils.logging.logger
_handle_e
null
def _handle_e(self, str_in, e): s = f"Warning: the following message couldn't be logged because of {e}: {u.truncate(str_in, 256)}" self.buffer += s + '\n' self.err_count += 1 if self.err_count > const.MAX_ERR_COUNT: s += f"\nThe number of logging errors in a row reached the maximum set limit of {const.MAX_ERR_COUNT}. Disabling file_write." self.buffer += s + '\n' self.file_write = False return s
(self, str_in, e)
23,301
parutils.logging.logger
_write_log
null
def _write_log(self, str_in, c_out): s = str(str_in) if not self.file_write: self._append_and_print(s, c_out) return try: with open(self.log_path, 'a', encoding='utf-8') as in_file: in_file.write(self.buffer + s + '\n') self.buffer = '' self.err_count = 0 except Exception as e: s = self._handle_e(str_in, e) self._append_and_print(s, c_out)
(self, str_in, c_out)
23,302
parutils.logging.logger
close
null
@staticmethod def close(): from . import g g.logger = None
()
23,303
parutils.logging.logger
log
null
def log(self, *args, level=0, c_out=True): if self.level < level: return args = [str(e) for e in args] msg = ' '.join(args) fdate = datetime.now().strftime(self.log_format) s = f"{fdate}{msg}" self.log_print(s, c_out=c_out)
(self, *args, level=0, c_out=True)
23,304
parutils.logging.logger
log_print
null
def log_print(self, *args, level=0, c_out=True, nb_tab=0, dashes=0, tab_char=' '): if self.level < level: return args = [str(e) for e in args] s = ' '.join(args) if nb_tab != 0: for i in range(0, nb_tab): s = tab_char + s if dashes > 0: s = u.extend_str(s, '-', dashes) with lock: self._write_log(s, c_out)
(self, *args, level=0, c_out=True, nb_tab=0, dashes=0, tab_char=' ')
23,305
parutils.strg
big_number
Converts a potentially big number into a readable string. Example: - big_number(10000000) returns '10 000 000'.
def big_number(int_in): """Converts a potentially big number into a readable string. Example: - big_number(10000000) returns '10 000 000'. """ s = str(int_in) position = len(s) counter = 0 out = '' while position != 0: counter += 1 position -= 1 out = s[position] + out if counter % 3 == 0 and position != 0: out = " " + out return out
(int_in)
23,307
parutils.logging.cl
check_log
Checks whether the current log file contains the 'in_list' elements. If it doesn't, a warning is thrown. - log_matches: if True, the matches are printed out in the log file
def check_log(in_list=[], in_list_not=[], log_matches=False, max_warn=0, name=''): """Checks whether the current log file contains the 'in_list' elements. If it doesn't, a warning is thrown. - log_matches: if True, the matches are printed out in the log file """ s = ' ' + name if name else '' log(f'check_log{s}...') logger = get_logger() txt = load_txt(logger) n_w = 0 n_w += check(in_list, txt, logger.log_path, log_matches) n_w += check_not(in_list_not, txt, logger.log_path) check_warn(n_w, max_warn, name)
(in_list=[], in_list_not=[], log_matches=False, max_warn=0, name='')
23,308
parutils.logging.core
close_logger
null
def close_logger(): if g.logger: g.logger.close()
()
23,309
parutils.file
count_lines
Counts the number of lines of a file
def count_lines(in_path):
    """Counts the number of lines of a file"""
    with open(in_path, 'r', encoding='utf-8') as in_file:
        i = 0
        for line in in_file:
            i += 1
    return i
(in_path)
23,310
parutils.csvl
csv_clean
Cleans a csv field by removing csv separators and new line characters
def csv_clean(s: str):
    """Cleans a csv field by removing csv separators and new line characters"""
    out = s.replace('\r', '')
    out = out.replace('\n', '')
    out = out.replace(SEPARATOR, '')
    return out
(s: str)
23,311
parutils.csvl
csv_to_list
Converts a csv line to a list using SEPARATOR as separator
def csv_to_list(line_in: str):
    """Converts a csv line to a list using SEPARATOR as separator"""
    return line_in.strip('\n').split(SEPARATOR)
(line_in: str)
23,313
parutils.dq
del_dup_list
Returns in_list sorted and without duplicates
def del_dup_list(in_list):
    """Returns in_list sorted and without duplicates"""
    if not in_list:
        return []

    # If in_list elements are hashable
    if isinstance(in_list[0], str):
        out_list = list(set(in_list))
        out_list.sort()
        return out_list

    # If not
    in_sorted = sorted(in_list)
    out_list = [in_sorted[0]]
    old_elt = in_sorted[0]
    for elt in in_sorted[1:]:
        if elt > old_elt:
            out_list.append(elt)
            old_elt = elt
    return out_list
(in_list)
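An illustration of the two branches (values arbitrary):

from parutils.dq import del_dup_list

del_dup_list(['b', 'a', 'b'])   # ['a', 'b']   (hashable branch: set + sort)
del_dup_list([[2], [1], [1]])   # [[1], [2]]   (fallback branch for unhashable items)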
23,314
parutils.file
delete_folder
Deletes a folder and its content
def delete_folder(dir):
    """Deletes a folder and its content"""
    if p.exists(dir):
        rmtree(dir)
        log(f"Folder '{dir}' deleted")
(dir)
23,315
parutils.dq
diff_list
null
def diff_list(list1, list2, out_path=''):
    if not out_path:
        u.mkdirs(OUT_DIR)
        out_path = p.join(OUT_DIR, 'file_match_out.csv')
    out1 = [e for e in list1 if e not in list2]
    out2 = [e for e in list2 if e not in list1]
    out = del_dup_list(out1 + out2)
    u.save_list(out, out_path)
    u.log(f"Comparison result available here: {out_path}")
(list1, list2, out_path='')
23,317
parutils.strg
extend_str
Extends the string 'str_in' to the length 'length' with the given 'char'
def extend_str(str_in, char, length, left=False):
    """Extends the string 'str_in' to the length 'length' with the given 'char'"""
    s = str(str_in)
    while len(s) < length:
        s = char + s if left else s + char
    return s
(str_in, char, length, left=False)
23,319
parutils.dq
file_match
Compares two files and outputs the diff if the files don't match. Note that the files are sorted before comparison. - del_dup: if true, duplicates are deleted before comparison - err: if True, an exception is raised when the files don't match - out_path: specifies an output path for file comparison different from default
import os.path as p

import parutils as u

from . import wrap

OUT_DIR = 'out'


@wrap.simple
def file_match(in1, in2, del_dup=False, err=True, out_path=''):
    """Compares two files and outputs the diff if the files don't match.
    Note that the files are sorted before comparison.

    - del_dup: if true, duplicates are deleted before comparison
    - err: if True, an exception is raised when the files don't match
    - out_path: specifies an output path for file comparison different from default
    """
    s = f"Comparing files '{in1}' and '{in2}'..."
    u.log(s)
    l1, l2 = u.load_txt(in1), u.load_txt(in2)
    l1.sort()
    l2.sort()
    if del_dup:
        l1, l2 = del_dup_list(l1), del_dup_list(l2)
    res = l1 == l2
    s = "Files match" if res else "Files don't match"
    u.log(s)
    if not res:
        diff_list(l1, l2, out_path)
        if err:
            raise Exception(s)
(in1, in2, del_dup=False, err=True, out_path='')
23,320
parutils.dq
find_dup_list
Returns a list of the duplicates in in_list
def find_dup_list(in_list):
    """Returns a list of the duplicates in in_list"""
    if not in_list:
        return []

    in_sorted = sorted(in_list)
    dup_list = []
    old_elt = in_sorted[0]
    for elt in in_sorted[1:]:
        if elt == old_elt:
            dup_list.append(elt)
        else:
            old_elt = elt
    if dup_list:
        dup_list = del_dup_list(dup_list)
    return dup_list
(in_list)
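For example (values arbitrary):

from parutils.dq import find_dup_list

find_dup_list(['a', 'b', 'b', 'b'])   # ['b'] (each duplicated value is reported once)
find_dup_list([1, 2, 3])              # []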
23,322
parutils.strg
gen_random_string
Generates a random string (letters and digits) of length 'length'
def gen_random_string(length=10):
    """Generates a random string (letters and digits) of length 'length'"""
    import random
    import string

    letters = string.ascii_letters
    digits = string.digits
    ln = letters + digits
    out = ''.join(random.choice(ln) for i in range(length))
    return out
(length=10)
23,323
parutils.csvl
get_csv_fields_dict
Returns a dictionary whose keys are the csv fields of the 'in_path' file and elements are the columns index.
def get_csv_fields_dict(in_path):
    """Returns a dictionary whose keys are the csv fields of the 'in_path' file
    and elements are the columns index.
    """
    fields = {}
    line_list = get_header(in_path, True)
    for i, elt in enumerate(line_list):
        fields[elt] = i
    return fields
(in_path)
23,324
parutils.strg
get_duration_ms
Gives the duration in ms between 'end_time' and 'start_time'. If 'end_time' is not given, the current time is taken.
def get_duration_ms(start_time, end_time=None):
    """Gives the duration in ms between 'end_time' and 'start_time'.
    If 'end_time' is not given, the current time is taken."""
    from time import time
    from math import floor

    if not end_time:
        end_time = time()
    duration = floor((end_time - start_time) * 1000)
    return duration
(start_time, end_time=None)
23,325
parutils.strg
get_duration_string
Outputs a string representing the time elapsed between 'end_time' and 'start_time'. If 'end_time' is not given, the current time is taken. - return_dms: if True, the duration in ms is also output: (dms, dstr). If False, only the duration string is output (dstr).
def get_duration_string(start_time, return_dms=False, end_time=None):
    """Outputs a string representing the time elapsed between 'end_time' and
    'start_time'. If 'end_time' is not given, the current time is taken.

    - return_dms: if True, the duration in ms is also output: (dms, dstr).
      If False, only the duration string is output (dstr).
    """
    from math import floor

    dms = get_duration_ms(start_time, end_time)
    if dms >= 1000:
        duration_s = dms / 1000
        if duration_s > 120:
            duration_m = duration_s // 60
            duration_s = duration_s % 60
            dm = str(floor(duration_m))
            ds = str(floor(duration_s))
            dstr = f"{dm} minutes and {ds} seconds"
        else:
            duration_s = floor(duration_s * 10) / 10
            dstr = str(duration_s) + " s"
    else:
        dstr = str(dms) + " ms"

    if return_dms:
        return (dms, dstr)
    return dstr
(start_time, return_dms=False, end_time=None)
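A usage sketch combining the two duration helpers above:

from time import time
from parutils.strg import get_duration_string

t0 = time()
# ... some work ...
print(get_duration_string(t0))                         # e.g. '250 ms' or '1.5 s'
dms, dstr = get_duration_string(t0, return_dms=True)   # also get the raw milliseconds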
23,326
parutils.logging.core
get_logger
null
def get_logger() -> Logger:
    if g.logger is None:
        logger = Logger(file_write=False)
        g.logger = logger
    else:
        logger = g.logger
    return logger
() -> parutils.logging.logger.Logger
23,327
parutils.logging.core
get_logs
null
def get_logs():
    return get_logger().logs
()
23,328
parutils.strg
hash512
Contrary to hash, this hash function is not randomised, meaning it always outputs the same string for the same input string
def hash512(in_str: str, length=10):
    """Contrary to hash, this hash function is not randomised, meaning it
    always outputs the same string for the same input string"""
    import hashlib

    out = hashlib.sha512(in_str.encode('utf-8')).hexdigest()[:length]
    return out
(in_str: str, length=10)
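For instance (the output depends only on the input, never on the run):

from parutils.strg import hash512

hash512('hello')        # 10-char prefix of the SHA-512 hex digest, stable across runs
hash512('hello', 20)    # same digest, truncated to 20 characters instead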
23,329
parutils.logging.sl
init_sl_timer
Initialises the timer for the step_log function
def init_sl_timer(th_name='DEFAULT'):
    """Initialises the timer for the step_log function"""
    with lock:
        sl_time_dict[th_name] = time()
(th_name='DEFAULT')
23,330
parutils.strg
like
Behaves as the LIKE of Oracle SQL (you can match strings with wildcard character '*'). Returns the match object that you can access with the group function. Important note: If in_str is multiline and contains 'Hello World' among other characters - like(in_str, 'Hello World') returns True if exact=False - like(in_str, 'Hello World') returns False if exact=True, and like(in_str, '*Hello World*') returns a match object - like(in_str, 'Hel*ld') returns a match object If in_str is a one line string (no \n) and contains 'Hello World' among other characters - like(in_str, 'Hello World') returns True if exact=False - like(in_str, 'Hello World') returns False if exact=True, and like(in_str, '*Hello World*') returns a match object - like(in_str, 'Hel*ld') returns None - like(in_str, '*Hel*ld*') returns a match object Example: - m = like('Hello World', 'He*o W*d') - m.group(0) => 'Hello World' - m.group(1) => 'll'
def like(in_str: str, like_string: str, case_sensitive=True, exact=False) -> re.Match:
    """Behaves as the LIKE of Oracle SQL (you can match strings with wildcard
    character '*'). Returns the match object that you can access with the
    group function.

    Important note:

    If in_str is multiline and contains 'Hello World' among other characters
    - like(in_str, 'Hello World') returns True if exact=False
    - like(in_str, 'Hello World') returns False if exact=True, and
      like(in_str, '*Hello World*') returns a match object
    - like(in_str, 'Hel*ld') returns a match object

    If in_str is a one line string (no \n) and contains 'Hello World' among
    other characters
    - like(in_str, 'Hello World') returns True if exact=False
    - like(in_str, 'Hello World') returns False if exact=True, and
      like(in_str, '*Hello World*') returns a match object
    - like(in_str, 'Hel*ld') returns None
    - like(in_str, '*Hel*ld*') returns a match object

    Example:
    - m = like('Hello World', 'He*o W*d')
    - m.group(0) => 'Hello World'
    - m.group(1) => 'll'
    """
    if not case_sensitive:
        in_str = in_str.lower()
        like_string = like_string.lower()
    if '*' not in like_string and not exact:
        return like_string in in_str

    like_string = re.escape(like_string)
    like_string = like_string.replace(r'\*', '(.*)')
    if '\n' not in in_str:
        like_string = '^' + like_string + '$'
    m = re.search(like_string, in_str)
    return m
(in_str: str, like_string: str, case_sensitive=True, exact=False) -> re.Match
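A short sketch of the matching rules described above (note the anchoring on single-line inputs):

from parutils.strg import like

m = like('Hello World', 'He*o W*d')
m.group(0)   # 'Hello World'
m.group(1)   # 'll'
like('Say Hello World now', 'Hello World')   # True (plain containment, no wildcard)
like('Say Hello World now', 'Hel*ld')        # None (single-line input is anchored)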
23,331
parutils.strg
like_dict
Returns the key whose item (string or list) matches in_str, using the like / like_list functions. See the like_list function description for more details.
def like_dict(in_str, like_dict, case_sensitive=True, skey='', exact=False):
    """Returns the key whose item (string or list) matches in_str, using the
    like / like_list functions. See the like_list function description for
    more details."""
    if not isinstance(like_dict, dict):
        raise Exception(E_WRONG_TYPE_DICT)
    for key in like_dict:
        item = like_dict[key] if not skey else like_dict[key][skey]
        if isinstance(item, str) and like(in_str, item, case_sensitive, exact=exact):
            return key
        if isinstance(item, list) and like_list(in_str, item, case_sensitive, exact=exact):
            return key
    return False
(in_str, like_dict, case_sensitive=True, skey='', exact=False)
23,332
parutils.strg
like_list
Returns the first like_list element that in_str matches (using the like function), which is truthy; returns False if nothing matches. See the like function description for more details.
def like_list(in_str, like_list, case_sensitive=True, exact=False):
    """Returns the first like_list element that in_str matches (using the
    like function), which is truthy; returns False if nothing matches.
    See the like function description for more details."""
    if not isinstance(like_list, list):
        raise Exception(E_WRONG_TYPE_LIST)
    for elt in like_list:
        if like(in_str, elt, case_sensitive, exact=exact):
            return elt
    return False
(in_str, like_list, case_sensitive=True, exact=False)
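An example of both helpers (patterns arbitrary):

from parutils.strg import like_dict, like_list

like_list('report_2024.csv', ['*.txt', '*.csv'])                        # '*.csv'
like_dict('error: disk full', {'disk': '*disk*', 'net': '*network*'})   # 'disk'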
23,333
parutils.file
list_files
Lists the files of the 'in_dir' directory - incl_root: if True, the root directory is included in each path - walk: if True, the files of all the subdirectories are listed as well - only_list: list of wanted patterns. e.g. ['*.py'] (only these patterns will be output) - ignore_list: list of unwanted patterns. e.g. ['*.pyc'] (these patterns won't be output)
def list_files(in_dir, walk=False, incl_root=True, abspath=False, only_list=[], ignore_list=[]):
    """Lists the files of the 'in_dir' directory

    - incl_root: if True, the root directory is included in each path
    - walk: if True, the files of all the subdirectories are listed as well
    - only_list: list of wanted patterns. e.g. ['*.py'] (only these patterns will be output)
    - ignore_list: list of unwanted patterns. e.g. ['*.pyc'] (these patterns won't be output)
    """
    if not p.exists(in_dir):
        return []

    out = []
    for root, dir, files in os.walk(in_dir):
        for file in files:
            cur_path = file if not incl_root else p.join(root, file)
            cur_path = p.abspath(cur_path) if abspath else cur_path
            only = not only_list or like_list(file, only_list, case_sensitive=False)
            ignore = not like_list(file, ignore_list, case_sensitive=False)
            if only and ignore:
                out.append(cur_path)
        if not walk:
            break
    out.sort()
    return out
(in_dir, walk=False, incl_root=True, abspath=False, only_list=[], ignore_list=[])
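A usage sketch (directory name and patterns are illustrative):

from parutils.file import list_files

# All .py files under 'src' and its subdirectories, skipping bytecode:
list_files('src', walk=True, only_list=['*.py'], ignore_list=['*.pyc'])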
23,334
parutils.msc
list_to_dict
Transforms 'list_in' to a dictionary using the 'separator'
def list_to_dict(list_in: List[str], separator='='):
    """Transforms 'list_in' to a dictionary using the 'separator'"""
    out = {}
    for elt in list_in:
        e = elt.split(separator)
        key = e[0].strip()
        value = elt[elt.find(separator) + 1:].strip()
        out[key] = value
    return out
(list_in: List[str], separator='=')
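For example:

from parutils.msc import list_to_dict

list_to_dict(['host = localhost', 'port = 8080'])
# {'host': 'localhost', 'port': '8080'}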
23,335
parutils.csvl
load_csv
Loads a csv file and returns a list whose elements correspond to the lines of the 'in_path' file. Each element is also a list whose elements correspond to the csv fields. When quote is True, the built-in csv package is used and separators between quote chars are ignored (less performant).
def load_csv(in_path, quote=False):
    """Loads a csv file and returns a list whose elements correspond to the
    lines of the 'in_path' file. Each element is also a list whose elements
    correspond to the csv fields.

    When quote is True, the built-in csv package is used and separators
    between quote chars are ignored (less performant).
    """
    out_list = []
    with open(in_path, 'r', encoding='utf-8') as in_file:
        if quote:
            reader = csv.reader(in_file, delimiter=SEPARATOR, doublequote='"')
            for row in reader:
                out_list.append(row)
        else:
            for line in in_file:
                line_list = csv_to_list(line)
                out_list.append(line_list)
    return out_list
(in_path, quote=False)
23,336
parutils.file
load_txt
Loads a text file - list_out: if True, a list is output, each element representing a line of the file. If False, a string is output.
def load_txt(in_path, list_out=True):
    """Loads a text file

    - list_out: if True, a list is output, each element representing a line
      of the file. If False, a string is output.
    """
    if list_out:
        out = []
    else:
        out = ''
    with open(in_path, 'r', encoding='utf-8') as in_file:
        for line in in_file:
            if list_out:
                out.append(line.strip('\n'))
            else:
                out += line
    return out
(in_path, list_out=True)
23,337
parutils.logging.main
log
Logs 'str_in' in the current log file (log_path) - level: log level. Current log level is the attribute level of the current logger. You can get the current logger by using the get_logger function. Nothing is logged if logger level < level - c_out: specifies if something should be printed in the console or not
def log(*args, level=0, c_out=True):
    # Delegates to the log method of the current logger
    # (the original entry mistakenly carried log_input's code)
    logger = get_logger()
    logger.log(*args, level=level, c_out=c_out)
(*args, level=0, c_out=True)
23,338
parutils.logging.main
log_array
null
def log_array(array, nb_tab=0, tab_char=' '):
    for elt in array:
        log_print(elt, nb_tab=nb_tab, tab_char=tab_char)
(array, nb_tab=0, tab_char=' ')
23,339
parutils.logging.main
log_dict
null
def log_dict(d, nb_tab=0, depth=0, tab_char=' '):
    for key in d:
        if isinstance(d[key], dict) and depth > 0:
            log_print(f'{key}:', nb_tab=nb_tab, tab_char=tab_char)
            log_dict(d[key], nb_tab + 1, depth - 1, tab_char=tab_char)
        else:
            log_print(f'{key}: {d[key]}', nb_tab=nb_tab, tab_char=tab_char)
(d, nb_tab=0, depth=0, tab_char=' ')
23,340
parutils.logging.main
log_example
null
def log_example(list_in, what="duplicates", n_print=5):
    if not list_in:
        return
    log_print(f"Examples of {what} (limited to {n_print}):")
    log_array(list_in[:n_print])
(list_in, what='duplicates', n_print=5)
23,341
parutils.logging.main
log_input
Same as input but traced in the log file
def log_input(str_in):
    """Same as input but traced in the log file"""
    log_print(str_in, c_out=False)
    command = input(str_in + '\n')
    log_print(command, c_out=False)
    return command
(str_in)
23,342
parutils.logging.main
log_print
Prints something in the current log file (log_path) - level: log level. Current log level is the attribute level of the current logger. You can get the current logger by using the get_logger function. Nothing is logged if logger level < level - c_out: specifies if something should be printed in the console or not - nb_tab: number of tab indentations - dashes: total length of the input string extended with dashes ('-')
def log_print(*args, level=0, c_out=True, nb_tab=0, dashes=0, tab_char=' '):
    # Delegates to the log_print method of the current logger
    # (the original entry mistakenly carried log_input's code)
    logger = get_logger()
    logger.log_print(*args, level=level, c_out=c_out, nb_tab=nb_tab, dashes=dashes, tab_char=tab_char)
(*args, level=0, c_out=True, nb_tab=0, dashes=0, tab_char=' ')
23,344
parutils.file
mkdirs
Same as os.makedirs but - Input can also be a path - With a 'delete' option which (if True) deletes the folder if it already exists.
def mkdirs(dir, delete=False):
    """Same as os.makedirs but
    - Input can also be a path
    - With a 'delete' option which (if True) deletes the folder if it already exists."""
    if not dir:
        return
    if p.exists(dir) and not delete:
        return
    if p.exists(dir) and delete:
        delete_folder(dir)
    os.makedirs(dir)
    log(f"Folder '{dir}' created")
(dir, delete=False)
23,346
parutils.msc
replace_from_dict
Replaces the variables (delimited by '@@') in 'str_in' with the values of 'dict_in'. Example: - replace_from_dict('Hello @@VAR@@', {'VAR': 'world'}) returns 'Hello world'
def replace_from_dict(str_in: str, dict_in, var_del='@@'):
    """Replaces the variables (delimited by '@@') in 'str_in' with the values
    of 'dict_in'.

    Example:
    - replace_from_dict('Hello @@VAR@@', {'VAR': 'world'}) returns 'Hello world'
    """
    for key in dict_in:
        str_in = str_in.replace(var_del + key + var_del, str(dict_in[key]))
    return str_in
(str_in: str, dict_in, var_del='@@')
23,347
parutils.csvl
save_csv
Saves a list to a csv file - mode: mode for the open method - quote: determines whether each field should be wrapped into double quotes or not (default=False)
def save_csv(array_in, out_path, mode='w', quote=False):
    """Saves a list to a csv file

    - mode: mode for the open method
    - quote: determines whether each field should be wrapped into double quotes or not (default=False)
    """
    import os.path as p

    file.mkdirs(p.dirname(out_path))
    with open(out_path, mode, encoding='utf-8') as out_file:
        for row in array_in:
            write_csv_line(row, out_file, quote)
(array_in, out_path, mode='w', quote=False)
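A round-trip sketch with load_csv (the file path is illustrative; SEPARATOR is the module-level csv separator):

from parutils.csvl import load_csv, save_csv

rows = [['id', 'name'], ['1', 'Ada'], ['2', 'Linus']]
save_csv(rows, 'out/people.csv')
load_csv('out/people.csv')   # [['id', 'name'], ['1', 'Ada'], ['2', 'Linus']]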
23,348
parutils.file
save_list
Saves a list in a file, each element representing a line
def save_list(in_list, out_path, mode='w'):
    """Saves a list in a file, each element representing a line"""
    mkdirs(p.dirname(out_path))
    with open(out_path, mode, encoding='utf-8') as out_file:
        for elt in in_list:
            s = str(elt).strip('\n') + '\n'
            out_file.write(s)
(in_list, out_path, mode='w')
23,349
parutils.logging.core
set_logger
null
def set_logger(logger):
    g.logger = logger
(logger)
23,350
parutils.logging.sl
step_log
Logs something only when the 'counter' is a multiple of 'step' - Initialise timer with init_sl_timer() - For more info, check out the README.md file
def step_log(counter, step, what='lines written', th_name='DEFAULT', extra=''):
    """Logs something only when the 'counter' is a multiple of 'step'

    - Initialise timer with init_sl_timer()
    - For more info, check out the README.md file
    """
    if counter % step != 0:
        return False

    # Avoids error if sl time has not been initialised
    st = get_logger().start_time if th_name not in sl_time_dict else sl_time_dict[th_name]
    dstr = strg.get_duration_string(st)
    bn_1 = strg.big_number(step)
    bn_2 = strg.big_number(counter)
    s = "{bn1} {what} in {dstr}. {bn2} {what} in total{extra}."
    msg = s.format(bn1=bn_1, bn2=bn_2, dstr=dstr, what=what, extra=extra)
    log(msg)
    init_sl_timer(th_name)
    return True
(counter, step, what='lines written', th_name='DEFAULT', extra='')
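A typical loop using the timer above (process and lines are hypothetical placeholders):

from parutils.logging.sl import init_sl_timer, step_log

init_sl_timer()
for i, line in enumerate(lines, 1):
    process(line)                             # hypothetical per-line work
    step_log(i, 10000, 'lines processed')     # logs every 10 000 iterations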
23,353
parutils.strg
truncate
null
def truncate(s, length=100):
    out = s[0:length - 3] + '...' if len(s) > length else s
    return out
(s, length=100)
23,354
parutils.testing
ttry
null
def ttry(f, e_ref, *args, **kwargs):
    exception_occured = False
    try:
        f(*args, **kwargs)
    except Exception as e:
        exception_occured = True
        if like(str(e), e_ref):
            log(f"[ttry] Exception caught matches expected ('{e_ref}')")
        else:
            s = f"[ttry] Exception caught ('{str(e)}') doesn't match expected ('{e_ref}')"
            log(s)
            raise Exception(s)
    if not exception_occured:
        s = "[ttry] No exception was caught"
        log(s)
        raise Exception(s)
(f, e_ref, *args, **kwargs)
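A usage sketch (boom is a placeholder function):

from parutils.testing import ttry

def boom():
    raise Exception('invalid value: 42')

ttry(boom, 'invalid value*')   # passes: the message matches the pattern
ttry(boom, 'other error')      # raises: the caught message doesn't match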
23,355
parutils.logging.core
update_logs
null
def update_logs(logs):
    from parutils.file import save_list

    logger = get_logger()
    logger.logs = logs
    save_list(logs + [''], logger.log_path)
(logs)
23,357
parutils.csvl
write_csv_line
Writes a line to a csv file - row: has to be a list
def write_csv_line(row, out_file: TextIOWrapper, quote=False):
    """Writes a line to a csv file

    - row: has to be a list
    """
    if not isinstance(row, list):
        raise Exception(E_WRONG_TYPE_LIST)
    if quote:
        row = [f'"{e}"' for e in row]
    line_out = SEPARATOR.join(row)
    line_out += '\n'
    out_file.write(line_out)
(row, out_file: _io.TextIOWrapper, quote=False)
23,372
uvicorn.config
Config
null
class Config:
    def __init__(
        self,
        app: ASGIApplication | Callable[..., Any] | str,
        host: str = "127.0.0.1",
        port: int = 8000,
        uds: str | None = None,
        fd: int | None = None,
        loop: LoopSetupType = "auto",
        http: type[asyncio.Protocol] | HTTPProtocolType = "auto",
        ws: type[asyncio.Protocol] | WSProtocolType = "auto",
        ws_max_size: int = 16 * 1024 * 1024,
        ws_max_queue: int = 32,
        ws_ping_interval: float | None = 20.0,
        ws_ping_timeout: float | None = 20.0,
        ws_per_message_deflate: bool = True,
        lifespan: LifespanType = "auto",
        env_file: str | os.PathLike[str] | None = None,
        log_config: dict[str, Any] | str | None = LOGGING_CONFIG,
        log_level: str | int | None = None,
        access_log: bool = True,
        use_colors: bool | None = None,
        interface: InterfaceType = "auto",
        reload: bool = False,
        reload_dirs: list[str] | str | None = None,
        reload_delay: float = 0.25,
        reload_includes: list[str] | str | None = None,
        reload_excludes: list[str] | str | None = None,
        workers: int | None = None,
        proxy_headers: bool = True,
        server_header: bool = True,
        date_header: bool = True,
        forwarded_allow_ips: list[str] | str | None = None,
        root_path: str = "",
        limit_concurrency: int | None = None,
        limit_max_requests: int | None = None,
        backlog: int = 2048,
        timeout_keep_alive: int = 5,
        timeout_notify: int = 30,
        timeout_graceful_shutdown: int | None = None,
        callback_notify: Callable[..., Awaitable[None]] | None = None,
        ssl_keyfile: str | None = None,
        ssl_certfile: str | os.PathLike[str] | None = None,
        ssl_keyfile_password: str | None = None,
        ssl_version: int = SSL_PROTOCOL_VERSION,
        ssl_cert_reqs: int = ssl.CERT_NONE,
        ssl_ca_certs: str | None = None,
        ssl_ciphers: str = "TLSv1",
        headers: list[tuple[str, str]] | None = None,
        factory: bool = False,
        h11_max_incomplete_event_size: int | None = None,
    ):
        self.app = app
        self.host = host
        self.port = port
        self.uds = uds
        self.fd = fd
        self.loop = loop
        self.http = http
        self.ws = ws
        self.ws_max_size = ws_max_size
        self.ws_max_queue = ws_max_queue
        self.ws_ping_interval = ws_ping_interval
        self.ws_ping_timeout = ws_ping_timeout
        self.ws_per_message_deflate = ws_per_message_deflate
        self.lifespan = lifespan
        self.log_config = log_config
        self.log_level = log_level
        self.access_log = access_log
        self.use_colors = use_colors
        self.interface = interface
        self.reload = reload
        self.reload_delay = reload_delay
        self.workers = workers or 1
        self.proxy_headers = proxy_headers
        self.server_header = server_header
        self.date_header = date_header
        self.root_path = root_path
        self.limit_concurrency = limit_concurrency
        self.limit_max_requests = limit_max_requests
        self.backlog = backlog
        self.timeout_keep_alive = timeout_keep_alive
        self.timeout_notify = timeout_notify
        self.timeout_graceful_shutdown = timeout_graceful_shutdown
        self.callback_notify = callback_notify
        self.ssl_keyfile = ssl_keyfile
        self.ssl_certfile = ssl_certfile
        self.ssl_keyfile_password = ssl_keyfile_password
        self.ssl_version = ssl_version
        self.ssl_cert_reqs = ssl_cert_reqs
        self.ssl_ca_certs = ssl_ca_certs
        self.ssl_ciphers = ssl_ciphers
        self.headers: list[tuple[str, str]] = headers or []
        self.encoded_headers: list[tuple[bytes, bytes]] = []
        self.factory = factory
        self.h11_max_incomplete_event_size = h11_max_incomplete_event_size

        self.loaded = False
        self.configure_logging()

        self.reload_dirs: list[Path] = []
        self.reload_dirs_excludes: list[Path] = []
        self.reload_includes: list[str] = []
        self.reload_excludes: list[str] = []

        if (reload_dirs or reload_includes or reload_excludes) and not self.should_reload:
            logger.warning(
                "Current configuration will not reload as not all conditions are met, "
                "please refer to documentation."
            )

        if self.should_reload:
            reload_dirs = _normalize_dirs(reload_dirs)
            reload_includes = _normalize_dirs(reload_includes)
            reload_excludes = _normalize_dirs(reload_excludes)

            self.reload_includes, self.reload_dirs = resolve_reload_patterns(reload_includes, reload_dirs)
            self.reload_excludes, self.reload_dirs_excludes = resolve_reload_patterns(reload_excludes, [])

            reload_dirs_tmp = self.reload_dirs.copy()

            for directory in self.reload_dirs_excludes:
                for reload_directory in reload_dirs_tmp:
                    if directory == reload_directory or directory in reload_directory.parents:
                        try:
                            self.reload_dirs.remove(reload_directory)
                        except ValueError:
                            pass

            for pattern in self.reload_excludes:
                if pattern in self.reload_includes:
                    self.reload_includes.remove(pattern)

            if not self.reload_dirs:
                if reload_dirs:
                    logger.warning(
                        "Provided reload directories %s did not contain valid " + "directories, watching current working directory.",
                        reload_dirs,
                    )
                self.reload_dirs = [Path(os.getcwd())]

            logger.info(
                "Will watch for changes in these directories: %s",
                sorted(list(map(str, self.reload_dirs))),
            )

        if env_file is not None:
            from dotenv import load_dotenv

            logger.info("Loading environment from '%s'", env_file)
            load_dotenv(dotenv_path=env_file)

        if workers is None and "WEB_CONCURRENCY" in os.environ:
            self.workers = int(os.environ["WEB_CONCURRENCY"])

        self.forwarded_allow_ips: list[str] | str
        if forwarded_allow_ips is None:
            self.forwarded_allow_ips = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1")
        else:
            self.forwarded_allow_ips = forwarded_allow_ips

        if self.reload and self.workers > 1:
            logger.warning('"workers" flag is ignored when reloading is enabled.')

    @property
    def asgi_version(self) -> Literal["2.0", "3.0"]:
        mapping: dict[str, Literal["2.0", "3.0"]] = {
            "asgi2": "2.0",
            "asgi3": "3.0",
            "wsgi": "3.0",
        }
        return mapping[self.interface]

    @property
    def is_ssl(self) -> bool:
        return bool(self.ssl_keyfile or self.ssl_certfile)

    @property
    def use_subprocess(self) -> bool:
        return bool(self.reload or self.workers > 1)

    def configure_logging(self) -> None:
        logging.addLevelName(TRACE_LOG_LEVEL, "TRACE")

        if self.log_config is not None:
            if isinstance(self.log_config, dict):
                if self.use_colors in (True, False):
                    self.log_config["formatters"]["default"]["use_colors"] = self.use_colors
                    self.log_config["formatters"]["access"]["use_colors"] = self.use_colors
                logging.config.dictConfig(self.log_config)
            elif self.log_config.endswith(".json"):
                with open(self.log_config) as file:
                    loaded_config = json.load(file)
                    logging.config.dictConfig(loaded_config)
            elif self.log_config.endswith((".yaml", ".yml")):
                # Install the PyYAML package or the uvicorn[standard] optional
                # dependencies to enable this functionality.
                import yaml

                with open(self.log_config) as file:
                    loaded_config = yaml.safe_load(file)
                    logging.config.dictConfig(loaded_config)
            else:
                # See the note about fileConfig() here:
                # https://docs.python.org/3/library/logging.config.html#configuration-file-format
                logging.config.fileConfig(self.log_config, disable_existing_loggers=False)

        if self.log_level is not None:
            if isinstance(self.log_level, str):
                log_level = LOG_LEVELS[self.log_level]
            else:
                log_level = self.log_level
            logging.getLogger("uvicorn.error").setLevel(log_level)
            logging.getLogger("uvicorn.access").setLevel(log_level)
            logging.getLogger("uvicorn.asgi").setLevel(log_level)
        if self.access_log is False:
            logging.getLogger("uvicorn.access").handlers = []
            logging.getLogger("uvicorn.access").propagate = False

    def load(self) -> None:
        assert not self.loaded

        if self.is_ssl:
            assert self.ssl_certfile
            self.ssl: ssl.SSLContext | None = create_ssl_context(
                keyfile=self.ssl_keyfile,
                certfile=self.ssl_certfile,
                password=self.ssl_keyfile_password,
                ssl_version=self.ssl_version,
                cert_reqs=self.ssl_cert_reqs,
                ca_certs=self.ssl_ca_certs,
                ciphers=self.ssl_ciphers,
            )
        else:
            self.ssl = None

        encoded_headers = [(key.lower().encode("latin1"), value.encode("latin1")) for key, value in self.headers]
        self.encoded_headers = (
            [(b"server", b"uvicorn")] + encoded_headers
            if b"server" not in dict(encoded_headers) and self.server_header
            else encoded_headers
        )

        if isinstance(self.http, str):
            http_protocol_class = import_from_string(HTTP_PROTOCOLS[self.http])
            self.http_protocol_class: type[asyncio.Protocol] = http_protocol_class
        else:
            self.http_protocol_class = self.http

        if isinstance(self.ws, str):
            ws_protocol_class = import_from_string(WS_PROTOCOLS[self.ws])
            self.ws_protocol_class: type[asyncio.Protocol] | None = ws_protocol_class
        else:
            self.ws_protocol_class = self.ws

        self.lifespan_class = import_from_string(LIFESPAN[self.lifespan])

        try:
            self.loaded_app = import_from_string(self.app)
        except ImportFromStringError as exc:
            logger.error("Error loading ASGI app. %s" % exc)
            sys.exit(1)

        try:
            self.loaded_app = self.loaded_app()
        except TypeError as exc:
            if self.factory:
                logger.error("Error loading ASGI app factory: %s", exc)
                sys.exit(1)
        else:
            if not self.factory:
                logger.warning(
                    "ASGI app factory detected. Using it, "
                    "but please consider setting the --factory flag explicitly."
                )

        if self.interface == "auto":
            if inspect.isclass(self.loaded_app):
                use_asgi_3 = hasattr(self.loaded_app, "__await__")
            elif inspect.isfunction(self.loaded_app):
                use_asgi_3 = asyncio.iscoroutinefunction(self.loaded_app)
            else:
                call = getattr(self.loaded_app, "__call__", None)
                use_asgi_3 = asyncio.iscoroutinefunction(call)
            self.interface = "asgi3" if use_asgi_3 else "asgi2"

        if self.interface == "wsgi":
            self.loaded_app = WSGIMiddleware(self.loaded_app)
            self.ws_protocol_class = None
        elif self.interface == "asgi2":
            self.loaded_app = ASGI2Middleware(self.loaded_app)

        if logger.getEffectiveLevel() <= TRACE_LOG_LEVEL:
            self.loaded_app = MessageLoggerMiddleware(self.loaded_app)
        if self.proxy_headers:
            self.loaded_app = ProxyHeadersMiddleware(self.loaded_app, trusted_hosts=self.forwarded_allow_ips)

        self.loaded = True

    def setup_event_loop(self) -> None:
        loop_setup: Callable | None = import_from_string(LOOP_SETUPS[self.loop])
        if loop_setup is not None:
            loop_setup(use_subprocess=self.use_subprocess)

    def bind_socket(self) -> socket.socket:
        logger_args: list[str | int]
        if self.uds:  # pragma: py-win32
            path = self.uds
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                sock.bind(path)
                uds_perms = 0o666
                os.chmod(self.uds, uds_perms)
            except OSError as exc:
                logger.error(exc)
                sys.exit(1)

            message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
            sock_name_format = "%s"
            color_message = "Uvicorn running on " + click.style(sock_name_format, bold=True) + " (Press CTRL+C to quit)"
            logger_args = [self.uds]
        elif self.fd:  # pragma: py-win32
            sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
            message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
            fd_name_format = "%s"
            color_message = "Uvicorn running on " + click.style(fd_name_format, bold=True) + " (Press CTRL+C to quit)"
            logger_args = [sock.getsockname()]
        else:
            family = socket.AF_INET
            addr_format = "%s://%s:%d"

            if self.host and ":" in self.host:  # pragma: py-win32
                # It's an IPv6 address.
                family = socket.AF_INET6
                addr_format = "%s://[%s]:%d"

            sock = socket.socket(family=family)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            try:
                sock.bind((self.host, self.port))
            except OSError as exc:
                logger.error(exc)
                sys.exit(1)

            message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
            color_message = "Uvicorn running on " + click.style(addr_format, bold=True) + " (Press CTRL+C to quit)"
            protocol_name = "https" if self.is_ssl else "http"
            logger_args = [protocol_name, self.host, sock.getsockname()[1]]
        logger.info(message, *logger_args, extra={"color_message": color_message})
        sock.set_inheritable(True)
        return sock

    @property
    def should_reload(self) -> bool:
        return isinstance(self.app, str) and self.reload
(app: 'ASGIApplication | Callable[..., Any] | str', host: 'str' = '127.0.0.1', port: 'int' = 8000, uds: 'str | None' = None, fd: 'int | None' = None, loop: 'LoopSetupType' = 'auto', http: 'type[asyncio.Protocol] | HTTPProtocolType' = 'auto', ws: 'type[asyncio.Protocol] | WSProtocolType' = 'auto', ws_max_size: 'int' = 16777216, ws_max_queue: 'int' = 32, ws_ping_interval: 'float | None' = 20.0, ws_ping_timeout: 'float | None' = 20.0, ws_per_message_deflate: 'bool' = True, lifespan: 'LifespanType' = 'auto', env_file: 'str | os.PathLike[str] | None' = None, log_config: 'dict[str, Any] | str | None' = {'version': 1, 'disable_existing_loggers': False, 'formatters': {'default': {'()': 'uvicorn.logging.DefaultFormatter', 'fmt': '%(levelprefix)s %(message)s', 'use_colors': None}, 'access': {'()': 'uvicorn.logging.AccessFormatter', 'fmt': '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s'}}, 'handlers': {'default': {'formatter': 'default', 'class': 'logging.StreamHandler', 'stream': 'ext://sys.stderr'}, 'access': {'formatter': 'access', 'class': 'logging.StreamHandler', 'stream': 'ext://sys.stdout'}}, 'loggers': {'uvicorn': {'handlers': ['default'], 'level': 'INFO', 'propagate': False}, 'uvicorn.error': {'level': 'INFO'}, 'uvicorn.access': {'handlers': ['access'], 'level': 'INFO', 'propagate': False}}}, log_level: 'str | int | None' = None, access_log: 'bool' = True, use_colors: 'bool | None' = None, interface: 'InterfaceType' = 'auto', reload: 'bool' = False, reload_dirs: 'list[str] | str | None' = None, reload_delay: 'float' = 0.25, reload_includes: 'list[str] | str | None' = None, reload_excludes: 'list[str] | str | None' = None, workers: 'int | None' = None, proxy_headers: 'bool' = True, server_header: 'bool' = True, date_header: 'bool' = True, forwarded_allow_ips: 'list[str] | str | None' = None, root_path: 'str' = '', limit_concurrency: 'int | None' = None, limit_max_requests: 'int | None' = None, backlog: 'int' = 2048, timeout_keep_alive: 'int' = 5, timeout_notify: 'int' = 30, timeout_graceful_shutdown: 'int | None' = None, callback_notify: 'Callable[..., Awaitable[None]] | None' = None, ssl_keyfile: 'str | None' = None, ssl_certfile: 'str | os.PathLike[str] | None' = None, ssl_keyfile_password: 'str | None' = None, ssl_version: 'int' = <_SSLMethod.PROTOCOL_TLS_SERVER: 17>, ssl_cert_reqs: 'int' = <VerifyMode.CERT_NONE: 0>, ssl_ca_certs: 'str | None' = None, ssl_ciphers: 'str' = 'TLSv1', headers: 'list[tuple[str, str]] | None' = None, factory: 'bool' = False, h11_max_incomplete_event_size: 'int | None' = None)
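A minimal programmatic launch sketch tying this Config to the Server class further below ('main:app' is a placeholder import string for an ASGI application):

from uvicorn import Config, Server

config = Config('main:app', host='127.0.0.1', port=8000, log_level='info')
server = Server(config)
server.run()   # setup_event_loop() followed by asyncio.run(serve())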
23,373
uvicorn.config
__init__
null
def __init__(
    self,
    app: ASGIApplication | Callable[..., Any] | str,
    host: str = "127.0.0.1",
    port: int = 8000,
    uds: str | None = None,
    fd: int | None = None,
    loop: LoopSetupType = "auto",
    http: type[asyncio.Protocol] | HTTPProtocolType = "auto",
    ws: type[asyncio.Protocol] | WSProtocolType = "auto",
    ws_max_size: int = 16 * 1024 * 1024,
    ws_max_queue: int = 32,
    ws_ping_interval: float | None = 20.0,
    ws_ping_timeout: float | None = 20.0,
    ws_per_message_deflate: bool = True,
    lifespan: LifespanType = "auto",
    env_file: str | os.PathLike[str] | None = None,
    log_config: dict[str, Any] | str | None = LOGGING_CONFIG,
    log_level: str | int | None = None,
    access_log: bool = True,
    use_colors: bool | None = None,
    interface: InterfaceType = "auto",
    reload: bool = False,
    reload_dirs: list[str] | str | None = None,
    reload_delay: float = 0.25,
    reload_includes: list[str] | str | None = None,
    reload_excludes: list[str] | str | None = None,
    workers: int | None = None,
    proxy_headers: bool = True,
    server_header: bool = True,
    date_header: bool = True,
    forwarded_allow_ips: list[str] | str | None = None,
    root_path: str = "",
    limit_concurrency: int | None = None,
    limit_max_requests: int | None = None,
    backlog: int = 2048,
    timeout_keep_alive: int = 5,
    timeout_notify: int = 30,
    timeout_graceful_shutdown: int | None = None,
    callback_notify: Callable[..., Awaitable[None]] | None = None,
    ssl_keyfile: str | None = None,
    ssl_certfile: str | os.PathLike[str] | None = None,
    ssl_keyfile_password: str | None = None,
    ssl_version: int = SSL_PROTOCOL_VERSION,
    ssl_cert_reqs: int = ssl.CERT_NONE,
    ssl_ca_certs: str | None = None,
    ssl_ciphers: str = "TLSv1",
    headers: list[tuple[str, str]] | None = None,
    factory: bool = False,
    h11_max_incomplete_event_size: int | None = None,
):
    self.app = app
    self.host = host
    self.port = port
    self.uds = uds
    self.fd = fd
    self.loop = loop
    self.http = http
    self.ws = ws
    self.ws_max_size = ws_max_size
    self.ws_max_queue = ws_max_queue
    self.ws_ping_interval = ws_ping_interval
    self.ws_ping_timeout = ws_ping_timeout
    self.ws_per_message_deflate = ws_per_message_deflate
    self.lifespan = lifespan
    self.log_config = log_config
    self.log_level = log_level
    self.access_log = access_log
    self.use_colors = use_colors
    self.interface = interface
    self.reload = reload
    self.reload_delay = reload_delay
    self.workers = workers or 1
    self.proxy_headers = proxy_headers
    self.server_header = server_header
    self.date_header = date_header
    self.root_path = root_path
    self.limit_concurrency = limit_concurrency
    self.limit_max_requests = limit_max_requests
    self.backlog = backlog
    self.timeout_keep_alive = timeout_keep_alive
    self.timeout_notify = timeout_notify
    self.timeout_graceful_shutdown = timeout_graceful_shutdown
    self.callback_notify = callback_notify
    self.ssl_keyfile = ssl_keyfile
    self.ssl_certfile = ssl_certfile
    self.ssl_keyfile_password = ssl_keyfile_password
    self.ssl_version = ssl_version
    self.ssl_cert_reqs = ssl_cert_reqs
    self.ssl_ca_certs = ssl_ca_certs
    self.ssl_ciphers = ssl_ciphers
    self.headers: list[tuple[str, str]] = headers or []
    self.encoded_headers: list[tuple[bytes, bytes]] = []
    self.factory = factory
    self.h11_max_incomplete_event_size = h11_max_incomplete_event_size

    self.loaded = False
    self.configure_logging()

    self.reload_dirs: list[Path] = []
    self.reload_dirs_excludes: list[Path] = []
    self.reload_includes: list[str] = []
    self.reload_excludes: list[str] = []

    if (reload_dirs or reload_includes or reload_excludes) and not self.should_reload:
        logger.warning(
            "Current configuration will not reload as not all conditions are met, "
            "please refer to documentation."
        )

    if self.should_reload:
        reload_dirs = _normalize_dirs(reload_dirs)
        reload_includes = _normalize_dirs(reload_includes)
        reload_excludes = _normalize_dirs(reload_excludes)

        self.reload_includes, self.reload_dirs = resolve_reload_patterns(reload_includes, reload_dirs)
        self.reload_excludes, self.reload_dirs_excludes = resolve_reload_patterns(reload_excludes, [])

        reload_dirs_tmp = self.reload_dirs.copy()

        for directory in self.reload_dirs_excludes:
            for reload_directory in reload_dirs_tmp:
                if directory == reload_directory or directory in reload_directory.parents:
                    try:
                        self.reload_dirs.remove(reload_directory)
                    except ValueError:
                        pass

        for pattern in self.reload_excludes:
            if pattern in self.reload_includes:
                self.reload_includes.remove(pattern)

        if not self.reload_dirs:
            if reload_dirs:
                logger.warning(
                    "Provided reload directories %s did not contain valid " + "directories, watching current working directory.",
                    reload_dirs,
                )
            self.reload_dirs = [Path(os.getcwd())]

        logger.info(
            "Will watch for changes in these directories: %s",
            sorted(list(map(str, self.reload_dirs))),
        )

    if env_file is not None:
        from dotenv import load_dotenv

        logger.info("Loading environment from '%s'", env_file)
        load_dotenv(dotenv_path=env_file)

    if workers is None and "WEB_CONCURRENCY" in os.environ:
        self.workers = int(os.environ["WEB_CONCURRENCY"])

    self.forwarded_allow_ips: list[str] | str
    if forwarded_allow_ips is None:
        self.forwarded_allow_ips = os.environ.get("FORWARDED_ALLOW_IPS", "127.0.0.1")
    else:
        self.forwarded_allow_ips = forwarded_allow_ips

    if self.reload and self.workers > 1:
        logger.warning('"workers" flag is ignored when reloading is enabled.')
(self, app: Union[Type[uvicorn._types.ASGI2Protocol], Callable[[Union[uvicorn._types.HTTPScope, uvicorn._types.WebSocketScope, uvicorn._types.LifespanScope], Callable[[], Awaitable[Union[uvicorn._types.HTTPRequestEvent, uvicorn._types.HTTPDisconnectEvent, uvicorn._types.WebSocketConnectEvent, uvicorn._types._WebSocketReceiveEventBytes, uvicorn._types._WebSocketReceiveEventText, uvicorn._types.WebSocketDisconnectEvent, uvicorn._types.LifespanStartupEvent, uvicorn._types.LifespanShutdownEvent]]], Callable[[Union[uvicorn._types.HTTPResponseStartEvent, uvicorn._types.HTTPResponseBodyEvent, uvicorn._types.HTTPResponseTrailersEvent, uvicorn._types.HTTPServerPushEvent, uvicorn._types.HTTPDisconnectEvent, uvicorn._types.WebSocketAcceptEvent, uvicorn._types._WebSocketSendEventBytes, uvicorn._types._WebSocketSendEventText, uvicorn._types.WebSocketResponseStartEvent, uvicorn._types.WebSocketResponseBodyEvent, uvicorn._types.WebSocketCloseEvent, uvicorn._types.LifespanStartupCompleteEvent, uvicorn._types.LifespanStartupFailedEvent, uvicorn._types.LifespanShutdownCompleteEvent, uvicorn._types.LifespanShutdownFailedEvent]], Awaitable[NoneType]]], Awaitable[NoneType]], Callable[..., Any], str], host: str = '127.0.0.1', port: int = 8000, uds: Optional[str] = None, fd: Optional[int] = None, loop: Literal['none', 'auto', 'asyncio', 'uvloop'] = 'auto', http: Union[type[asyncio.protocols.Protocol], Literal['auto', 'h11', 'httptools']] = 'auto', ws: Union[type[asyncio.protocols.Protocol], Literal['auto', 'none', 'websockets', 'wsproto']] = 'auto', ws_max_size: int = 16777216, ws_max_queue: int = 32, ws_ping_interval: float | None = 20.0, ws_ping_timeout: float | None = 20.0, ws_per_message_deflate: bool = True, lifespan: Literal['auto', 'on', 'off'] = 'auto', env_file: Union[str, os.PathLike[str], NoneType] = None, log_config: dict[str, typing.Any] | str | None = {'version': 1, 'disable_existing_loggers': False, 'formatters': {'default': {'()': 'uvicorn.logging.DefaultFormatter', 'fmt': '%(levelprefix)s %(message)s', 'use_colors': None}, 'access': {'()': 'uvicorn.logging.AccessFormatter', 'fmt': '%(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s'}}, 'handlers': {'default': {'formatter': 'default', 'class': 'logging.StreamHandler', 'stream': 'ext://sys.stderr'}, 'access': {'formatter': 'access', 'class': 'logging.StreamHandler', 'stream': 'ext://sys.stdout'}}, 'loggers': {'uvicorn': {'handlers': ['default'], 'level': 'INFO', 'propagate': False}, 'uvicorn.error': {'level': 'INFO'}, 'uvicorn.access': {'handlers': ['access'], 'level': 'INFO', 'propagate': False}}}, log_level: Union[str, int, NoneType] = None, access_log: bool = True, use_colors: Optional[bool] = None, interface: Literal['auto', 'asgi3', 'asgi2', 'wsgi'] = 'auto', reload: bool = False, reload_dirs: Union[list[str], str, NoneType] = None, reload_delay: float = 0.25, reload_includes: Union[list[str], str, NoneType] = None, reload_excludes: Union[list[str], str, NoneType] = None, workers: Optional[int] = None, proxy_headers: bool = True, server_header: bool = True, date_header: bool = True, forwarded_allow_ips: Union[list[str], str, NoneType] = None, root_path: str = '', limit_concurrency: Optional[int] = None, limit_max_requests: Optional[int] = None, backlog: int = 2048, timeout_keep_alive: int = 5, timeout_notify: int = 30, timeout_graceful_shutdown: Optional[int] = None, callback_notify: Optional[Callable[..., Awaitable[NoneType]]] = None, ssl_keyfile: Optional[str] = None, ssl_certfile: Union[str, os.PathLike[str], NoneType] = None, ssl_keyfile_password: Optional[str] = None, ssl_version: int = <_SSLMethod.PROTOCOL_TLS_SERVER: 17>, ssl_cert_reqs: int = <VerifyMode.CERT_NONE: 0>, ssl_ca_certs: Optional[str] = None, ssl_ciphers: str = 'TLSv1', headers: Optional[list[tuple[str, str]]] = None, factory: bool = False, h11_max_incomplete_event_size: Optional[int] = None)
23,374
uvicorn.config
bind_socket
null
def bind_socket(self) -> socket.socket:
    logger_args: list[str | int]
    if self.uds:  # pragma: py-win32
        path = self.uds
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.bind(path)
            uds_perms = 0o666
            os.chmod(self.uds, uds_perms)
        except OSError as exc:
            logger.error(exc)
            sys.exit(1)

        message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
        sock_name_format = "%s"
        color_message = "Uvicorn running on " + click.style(sock_name_format, bold=True) + " (Press CTRL+C to quit)"
        logger_args = [self.uds]
    elif self.fd:  # pragma: py-win32
        sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
        message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
        fd_name_format = "%s"
        color_message = "Uvicorn running on " + click.style(fd_name_format, bold=True) + " (Press CTRL+C to quit)"
        logger_args = [sock.getsockname()]
    else:
        family = socket.AF_INET
        addr_format = "%s://%s:%d"

        if self.host and ":" in self.host:  # pragma: py-win32
            # It's an IPv6 address.
            family = socket.AF_INET6
            addr_format = "%s://[%s]:%d"

        sock = socket.socket(family=family)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind((self.host, self.port))
        except OSError as exc:
            logger.error(exc)
            sys.exit(1)

        message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
        color_message = "Uvicorn running on " + click.style(addr_format, bold=True) + " (Press CTRL+C to quit)"
        protocol_name = "https" if self.is_ssl else "http"
        logger_args = [protocol_name, self.host, sock.getsockname()[1]]
    logger.info(message, *logger_args, extra={"color_message": color_message})
    sock.set_inheritable(True)
    return sock
(self) -> socket.socket
23,375
uvicorn.config
configure_logging
null
def configure_logging(self) -> None:
    logging.addLevelName(TRACE_LOG_LEVEL, "TRACE")

    if self.log_config is not None:
        if isinstance(self.log_config, dict):
            if self.use_colors in (True, False):
                self.log_config["formatters"]["default"]["use_colors"] = self.use_colors
                self.log_config["formatters"]["access"]["use_colors"] = self.use_colors
            logging.config.dictConfig(self.log_config)
        elif self.log_config.endswith(".json"):
            with open(self.log_config) as file:
                loaded_config = json.load(file)
                logging.config.dictConfig(loaded_config)
        elif self.log_config.endswith((".yaml", ".yml")):
            # Install the PyYAML package or the uvicorn[standard] optional
            # dependencies to enable this functionality.
            import yaml

            with open(self.log_config) as file:
                loaded_config = yaml.safe_load(file)
                logging.config.dictConfig(loaded_config)
        else:
            # See the note about fileConfig() here:
            # https://docs.python.org/3/library/logging.config.html#configuration-file-format
            logging.config.fileConfig(self.log_config, disable_existing_loggers=False)

    if self.log_level is not None:
        if isinstance(self.log_level, str):
            log_level = LOG_LEVELS[self.log_level]
        else:
            log_level = self.log_level
        logging.getLogger("uvicorn.error").setLevel(log_level)
        logging.getLogger("uvicorn.access").setLevel(log_level)
        logging.getLogger("uvicorn.asgi").setLevel(log_level)
    if self.access_log is False:
        logging.getLogger("uvicorn.access").handlers = []
        logging.getLogger("uvicorn.access").propagate = False
(self) -> NoneType
23,376
uvicorn.config
load
null
def load(self) -> None:
    assert not self.loaded

    if self.is_ssl:
        assert self.ssl_certfile
        self.ssl: ssl.SSLContext | None = create_ssl_context(
            keyfile=self.ssl_keyfile,
            certfile=self.ssl_certfile,
            password=self.ssl_keyfile_password,
            ssl_version=self.ssl_version,
            cert_reqs=self.ssl_cert_reqs,
            ca_certs=self.ssl_ca_certs,
            ciphers=self.ssl_ciphers,
        )
    else:
        self.ssl = None

    encoded_headers = [(key.lower().encode("latin1"), value.encode("latin1")) for key, value in self.headers]
    self.encoded_headers = (
        [(b"server", b"uvicorn")] + encoded_headers
        if b"server" not in dict(encoded_headers) and self.server_header
        else encoded_headers
    )

    if isinstance(self.http, str):
        http_protocol_class = import_from_string(HTTP_PROTOCOLS[self.http])
        self.http_protocol_class: type[asyncio.Protocol] = http_protocol_class
    else:
        self.http_protocol_class = self.http

    if isinstance(self.ws, str):
        ws_protocol_class = import_from_string(WS_PROTOCOLS[self.ws])
        self.ws_protocol_class: type[asyncio.Protocol] | None = ws_protocol_class
    else:
        self.ws_protocol_class = self.ws

    self.lifespan_class = import_from_string(LIFESPAN[self.lifespan])

    try:
        self.loaded_app = import_from_string(self.app)
    except ImportFromStringError as exc:
        logger.error("Error loading ASGI app. %s" % exc)
        sys.exit(1)

    try:
        self.loaded_app = self.loaded_app()
    except TypeError as exc:
        if self.factory:
            logger.error("Error loading ASGI app factory: %s", exc)
            sys.exit(1)
    else:
        if not self.factory:
            logger.warning(
                "ASGI app factory detected. Using it, "
                "but please consider setting the --factory flag explicitly."
            )

    if self.interface == "auto":
        if inspect.isclass(self.loaded_app):
            use_asgi_3 = hasattr(self.loaded_app, "__await__")
        elif inspect.isfunction(self.loaded_app):
            use_asgi_3 = asyncio.iscoroutinefunction(self.loaded_app)
        else:
            call = getattr(self.loaded_app, "__call__", None)
            use_asgi_3 = asyncio.iscoroutinefunction(call)
        self.interface = "asgi3" if use_asgi_3 else "asgi2"

    if self.interface == "wsgi":
        self.loaded_app = WSGIMiddleware(self.loaded_app)
        self.ws_protocol_class = None
    elif self.interface == "asgi2":
        self.loaded_app = ASGI2Middleware(self.loaded_app)

    if logger.getEffectiveLevel() <= TRACE_LOG_LEVEL:
        self.loaded_app = MessageLoggerMiddleware(self.loaded_app)
    if self.proxy_headers:
        self.loaded_app = ProxyHeadersMiddleware(self.loaded_app, trusted_hosts=self.forwarded_allow_ips)

    self.loaded = True
(self) -> NoneType
23,377
uvicorn.config
setup_event_loop
null
def setup_event_loop(self) -> None:
    loop_setup: Callable | None = import_from_string(LOOP_SETUPS[self.loop])
    if loop_setup is not None:
        loop_setup(use_subprocess=self.use_subprocess)
(self) -> NoneType
23,378
uvicorn.server
Server
null
class Server:
    def __init__(self, config: Config) -> None:
        self.config = config
        self.server_state = ServerState()

        self.started = False
        self.should_exit = False
        self.force_exit = False
        self.last_notified = 0.0

        self._captured_signals: list[int] = []

    def run(self, sockets: list[socket.socket] | None = None) -> None:
        self.config.setup_event_loop()
        return asyncio.run(self.serve(sockets=sockets))

    async def serve(self, sockets: list[socket.socket] | None = None) -> None:
        with self.capture_signals():
            await self._serve(sockets)

    async def _serve(self, sockets: list[socket.socket] | None = None) -> None:
        process_id = os.getpid()

        config = self.config
        if not config.loaded:
            config.load()

        self.lifespan = config.lifespan_class(config)

        message = "Started server process [%d]"
        color_message = "Started server process [" + click.style("%d", fg="cyan") + "]"
        logger.info(message, process_id, extra={"color_message": color_message})

        await self.startup(sockets=sockets)
        if self.should_exit:
            return
        await self.main_loop()
        await self.shutdown(sockets=sockets)

        message = "Finished server process [%d]"
        color_message = "Finished server process [" + click.style("%d", fg="cyan") + "]"
        logger.info(message, process_id, extra={"color_message": color_message})

    async def startup(self, sockets: list[socket.socket] | None = None) -> None:
        await self.lifespan.startup()
        if self.lifespan.should_exit:
            self.should_exit = True
            return

        config = self.config

        def create_protocol(
            _loop: asyncio.AbstractEventLoop | None = None,
        ) -> asyncio.Protocol:
            return config.http_protocol_class(  # type: ignore[call-arg]
                config=config,
                server_state=self.server_state,
                app_state=self.lifespan.state,
                _loop=_loop,
            )

        loop = asyncio.get_running_loop()

        listeners: Sequence[socket.SocketType]
        if sockets is not None:
            # Explicitly passed a list of open sockets.
            # We use this when the server is run from a Gunicorn worker.
            def _share_socket(
                sock: socket.SocketType,
            ) -> socket.SocketType:  # pragma py-linux pragma: py-darwin
                # Windows requires the socket be explicitly shared across
                # multiple workers (processes).
                from socket import fromshare  # type: ignore[attr-defined]

                sock_data = sock.share(os.getpid())  # type: ignore[attr-defined]
                return fromshare(sock_data)

            self.servers: list[asyncio.base_events.Server] = []
            for sock in sockets:
                is_windows = platform.system() == "Windows"
                if config.workers > 1 and is_windows:  # pragma: py-not-win32
                    sock = _share_socket(sock)  # type: ignore[assignment]
                server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)
                self.servers.append(server)
            listeners = sockets

        elif config.fd is not None:  # pragma: py-win32
            # Use an existing socket, from a file descriptor.
            sock = socket.fromfd(config.fd, socket.AF_UNIX, socket.SOCK_STREAM)
            server = await loop.create_server(create_protocol, sock=sock, ssl=config.ssl, backlog=config.backlog)
            assert server.sockets is not None  # mypy
            listeners = server.sockets
            self.servers = [server]

        elif config.uds is not None:  # pragma: py-win32
            # Create a socket using UNIX domain socket.
            uds_perms = 0o666
            if os.path.exists(config.uds):
                uds_perms = os.stat(config.uds).st_mode
            server = await loop.create_unix_server(
                create_protocol, path=config.uds, ssl=config.ssl, backlog=config.backlog
            )
            os.chmod(config.uds, uds_perms)
            assert server.sockets is not None  # mypy
            listeners = server.sockets
            self.servers = [server]

        else:
            # Standard case. Create a socket from a host/port pair.
            try:
                server = await loop.create_server(
                    create_protocol,
                    host=config.host,
                    port=config.port,
                    ssl=config.ssl,
                    backlog=config.backlog,
                )
            except OSError as exc:
                logger.error(exc)
                await self.lifespan.shutdown()
                sys.exit(1)

            assert server.sockets is not None
            listeners = server.sockets
            self.servers = [server]

        if sockets is None:
            self._log_started_message(listeners)
        else:
            # We're most likely running multiple workers, so a message has already been
            # logged by `config.bind_socket()`.
            pass

        self.started = True

    def _log_started_message(self, listeners: Sequence[socket.SocketType]) -> None:
        config = self.config

        if config.fd is not None:  # pragma: py-win32
            sock = listeners[0]
            logger.info(
                "Uvicorn running on socket %s (Press CTRL+C to quit)",
                sock.getsockname(),
            )

        elif config.uds is not None:  # pragma: py-win32
            logger.info("Uvicorn running on unix socket %s (Press CTRL+C to quit)", config.uds)

        else:
            addr_format = "%s://%s:%d"
            host = "0.0.0.0" if config.host is None else config.host
            if ":" in host:
                # It's an IPv6 address.
                addr_format = "%s://[%s]:%d"

            port = config.port
            if port == 0:
                port = listeners[0].getsockname()[1]

            protocol_name = "https" if config.ssl else "http"
            message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
            color_message = "Uvicorn running on " + click.style(addr_format, bold=True) + " (Press CTRL+C to quit)"
            logger.info(
                message,
                protocol_name,
                host,
                port,
                extra={"color_message": color_message},
            )

    async def main_loop(self) -> None:
        counter = 0
        should_exit = await self.on_tick(counter)
        while not should_exit:
            counter += 1
            counter = counter % 864000
            await asyncio.sleep(0.1)
            should_exit = await self.on_tick(counter)

    async def on_tick(self, counter: int) -> bool:
        # Update the default headers, once per second.
        if counter % 10 == 0:
            current_time = time.time()
            current_date = formatdate(current_time, usegmt=True).encode()

            if self.config.date_header:
                date_header = [(b"date", current_date)]
            else:
                date_header = []

            self.server_state.default_headers = date_header + self.config.encoded_headers

            # Callback to `callback_notify` once every `timeout_notify` seconds.
            if self.config.callback_notify is not None:
                if current_time - self.last_notified > self.config.timeout_notify:
                    self.last_notified = current_time
                    await self.config.callback_notify()

        # Determine if we should exit.
        if self.should_exit:
            return True
        if self.config.limit_max_requests is not None:
            return self.server_state.total_requests >= self.config.limit_max_requests
        return False

    async def shutdown(self, sockets: list[socket.socket] | None = None) -> None:
        logger.info("Shutting down")

        # Stop accepting new connections.
        for server in self.servers:
            server.close()
        for sock in sockets or []:
            sock.close()

        # Request shutdown on all existing connections.
        for connection in list(self.server_state.connections):
            connection.shutdown()
        await asyncio.sleep(0.1)

        # When 3.10 is not supported anymore, use `async with asyncio.timeout(...):`.
        try:
            await asyncio.wait_for(
                self._wait_tasks_to_complete(),
                timeout=self.config.timeout_graceful_shutdown,
            )
        except asyncio.TimeoutError:
            logger.error(
                "Cancel %s running task(s), timeout graceful shutdown exceeded",
                len(self.server_state.tasks),
            )
            for t in self.server_state.tasks:
                if sys.version_info < (3, 9):  # pragma: py-gte-39
                    t.cancel()
                else:  # pragma: py-lt-39
                    t.cancel(msg="Task cancelled, timeout graceful shutdown exceeded")

        # Send the lifespan shutdown event, and wait for application shutdown.
        if not self.force_exit:
            await self.lifespan.shutdown()

    async def _wait_tasks_to_complete(self) -> None:
        # Wait for existing connections to finish sending responses.
        if self.server_state.connections and not self.force_exit:
            msg = "Waiting for connections to close. (CTRL+C to force quit)"
            logger.info(msg)
            while self.server_state.connections and not self.force_exit:
                await asyncio.sleep(0.1)

        # Wait for existing tasks to complete.
        if self.server_state.tasks and not self.force_exit:
            msg = "Waiting for background tasks to complete. (CTRL+C to force quit)"
            logger.info(msg)
            while self.server_state.tasks and not self.force_exit:
                await asyncio.sleep(0.1)

        for server in self.servers:
            await server.wait_closed()

    @contextlib.contextmanager
    def capture_signals(self) -> Generator[None, None, None]:
        # Signals can only be listened to from the main thread.
        if threading.current_thread() is not threading.main_thread():
            yield
            return
        # always use signal.signal, even if loop.add_signal_handler is available
        # this allows to restore previous signal handlers later on
        original_handlers = {sig: signal.signal(sig, self.handle_exit) for sig in HANDLED_SIGNALS}
        try:
            yield
        finally:
            for sig, handler in original_handlers.items():
                signal.signal(sig, handler)
            # If we did gracefully shut down due to a signal, try to
            # trigger the expected behaviour now; multiple signals would be
            # done LIFO, see https://stackoverflow.com/questions/48434964
            for captured_signal in reversed(self._captured_signals):
                signal.raise_signal(captured_signal)

    def handle_exit(self, sig: int, frame: FrameType | None) -> None:
        self._captured_signals.append(sig)
        if self.should_exit and sig == signal.SIGINT:
            self.force_exit = True
        else:
            self.should_exit = True
(config: 'Config') -> 'None'
23,379
uvicorn.server
__init__
null
def __init__(self, config: Config) -> None:
    self.config = config
    self.server_state = ServerState()

    self.started = False
    self.should_exit = False
    self.force_exit = False
    self.last_notified = 0.0

    self._captured_signals: list[int] = []
(self, config: uvicorn.config.Config) -> NoneType
23,380
uvicorn.server
_log_started_message
null
def _log_started_message(self, listeners: Sequence[socket.SocketType]) -> None:
    config = self.config

    if config.fd is not None:  # pragma: py-win32
        sock = listeners[0]
        logger.info(
            "Uvicorn running on socket %s (Press CTRL+C to quit)",
            sock.getsockname(),
        )

    elif config.uds is not None:  # pragma: py-win32
        logger.info("Uvicorn running on unix socket %s (Press CTRL+C to quit)", config.uds)

    else:
        addr_format = "%s://%s:%d"
        host = "0.0.0.0" if config.host is None else config.host
        if ":" in host:
            # It's an IPv6 address.
            addr_format = "%s://[%s]:%d"

        port = config.port
        if port == 0:
            port = listeners[0].getsockname()[1]

        protocol_name = "https" if config.ssl else "http"
        message = f"Uvicorn running on {addr_format} (Press CTRL+C to quit)"
        color_message = "Uvicorn running on " + click.style(addr_format, bold=True) + " (Press CTRL+C to quit)"
        logger.info(
            message,
            protocol_name,
            host,
            port,
            extra={"color_message": color_message},
        )
(self, listeners: Sequence[_socket.socket]) -> NoneType