Dataset columns (dtype and min/max string length, or number of classes):

    repository_name             stringlengths   5 .. 67
    func_path_in_repository     stringlengths   4 .. 234
    func_name                   stringlengths   0 .. 314
    whole_func_string           stringlengths   52 .. 3.87M
    language                    stringclasses   6 values
    func_code_string            stringlengths   52 .. 3.87M
    func_documentation_string   stringlengths   1 .. 47.2k
    func_code_url               stringlengths   85 .. 339
riga/law
law/util.py
human_time_diff
def human_time_diff(*args, **kwargs):
    """
    Returns a human readable time difference. The largest unit is days. All *args* and *kwargs*
    are passed to ``datetime.timedelta``. Example:

    .. code-block:: python

        human_time_diff(seconds=1233)
        # -> "20 minutes, 33 seconds"

        human_time_diff(seconds=90001)
        # -> "1 day, 1 hour, 1 second"
    """
    secs = float(datetime.timedelta(*args, **kwargs).total_seconds())
    parts = []
    for unit, mul in time_units:
        if secs / mul >= 1 or mul == 1:
            if mul > 1:
                n = int(math.floor(secs / mul))
                secs -= n * mul
            else:
                n = round(secs, 1)
            parts.append("{} {}{}".format(n, unit, "" if n == 1 else "s"))
    return ", ".join(parts)
python
Returns a human readable time difference. The largest unit is days. All *args* and *kwargs* are passed to ``datetime.timedelta``. Example: .. code-block:: python human_time_diff(seconds=1233) # -> "20 minutes, 33 seconds" human_time_diff(seconds=90001) # -> "1 day, 1 hour, 1 second"
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L701-L724
riga/law
law/util.py
is_file_exists_error
def is_file_exists_error(e):
    """
    Returns whether the exception *e* was raised due to an already existing file or directory.
    """
    if six.PY3:
        return isinstance(e, FileExistsError)  # noqa: F821
    else:
        return isinstance(e, OSError) and e.errno == 17
python
Returns whether the exception *e* was raised due to an already existing file or directory.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L727-L734
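A minimal usage sketch (assuming law is installed and importable; the directory name is illustrative):

    import os
    from law.util import is_file_exists_error

    try:
        os.mkdir("output")  # raises OSError/FileExistsError when "output" already exists
    except OSError as e:
        if not is_file_exists_error(e):
            raise  # only swallow the "already exists" case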
riga/law
law/util.py
send_mail
def send_mail(recipient, sender, subject="", content="", smtp_host="127.0.0.1", smtp_port=25):
    """
    Lightweight mail functionality. Sends an email from *sender* to *recipient* with *subject*
    and *content*. *smtp_host* and *smtp_port* are forwarded to the ``smtplib.SMTP`` constructor.
    *True* is returned on success, *False* otherwise.
    """
    try:
        server = smtplib.SMTP(smtp_host, smtp_port)
    except Exception as e:
        logger = logging.getLogger(__name__)
        logger.warning("cannot create SMTP server: {}".format(e))
        return False

    header = "From: {}\r\nTo: {}\r\nSubject: {}\r\n\r\n".format(sender, recipient, subject)
    server.sendmail(sender, recipient, header + content)

    return True
python
Lightweight mail functionality. Sends an email from *sender* to *recipient* with *subject* and *content*. *smtp_host* and *smtp_port* are forwarded to the ``smtplib.SMTP`` constructor. *True* is returned on success, *False* otherwise.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L746-L762
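A usage sketch against the default local SMTP relay; both addresses are placeholders, not values from the law config:

    from law.util import send_mail

    ok = send_mail(
        recipient="[email protected]",  # placeholder address
        sender="[email protected]",     # placeholder address
        subject="law: task done",
        content="all branches finished",
    )
    print(ok)  # True on success, False when the SMTP connection could not be created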
riga/law
law/util.py
open_compat
def open_compat(*args, **kwargs):
    """
    Polyfill for python's ``open`` factory, returning the plain ``open`` in python 3, and
    ``io.open`` in python 2 with a patched ``write`` method that internally handles unicode
    conversion of its first argument. All *args* and *kwargs* are forwarded.
    """
    if six.PY3:
        return open(*args, **kwargs)
    else:
        f = io.open(*args, **kwargs)

        if f.encoding and f.encoding.lower().replace("-", "") == "utf8":
            write_orig = f.write

            def write(data, *args, **kwargs):
                u = unicode  # noqa: F821
                if not isinstance(data, u):
                    data = u(data)
                return write_orig(data, *args, **kwargs)

            f.write = write

        return f
python
Polyfill for python's ``open`` factory, returning the plain ``open`` in python 3, and ``io.open`` in python 2 with a patched ``write`` method that internally handles unicode conversion of its first argument. All *args* and *kwargs* are forwarded.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L823-L846
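A short sketch of the intended use (the file name is illustrative):

    from law.util import open_compat

    # the same call works on both interpreters; on python 2, plain str input
    # is converted to unicode internally before being written
    with open_compat("notes.txt", "w", encoding="utf-8") as f:
        f.write("hello\n")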
riga/law
law/util.py
patch_object
def patch_object(obj, attr, value):
    """
    Context manager that temporarily patches an object *obj* by replacing its attribute *attr*
    with *value*. The original value is set again when the context is closed.
    """
    orig = getattr(obj, attr, no_value)

    try:
        setattr(obj, attr, value)
        yield obj
    finally:
        try:
            if orig is no_value:
                delattr(obj, attr)
            else:
                setattr(obj, attr, orig)
        except:
            pass
python
Context manager that temporarily patches an object *obj* by replacing its attribute *attr* with *value*. The original value is set again when the context is closed.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L850-L868
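A usage sketch; the cell above shows only the generator body, so it assumes the repository wraps it with ``contextlib.contextmanager`` (the class is invented for illustration):

    import law.util

    class Settings(object):
        debug = False

    with law.util.patch_object(Settings, "debug", True) as s:
        print(s.debug)      # -> True, temporarily patched

    print(Settings.debug)   # -> False, original value restored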
riga/law
law/util.py
TeeStream._flush
def _flush(self):
    """
    Flushes all registered consumer streams.
    """
    for consumer in self.consumers:
        if not getattr(consumer, "closed", False):
            consumer.flush()
python
Flushes all registered consumer streams.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L950-L956
riga/law
law/util.py
TeeStream._write
def _write(self, *args, **kwargs):
    """
    Writes to all registered consumer streams, passing *args* and *kwargs*.
    """
    for consumer in self.consumers:
        consumer.write(*args, **kwargs)
python
Writes to all registered consumer streams, passing *args* and *kwargs*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L958-L963
riga/law
law/util.py
FilteredStream._write
def _write(self, *args, **kwargs):
    """
    Writes to the consumer stream when *filter_fn* evaluates to *True*, passing *args* and
    *kwargs*.
    """
    if self.filter_fn(*args, **kwargs):
        self.stream.write(*args, **kwargs)
python
Writes to the consumer stream when *filter_fn* evaluates to *True*, passing *args* and *kwargs*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/util.py#L991-L997
riga/law
law/config.py
Config.get_default
def get_default(self, section, option, default=None, type=None, expandvars=False,
        expanduser=False):
    """
    Returns the config value defined by *section* and *option*. When either the section or the
    option does not exist, the *default* value is returned instead. When *type* is set, it must
    be either `"str"`, `"int"`, `"float"`, or `"boolean"`. When *expandvars* is *True*,
    environment variables are expanded. When *expanduser* is *True*, user variables are expanded
    as well.
    """
    if self.has_section(section) and self.has_option(section, option):
        value = self.get(section, option)
        if isinstance(value, six.string_types):
            if expandvars:
                value = os.path.expandvars(value)
            if expanduser:
                value = os.path.expanduser(value)
        return value if not type else self._get_type_converter(type)(value)
    else:
        return default
python
Returns the config value defined by *section* and *option*. When either the section or the option does not exist, the *default* value is returned instead. When *type* is set, it must be either `"str"`, `"int"`, `"float"`, or `"boolean"`. When *expandvars* is *True*, environment variables are expanded. When *expanduser* is *True*, user variables are expanded as well.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/config.py#L198-L216
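A sketch against the global config instance (the section and option names are invented for illustration):

    from law.config import Config

    cfg = Config.instance()

    # hypothetical option; converted to int when present, 1 otherwise
    workers = cfg.get_default("my_section", "workers", default=1, type="int")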
riga/law
law/config.py
Config.get_expanded
def get_expanded(self, *args, **kwargs):
    """
    Same as :py:meth:`get_default`, but *expandvars* and *expanduser* arguments are set to
    *True* by default.
    """
    kwargs.setdefault("expandvars", True)
    kwargs.setdefault("expanduser", True)
    return self.get_default(*args, **kwargs)
python
Same as :py:meth:`get_default`, but *expandvars* and *expanduser* arguments are set to *True* by default.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/config.py#L218-L225
riga/law
law/config.py
Config.update
def update(self, data, overwrite=None, overwrite_sections=True, overwrite_options=True):
    """
    Updates the currently stored configuration with new *data*, given as a dictionary. When
    *overwrite_sections* is *False*, sections in *data* that are already present in the current
    config are skipped. When *overwrite_options* is *False*, existing options are not
    overwritten. When *overwrite* is not *None*, both *overwrite_sections* and
    *overwrite_options* are set to its value.
    """
    if overwrite is not None:
        overwrite_sections = overwrite
        overwrite_options = overwrite

    for section, _data in six.iteritems(data):
        if not self.has_section(section):
            self.add_section(section)
        elif not overwrite_sections:
            continue

        for option, value in six.iteritems(_data):
            if overwrite_options or not self.has_option(section, option):
                self.set(section, option, str(value))
python
Updates the currently stored configuration with new *data*, given as a dictionary. When *overwrite_sections* is *False*, sections in *data* that are already present in the current config are skipped. When *overwrite_options* is *False*, existing options are not overwritten. When *overwrite* is not *None*, both *overwrite_sections* and *overwrite_options* are set to its value.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/config.py#L227-L247
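A sketch of the nested-dict layout that update() consumes (section and option names are invented):

    from law.config import Config

    cfg = Config.instance()

    # outer keys are sections, inner keys are options; values are stringified
    cfg.update({"my_section": {"workers": 4}}, overwrite=False)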
riga/law
law/config.py
Config.include
def include(self, filename, *args, **kwargs):
    """
    Updates the current config with the config found in *filename*. All *args* and *kwargs* are
    forwarded to :py:meth:`update`.
    """
    p = self.__class__(filename, skip_defaults=True, skip_fallbacks=True)
    self.update(p._sections, *args, **kwargs)
python
Updates the current config with the config found in *filename*. All *args* and *kwargs* are forwarded to :py:meth:`update`.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/config.py#L249-L255
riga/law
law/config.py
Config.keys
def keys(self, section, prefix=None):
    """
    Returns all keys of a *section* in a list. When *prefix* is set, only keys starting with
    that prefix are returned.
    """
    return [key for key, _ in self.items(section) if (not prefix or key.startswith(prefix))]
python
Returns all keys of a *section* in a list. When *prefix* is set, only keys starting with that prefix are returned.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/config.py#L257-L262
riga/law
law/config.py
Config.sync_luigi_config
def sync_luigi_config(self, push=True, pull=True, expand=True):
    """
    Synchronizes sections starting with ``"luigi_"`` with the luigi configuration parser. First,
    when *push* is *True*, options that exist in law but **not** in luigi are stored as defaults
    in the luigi config. Then, when *pull* is *True*, all luigi-related options in the law
    config are overwritten with those from luigi. This way, options set via luigi defaults
    (environment variables, global configuration files, `LUIGI_CONFIG_PATH`) always have
    precedence. When *expand* is *True*, environment variables are expanded before pushing them
    to the luigi config.
    """
    prefix = "luigi_"
    lparser = luigi.configuration.LuigiConfigParser.instance()

    if push:
        for section in self.sections():
            if not section.startswith(prefix):
                continue
            lsection = section[len(prefix):]

            if not lparser.has_section(lsection):
                lparser.add_section(lsection)

            for option in self.options(section):
                if not lparser.has_option(lsection, option):
                    if expand:
                        value = self.get_expanded(section, option)
                    else:
                        value = self.get(section, option)
                    lparser.set(lsection, option, value)

    if pull:
        for lsection in lparser.sections():
            section = prefix + lsection

            if not self.has_section(section):
                self.add_section(section)

            for option, value in lparser.items(lsection):
                self.set(section, option, value)
python
Synchronizes sections starting with ``"luigi_"`` with the luigi configuration parser. First, when *push* is *True*, options that exist in law but **not** in luigi are stored as defaults in the luigi config. Then, when *pull* is *True*, all luigi-related options in the law config are overwritten with those from luigi. This way, options set via luigi defaults (environment variables, global configuration files, `LUIGI_CONFIG_PATH`) always have precedence. When *expand* is *True*, environment variables are expanded before pushing them to the luigi config.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/config.py#L264-L302
riga/law
law/contrib/telegram/notification.py
notify_telegram
def notify_telegram(title, content, token=None, chat=None, mention_user=None, **kwargs):
    """
    Sends a telegram notification and returns *True* on success. The communication with the
    telegram API might have some delays and is therefore handled by a thread.
    """
    # test import
    import telegram  # noqa: F401

    cfg = Config.instance()

    # get default token and chat
    if not token:
        token = cfg.get_expanded("notifications", "telegram_token")
    if not chat:
        chat = cfg.get_expanded("notifications", "telegram_chat")

    if not token or not chat:
        logger.warning("cannot send Telegram notification, token ({}) or chat ({}) empty".format(
            token, chat))
        return False

    # append the user to mention to the title
    # unless explicitly set to empty string
    mention_text = ""
    if mention_user is None:
        mention_user = cfg.get_expanded("notifications", "telegram_mention_user")
    if mention_user:
        mention_text = " (@{})".format(mention_user)

    # request data for the API call
    request = {
        "parse_mode": "Markdown",
    }

    # standard or attachment content?
    if isinstance(content, six.string_types):
        request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
    else:
        # content is a dict, add some formatting
        request["text"] = "{}{}\n\n".format(title, mention_text)

        for key, value in content.items():
            request["text"] += "_{}_: {}\n".format(key, value)

    # extend by arbitrary kwargs
    request.update(kwargs)

    # threaded, non-blocking API communication
    thread = threading.Thread(target=_notify_telegram, args=(token, chat, request))
    thread.start()

    return True
python
Sends a telegram notification and returns *True* on success. The communication with the telegram API might have some delays and is therefore handled by a thread.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/telegram/notification.py#L19-L70
riga/law
law/workflow/local.py
LocalWorkflowProxy.complete
def complete(self):
    """
    When *local_workflow_require_branches* of the task was set to *True*, returns whether the
    :py:meth:`run` method has been called before. Otherwise, the call is forwarded to the super
    class.
    """
    if self.task.local_workflow_require_branches:
        return self._has_run
    else:
        return super(LocalWorkflowProxy, self).complete()
python
When *local_workflow_require_branches* of the task was set to *True*, returns whether the :py:meth:`run` method has been called before. Otherwise, the call is forwarded to the super class.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/local.py#L29-L38
riga/law
law/workflow/local.py
LocalWorkflowProxy.run
def run(self):
    """
    When *local_workflow_require_branches* of the task was set to *False*, starts all branch
    tasks via dynamic dependencies by yielding them in a list, or simply does nothing otherwise.
    """
    if not self._has_yielded and not self.task.local_workflow_require_branches:
        self._has_yielded = True

        yield list(self.task.get_branch_tasks().values())

    self._has_run = True
python
When *local_workflow_require_branches* of the task was set to *False*, starts all branch tasks via dynamic dependencies by yielding them in a list, or simply does nothing otherwise.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/local.py#L48-L58
riga/law
law/decorator.py
factory
def factory(**default_opts):
    """
    Factory function to create decorators for tasks' run methods. Default options for the
    decorator function can be given in *default_opts*. The returned decorator can be used with
    or without function invocation. Example:

    .. code-block:: python

        @factory(digits=2)
        def runtime(fn, opts, task, *args, **kwargs):
            t0 = time.time()
            try:
                return fn(task, *args, **kwargs)
            finally:
                t1 = time.time()
                diff = round(t1 - t0, opts["digits"])
                print("runtime:")
                print(diff)

        ...

        class MyTask(law.Task):

            @runtime
            def run(self):
                ...

            # or
            @runtime(digits=3)
            def run(self):
                ...

    .. note::

        Decorators might not have the expected behavior when used to decorate generator
        functions such as ``Task.run()`` methods that yield dynamic dependencies.
    """
    def wrapper(decorator):
        @functools.wraps(decorator)
        def wrapper(fn=None, **opts):
            _opts = default_opts.copy()
            _opts.update(opts)

            def wrapper(fn):
                @functools.wraps(fn)
                def wrapper(*args, **kwargs):
                    return decorator(fn, _opts, *args, **kwargs)
                return wrapper

            return wrapper if fn is None else wrapper(fn)
        return wrapper
    return wrapper
python
Factory function to create decorators for tasks' run methods. Default options for the decorator function can be given in *default_opts*. The returned decorator can be used with or without function invocation. Example: .. code-block:: python @factory(digits=2) def runtime(fn, opts, task, *args, **kwargs): t0 = time.time() try: return fn(task, *args, **kwargs) finally: t1 = time.time() diff = round(t1 - t0, opts["digits"]) print("runtime:") print(diff) ... class MyTask(law.Task): @runtime def run(self): ... # or @runtime(digits=3) def run(self): ... .. note:: Decorators might not have the expected behavior when used to decorate generator functions such as ``Task.run()`` methods that yield dynamic dependencies.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/decorator.py#L46-L97
riga/law
law/decorator.py
log
def log(fn, opts, task, *args, **kwargs):
    """ log()
    Wraps a bound method of a task and redirects output of both stdout and stderr to the file
    defined by the task's *log_file* parameter or *default_log_file* attribute. If its value is
    ``"-"`` or *None*, the output is not redirected.
    """
    _task = get_task(task)
    log = get_param(_task.log_file, _task.default_log_file)

    if log == "-" or not log:
        return fn(task, *args, **kwargs)
    else:
        # use the local target functionality to create the parent directory
        LocalFileTarget(log).parent.touch()

        with open_compat(log, "a", 1) as f:
            tee = TeeStream(f, sys.__stdout__)
            sys.stdout = tee
            sys.stderr = tee
            try:
                ret = fn(task, *args, **kwargs)
            except:
                traceback.print_exc(file=tee)
                raise
            finally:
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
                tee.flush()
        return ret
python
log() Wraps a bound method of a task and redirects output of both stdout and stderr to the file defined by the task's *log_file* parameter or *default_log_file* attribute. If its value is ``"-"`` or *None*, the output is not redirected.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/decorator.py#L105-L132
riga/law
law/decorator.py
safe_output
def safe_output(fn, opts, task, *args, **kwargs):
    """ safe_output(skip=None)
    Wraps a bound method of a task and guards its execution. If an exception occurs, and it is
    not an instance of *skip*, the task's output is removed prior to the actual raising.
    """
    try:
        return fn(task, *args, **kwargs)
    except Exception as e:
        if opts["skip"] is None or not isinstance(e, opts["skip"]):
            for outp in luigi.task.flatten(task.output()):
                outp.remove()
        raise
python
safe_output(skip=None) Wraps a bound method of a task and guards its execution. If an exception occurs, and it is not an instance of *skip*, the task's output is removed prior to the actual raising.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/decorator.py#L136-L147
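A decorator-style sketch, assuming the decorator is exposed via law.decorator and built through the factory above (RuntimeError as *skip* is an arbitrary choice):

    import law
    from law.decorator import safe_output

    class MyTask(law.Task):

        # outputs survive a RuntimeError, but are removed for any other exception
        @safe_output(skip=RuntimeError)
        def run(self):
            ...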
riga/law
law/decorator.py
delay
def delay(fn, opts, task, *args, **kwargs):
    """ delay(t=5, stddev=0., pdf="gauss")
    Wraps a bound method of a task and delays its execution by *t* seconds.
    """
    if opts["stddev"] <= 0:
        t = opts["t"]
    elif opts["pdf"] == "gauss":
        t = random.gauss(opts["t"], opts["stddev"])
    elif opts["pdf"] == "uniform":
        t = random.uniform(opts["t"], opts["stddev"])
    else:
        raise ValueError("unknown delay decorator pdf '{}'".format(opts["pdf"]))

    time.sleep(t)

    return fn(task, *args, **kwargs)
python
delay(t=5, stddev=0., pdf="gauss") Wraps a bound method of a task and delays its execution by *t* seconds.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/decorator.py#L151-L166
riga/law
law/decorator.py
notify
def notify(fn, opts, task, *args, **kwargs):
    """ notify(on_success=True, on_failure=True, **kwargs)
    Wraps a bound method of a task and guards its execution. Information about the execution
    (task name, duration, etc.) is collected and dispatched to all notification transports
    registered on the wrapped task via adding :py:class:`law.NotifyParameter` parameters.
    Example:

    .. code-block:: python

        class MyTask(law.Task):

            notify_mail = law.NotifyMailParameter()

            @notify
            # or
            @notify(sender="[email protected]", recipient="[email protected]")
            def run(self):
                ...

    When the *notify_mail* parameter is *True*, a notification is sent to the configured email
    address. Also see :ref:`config-notifications`.
    """
    _task = get_task(task)

    # get notification transports
    transports = []
    for param_name, param in _task.get_params():
        if isinstance(param, NotifyParameter) and getattr(_task, param_name):
            try:
                transport = param.get_transport()
                if transport:
                    transports.append(transport)
            except Exception as e:
                logger.warning("get_transport() failed for '{}' parameter: {}".format(
                    param_name, e))

    # nothing to do when there is no transport
    if not transports:
        return fn(task, *args, **kwargs)

    # guard the fn call and gather infos
    error = None
    t0 = time.time()
    try:
        return fn(task, *args, **kwargs)
    except (Exception, KeyboardInterrupt) as e:
        error = e
        raise
    finally:
        success = error is None

        # do nothing on KeyboardInterrupt, or when on_success / on_failure do not match the status
        if isinstance(error, KeyboardInterrupt):
            return
        elif success and not opts["on_success"]:
            return
        elif not success and not opts["on_failure"]:
            return

        duration = human_time_diff(seconds=round(time.time() - t0, 1))
        status_string = "succeeded" if success else "failed"
        title = "Task {} {}!".format(_task.get_task_family(), status_string)
        parts = collections.OrderedDict([
            ("Host", socket.gethostname()),
            ("Duration", duration),
            ("Last message", "-" if not len(_task._message_cache) else _task._message_cache[-1]),
            ("Task", str(_task)),
        ])
        if not success:
            parts["Traceback"] = traceback.format_exc()
        message = "\n".join("{}: {}".format(*tpl) for tpl in parts.items())

        # dispatch via all transports
        for transport in transports:
            fn = transport["func"]
            raw = transport.get("raw", False)
            try:
                fn(success, title, parts.copy() if raw else message, **opts)
            except Exception as e:
                t = traceback.format_exc()
                logger.warning("notification failed via transport '{}': {}\n{}".format(fn, e, t))
python
notify(on_success=True, on_failure=True, **kwargs) Wraps a bound method of a task and guards its execution. Information about the execution (task name, duration, etc.) is collected and dispatched to all notification transports registered on the wrapped task via adding :py:class:`law.NotifyParameter` parameters. Example: .. code-block:: python class MyTask(law.Task): notify_mail = law.NotifyMailParameter() @notify # or @notify(sender="[email protected]", recipient="[email protected]") def run(self): ... When the *notify_mail* parameter is *True*, a notification is sent to the configured email address. Also see :ref:`config-notifications`.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/decorator.py#L170-L248
riga/law
law/decorator.py
timeit
def timeit(fn, opts, task, *args, **kwargs):
    """
    Wraps a bound method of a task and logs its execution time in a human readable format. Logs
    in info mode. When *publish_message* is *True*, the duration is also published as a task
    message to the scheduler.
    """
    start_time = time.time()
    try:
        return fn(task, *args, **kwargs)
    finally:
        duration = human_time_diff(seconds=round(time.time() - start_time, 1))

        # log
        timeit_logger = logger.getChild("timeit")
        timeit_logger.info("runtime of {}: {}".format(task.task_id, duration))

        # optionally publish a task message to the scheduler
        if opts["publish_message"] and callable(getattr(task, "publish_message", None)):
            task.publish_message("runtime: {}".format(duration))
python
Wraps a bound method of a task and logs its execution time in a human readable format. Logs in info mode. When *publish_message* is *True*, the duration is also published as a task message to the scheduler.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/decorator.py#L252-L270
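A matching sketch, under the same assumption that the decorator is exposed via law.decorator:

    import law
    from law.decorator import timeit

    class MyTask(law.Task):

        @timeit(publish_message=True)  # also send "runtime: ..." to the scheduler
        def run(self):
            ...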
riga/law
law/contrib/wlcg/util.py
get_voms_proxy_user
def get_voms_proxy_user():
    """
    Returns the owner of the voms proxy.
    """
    out = _voms_proxy_info(["--identity"])[1].strip()
    try:
        return re.match(r".*\/CN\=([^\/]+).*", out.strip()).group(1)
    except:
        raise Exception("no valid identity found in voms proxy: {}".format(out))
python
Returns the owner of the voms proxy.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/wlcg/util.py#L44-L52
riga/law
law/contrib/wlcg/util.py
check_voms_proxy_validity
def check_voms_proxy_validity(log=False):
    """
    Returns *True* when a valid voms proxy exists, *False* otherwise. When *log* is *True*, a
    warning will be logged.
    """
    valid = _voms_proxy_info(["--exists"], silent=True)[0] == 0

    if log and not valid:
        logger.warning("no valid voms proxy found")

    return valid
python
Returns *True* when a valid voms proxy exists, *False* otherwise. When *log* is *True*, a warning will be logged.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/wlcg/util.py#L73-L81
riga/law
law/contrib/wlcg/util.py
renew_voms_proxy
def renew_voms_proxy(passwd="", vo=None, lifetime="196:00"):
    """
    Renews the voms proxy using a password *passwd*, an optional virtual organization name *vo*,
    and a default *lifetime* of 8 days. The password is written to a temporary file first and
    piped into the renewal command to ensure it is not visible in the process list.
    """
    with tmp_file() as (_, tmp):
        with open(tmp, "w") as f:
            f.write(passwd)

        cmd = "cat '{}' | voms-proxy-init --valid '{}'".format(tmp, lifetime)
        if vo:
            cmd += " -voms '{}'".format(vo)

        code, out, _ = interruptable_popen(cmd, shell=True, executable="/bin/bash",
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if code != 0:
            raise Exception("proxy renewal failed: {}".format(out))
python
Renews the voms proxy using a password *passwd*, an optional virtual organization name *vo*, and a default *lifetime* of 8 days. The password is written to a temporary file first and piped into the renewal command to ensure it is not visible in the process list.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/wlcg/util.py#L84-L100
riga/law
law/contrib/wlcg/util.py
delegate_voms_proxy_glite
def delegate_voms_proxy_glite(endpoint, stdout=None, stderr=None, cache=True):
    """
    Delegates the voms proxy via gLite to an *endpoint*, e.g.
    ``grid-ce.physik.rwth-aachen.de:8443``. *stdout* and *stderr* are passed to the *Popen*
    constructor for executing the ``glite-ce-delegate-proxy`` command. When *cache* is *True*,
    a json file is created alongside the proxy file, which stores the delegation ids per
    endpoint. The next time the exact same proxy should be delegated to the same endpoint, the
    cached delegation id is returned.
    """
    # get the proxy file
    proxy_file = get_voms_proxy_file()
    if not os.path.exists(proxy_file):
        raise Exception("proxy file '{}' does not exist".format(proxy_file))

    if cache:
        if isinstance(cache, six.string_types):
            cache_file = cache
        else:
            cache_file = proxy_file + "_delegation_cache.json"

        def remove_cache():
            try:
                if os.path.exists(cache_file):
                    os.remove(cache_file)
            except OSError:
                pass

        # create the hash of the proxy file content
        with open(proxy_file, "r") as f:
            proxy_hash = create_hash(f.read())

        # already delegated?
        cache_data = {}
        if os.path.exists(cache_file):
            with open(cache_file, "r") as f:
                try:
                    cache_data = json.load(f)
                except:
                    remove_cache()

        # is the hash up-to-date?
        if cache_data.get("hash") != proxy_hash:
            remove_cache()
            cache_data = {}

        # proxy already delegated to that endpoint?
        elif endpoint in cache_data.get("ids", []):
            return str(cache_data["ids"][endpoint])

    # do the actual delegation
    delegation_id = uuid.uuid4().hex
    cmd = ["glite-ce-delegate-proxy", "-e", endpoint, delegation_id]
    code = interruptable_popen(cmd, stdout=stdout, stderr=stderr)[0]
    if code != 0:
        raise Exception("glite proxy delegation to endpoint {} failed".format(endpoint))

    if cache:
        # write the id back to the delegation file
        cache_data["hash"] = proxy_hash
        cache_data.setdefault("ids", {})[endpoint] = delegation_id
        with open(cache_file, "w") as f:
            json.dump(cache_data, f, indent=4)
        os.chmod(cache_file, 0o0600)

    return delegation_id
python
Delegates the voms proxy via gLite to an *endpoint*, e.g. ``grid-ce.physik.rwth-aachen.de:8443``. *stdout* and *stderr* are passed to the *Popen* constructor for executing the ``glite-ce-delegate-proxy`` command. When *cache* is *True*, a json file is created alongside the proxy file, which stores the delegation ids per endpoint. The next time the exact same proxy should be delegated to the same endpoint, the cached delegation id is returned.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/wlcg/util.py#L103-L167
riga/law
law/target/formatter.py
get_formatter
def get_formatter(name, silent=False):
    """
    Returns the formatter class whose name attribute is *name*. When no class could be found and
    *silent* is *True*, *None* is returned. Otherwise, an exception is raised.
    """
    formatter = FormatterRegister.formatters.get(name)
    if formatter or silent:
        return formatter
    else:
        raise Exception("cannot find formatter '{}'".format(name))
python
Returns the formatter class whose name attribute is *name*. When no class could be found and *silent* is *True*, *None* is returned. Otherwise, an exception is raised.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/target/formatter.py#L48-L57
riga/law
law/target/formatter.py
find_formatters
def find_formatters(path, silent=True):
    """
    Returns a list of formatter classes which would accept the file given by *path*. When no
    classes could be found and *silent* is *True*, an empty list is returned. Otherwise, an
    exception is raised.
    """
    formatters = [f for f in six.itervalues(FormatterRegister.formatters) if f.accepts(path)]
    if formatters or silent:
        return formatters
    else:
        raise Exception("cannot find formatter for path '{}'".format(path))
python
Returns a list of formatter classes which would accept the file given by *path*. When no classes could be found and *silent* is *True*, an empty list is returned. Otherwise, an exception is raised.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/target/formatter.py#L60-L70
riga/law
law/target/formatter.py
find_formatter
def find_formatter(name, path):
    """
    Returns the formatter class whose name attribute is *name* when *name* is not
    *AUTO_FORMATTER*. Otherwise, the first formatter that accepts *path* is returned.
    Internally, this method simply uses :py:func:`get_formatter` or :py:func:`find_formatters`
    depending on the value of *name*.
    """
    if name == AUTO_FORMATTER:
        return find_formatters(path, silent=False)[0]
    else:
        return get_formatter(name, silent=False)
python
Returns the formatter class whose name attribute is *name* when *name* is not *AUTO_FORMATTER*. Otherwise, the first formatter that accepts *path* is returned. Internally, this method simply uses :py:func:`get_formatter` or :py:func:`find_formatters` depending on the value of *name*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/target/formatter.py#L73-L82
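A lookup sketch; the "json" formatter name is an assumption based on the surrounding code, not a documented registry entry:

    from law.target.formatter import AUTO_FORMATTER, find_formatter

    # explicit name: resolved via get_formatter(), raising when unknown
    f = find_formatter("json", "data.json")

    # sentinel: first registered formatter whose accepts("data.json") is True
    f = find_formatter(AUTO_FORMATTER, "data.json")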
riga/law
law/job/base.py
JobArguments.encode_list
def encode_list(cls, value):
    """
    Encodes a list *value* into a string via base64 encoding.
    """
    encoded = base64.b64encode(six.b(" ".join(str(v) for v in value) or "-"))
    return encoded.decode("utf-8") if six.PY3 else encoded
python
Encodes a list *value* into a string via base64 encoding.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/job/base.py#L700-L705
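A worked example of the scheme without law imports: elements are joined with spaces (an empty list becomes "-"), then base64-encoded so the value survives shell argument passing:

    import base64

    # mirrors JobArguments.encode_list([1, 2, 3])
    print(base64.b64encode(b"1 2 3").decode("utf-8"))  # -> "MSAyIDM="
    print(base64.b64encode(b"-").decode("utf-8"))      # -> "LQ==", the empty-list marker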
riga/law
law/job/base.py
JobArguments.get_args
def get_args(self):
    """
    Returns the list of encoded job arguments. The order of this list corresponds to the
    arguments expected by the job wrapper script.
    """
    return [
        self.task_cls.__module__,
        self.task_cls.__name__,
        self.encode_list(self.task_params),
        self.encode_list(self.branches),
        self.encode_bool(self.auto_retry),
        self.encode_list(self.dashboard_data),
    ]
python
Returns the list of encoded job arguments. The order of this list corresponds to the arguments expected by the job wrapper script.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/job/base.py#L707-L719
riga/law
law/contrib/__init__.py
load
def load(*packages):
    """
    Loads contrib *packages* and adds members exposed in ``__all__`` to the law main module.
    Example:

    .. code-block:: python

        import law
        law.contrib.load("numpy")

        print(law.NumpyFormatter)
        # -> <class 'law.contrib.numpy.formatter.NumpyFormatter'>

    It is ensured that packages are loaded only once.
    """
    for pkg in flatten(packages):
        if pkg in loaded_packages:
            logger.debug("skip contrib package '{}', already loaded".format(pkg))
            continue
        loaded_packages.append(pkg)

        mod = __import__("law.contrib.{}".format(pkg), globals(), locals(), [pkg])
        logger.debug("loaded contrib package '{}'".format(pkg))

        for attr in mod.__all__:
            if hasattr(law, attr):
                logger.info("cannot register 'law.contrib.{0}.{1}' to 'law.{1}', "
                    "already exists".format(pkg, attr))
            else:
                setattr(law, attr, getattr(mod, attr))
                law.__all__.append(attr)
                logger.debug("registered 'law.contrib.{0}.{1}' to 'law.{1}'".format(pkg, attr))
python
Loads contrib *packages* and adds members exposed in ``__all__`` to the law main module. Example: .. code-block:: python import law law.contrib.load("numpy") print(law.NumpyFormatter) # -> <class 'law.contrib.numpy.formatter.NumpyFormatter'> It is ensured that packages are loaded only once.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/__init__.py#L21-L52
riga/law
law/notification.py
notify_mail
def notify_mail(title, message, recipient=None, sender=None, smtp_host=None, smtp_port=None,
        **kwargs):
    """
    Mail notification method taking a *title* and a string *message*. *recipient*, *sender*,
    *smtp_host* and *smtp_port* default to the configuration values in the [notifications]
    section.
    """
    cfg = Config.instance()

    if not recipient:
        recipient = cfg.get_expanded("notifications", "mail_recipient")
    if not sender:
        sender = cfg.get_expanded("notifications", "mail_sender")
    if not smtp_host:
        smtp_host = cfg.get_expanded("notifications", "mail_smtp_host")
    if not smtp_port:
        smtp_port = cfg.get_expanded("notifications", "mail_smtp_port")

    if not recipient or not sender:
        logger.warning("cannot send mail notification, recipient ({}) or sender ({}) empty".format(
            recipient, sender))
        return False

    return send_mail(recipient, sender, title, message, smtp_host=smtp_host, smtp_port=smtp_port)
python
Mail notification method taking a *title* and a string *message*. *recipient*, *sender*, *smtp_host* and *smtp_port* default to the configuration values in the [notifications] section.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/notification.py#L20-L42
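A minimal usage sketch, assuming a law config whose [notifications] section provides the SMTP
options; the addresses shown are hypothetical:

.. code-block:: python

    from law.notification import notify_mail

    # explicit arguments take precedence over the [notifications] config values
    sent = notify_mail(
        "task finished",
        "all branches completed",
        recipient="user@example.com",  # hypothetical address
        sender="law@example.com",      # hypothetical address
    )
    # sent is True on success, False when recipient or sender could not be resolved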
riga/law
law/patches.py
patch_all
def patch_all():
    """
    Runs all patches. This function ensures that a second invocation has no effect.
    """
    global _patched

    if _patched:
        return
    _patched = True

    patch_default_retcodes()
    patch_worker_run_task()
    patch_worker_factory()
    patch_keepalive_run()
    patch_cmdline_parser()

    logger.debug("applied law-specific luigi patches")
python
def patch_all():
    """
    Runs all patches. This function ensures that a second invocation has no effect.
    """
    global _patched

    if _patched:
        return
    _patched = True

    patch_default_retcodes()
    patch_worker_run_task()
    patch_worker_factory()
    patch_keepalive_run()
    patch_cmdline_parser()

    logger.debug("applied law-specific luigi patches")
Runs all patches. This function ensures that a second invocation has no effect.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/patches.py#L24-L40
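A minimal sketch of applying the patches; since patch_all() guards itself with the module-level
_patched flag, repeated calls are safe:

.. code-block:: python

    import law.patches

    law.patches.patch_all()
    law.patches.patch_all()  # no effect, the guard returns immediately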
riga/law
law/patches.py
patch_default_retcodes
def patch_default_retcodes():
    """
    Sets the default luigi return codes in ``luigi.retcodes.retcode`` to:

        - already_running: 10
        - missing_data: 20
        - not_run: 30
        - task_failed: 40
        - scheduling_error: 50
        - unhandled_exception: 60
    """
    import luigi.retcodes

    retcode = luigi.retcodes.retcode

    retcode.already_running._default = 10
    retcode.missing_data._default = 20
    retcode.not_run._default = 30
    retcode.task_failed._default = 40
    retcode.scheduling_error._default = 50
    retcode.unhandled_exception._default = 60
python
def patch_default_retcodes():
    """
    Sets the default luigi return codes in ``luigi.retcodes.retcode`` to:

        - already_running: 10
        - missing_data: 20
        - not_run: 30
        - task_failed: 40
        - scheduling_error: 50
        - unhandled_exception: 60
    """
    import luigi.retcodes

    retcode = luigi.retcodes.retcode

    retcode.already_running._default = 10
    retcode.missing_data._default = 20
    retcode.not_run._default = 30
    retcode.task_failed._default = 40
    retcode.scheduling_error._default = 50
    retcode.unhandled_exception._default = 60
Sets the default luigi return codes in ``luigi.retcodes.retcode`` to:

    - already_running: 10
    - missing_data: 20
    - not_run: 30
    - task_failed: 40
    - scheduling_error: 50
    - unhandled_exception: 60
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/patches.py#L43-L63
riga/law
law/patches.py
patch_worker_run_task
def patch_worker_run_task():
    """
    Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
    first task in the task. This information is required by the sandboxing mechanism.
    """
    _run_task = luigi.worker.Worker._run_task

    def run_task(self, task_id):
        task = self._scheduled_tasks[task_id]

        task._worker_id = self._id
        task._worker_task = self._first_task

        try:
            _run_task(self, task_id)
        finally:
            task._worker_id = None
            task._worker_task = None

        # make worker disposable when sandboxed
        if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
            self._start_phasing_out()

    luigi.worker.Worker._run_task = run_task
python
def patch_worker_run_task():
    """
    Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
    first task in the task. This information is required by the sandboxing mechanism.
    """
    _run_task = luigi.worker.Worker._run_task

    def run_task(self, task_id):
        task = self._scheduled_tasks[task_id]

        task._worker_id = self._id
        task._worker_task = self._first_task

        try:
            _run_task(self, task_id)
        finally:
            task._worker_id = None
            task._worker_task = None

        # make worker disposable when sandboxed
        if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
            self._start_phasing_out()

    luigi.worker.Worker._run_task = run_task
Patches the ``luigi.worker.Worker._run_task`` method to store the worker id and the id of its
first task in the task. This information is required by the sandboxing mechanism.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/patches.py#L66-L89
riga/law
law/patches.py
patch_worker_factory
def patch_worker_factory():
    """
    Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information
    when creating a worker instance.
    """
    def create_worker(self, scheduler, worker_processes, assistant=False):
        worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes,
            assistant=assistant, worker_id=os.getenv("LAW_SANDBOX_WORKER_ID"))
        worker._first_task = os.getenv("LAW_SANDBOX_WORKER_TASK")
        return worker

    luigi.interface._WorkerSchedulerFactory.create_worker = create_worker
python
def patch_worker_factory():
    """
    Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information
    when creating a worker instance.
    """
    def create_worker(self, scheduler, worker_processes, assistant=False):
        worker = luigi.worker.Worker(scheduler=scheduler, worker_processes=worker_processes,
            assistant=assistant, worker_id=os.getenv("LAW_SANDBOX_WORKER_ID"))
        worker._first_task = os.getenv("LAW_SANDBOX_WORKER_TASK")
        return worker

    luigi.interface._WorkerSchedulerFactory.create_worker = create_worker
Patches the ``luigi.interface._WorkerSchedulerFactory`` to include sandboxing information when
creating a worker instance.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/patches.py#L92-L103
riga/law
law/patches.py
patch_keepalive_run
def patch_keepalive_run():
    """
    Patches the ``luigi.worker.KeepAliveThread.run`` method to immediately stop the keep-alive
    thread when running within a sandbox.
    """
    _run = luigi.worker.KeepAliveThread.run

    def run(self):
        # do not run the keep-alive loop when sandboxed
        if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
            self.stop()
        else:
            _run(self)

    luigi.worker.KeepAliveThread.run = run
python
def patch_keepalive_run():
    """
    Patches the ``luigi.worker.KeepAliveThread.run`` method to immediately stop the keep-alive
    thread when running within a sandbox.
    """
    _run = luigi.worker.KeepAliveThread.run

    def run(self):
        # do not run the keep-alive loop when sandboxed
        if os.getenv("LAW_SANDBOX_SWITCHED") == "1":
            self.stop()
        else:
            _run(self)

    luigi.worker.KeepAliveThread.run = run
Patches the ``luigi.worker.KeepAliveThread.run`` method to immediately stop the keep-alive
thread when running within a sandbox.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/patches.py#L106-L120
riga/law
law/patches.py
patch_cmdline_parser
def patch_cmdline_parser():
    """
    Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line
    arguments for later processing in the :py:class:`law.config.Config`.
    """
    # store original functions
    _init = luigi.cmdline_parser.CmdlineParser.__init__

    # patch init
    def __init__(self, cmdline_args):
        _init(self, cmdline_args)
        self.cmdline_args = cmdline_args

    luigi.cmdline_parser.CmdlineParser.__init__ = __init__
python
def patch_cmdline_parser():
    """
    Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line
    arguments for later processing in the :py:class:`law.config.Config`.
    """
    # store original functions
    _init = luigi.cmdline_parser.CmdlineParser.__init__

    # patch init
    def __init__(self, cmdline_args):
        _init(self, cmdline_args)
        self.cmdline_args = cmdline_args

    luigi.cmdline_parser.CmdlineParser.__init__ = __init__
Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line arguments
for later processing in the :py:class:`law.config.Config`.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/patches.py#L123-L136
riga/law
law/contrib/slack/notification.py
notify_slack
def notify_slack(title, content, attachment_color="#4bb543", short_threshold=40, token=None,
        channel=None, mention_user=None, **kwargs):
    """
    Sends a Slack notification and returns *True* on success. The communication with the Slack
    API might have some delays and is therefore handled by a thread. The format of the
    notification depends on *content*. If it is a string, a simple text notification is sent.
    Otherwise, it should be a dictionary whose fields are used to build a message attachment with
    two-column formatting.
    """
    # test import
    import slackclient  # noqa: F401

    cfg = Config.instance()

    # get default token and channel
    if not token:
        token = cfg.get_expanded("notifications", "slack_token")
    if not channel:
        channel = cfg.get_expanded("notifications", "slack_channel")

    if not token or not channel:
        logger.warning("cannot send Slack notification, token ({}) or channel ({}) empty".format(
            token, channel))
        return False

    # append the user to mention to the title
    # unless explicitly set to empty string
    mention_text = ""
    if mention_user is None:
        mention_user = cfg.get_expanded("notifications", "slack_mention_user")
    if mention_user:
        mention_text = " (@{})".format(mention_user)

    # request data for the API call
    request = {
        "channel": channel,
        "as_user": True,
        "parse": "full",
    }

    # standard or attachment content?
    if isinstance(content, six.string_types):
        request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
    else:
        # content is a dict, send its data as an attachment
        request["text"] = "{} {}".format(title, mention_text)
        request["attachments"] = at = {
            "color": attachment_color,
            "fields": [],
            "fallback": "{}{}\n\n".format(title, mention_text),
        }

        # fill the attachment fields and extend the fallback
        for key, value in content.items():
            at["fields"].append({
                "title": key,
                "value": value,
                "short": len(value) <= short_threshold,
            })
            at["fallback"] += "_{}_: {}\n".format(key, value)

    # extend by arbitrary kwargs
    request.update(kwargs)

    # threaded, non-blocking API communication
    thread = threading.Thread(target=_notify_slack, args=(token, request))
    thread.start()

    return True
python
def notify_slack(title, content, attachment_color="#4bb543", short_threshold=40, token=None,
        channel=None, mention_user=None, **kwargs):
    """
    Sends a Slack notification and returns *True* on success. The communication with the Slack
    API might have some delays and is therefore handled by a thread. The format of the
    notification depends on *content*. If it is a string, a simple text notification is sent.
    Otherwise, it should be a dictionary whose fields are used to build a message attachment with
    two-column formatting.
    """
    # test import
    import slackclient  # noqa: F401

    cfg = Config.instance()

    # get default token and channel
    if not token:
        token = cfg.get_expanded("notifications", "slack_token")
    if not channel:
        channel = cfg.get_expanded("notifications", "slack_channel")

    if not token or not channel:
        logger.warning("cannot send Slack notification, token ({}) or channel ({}) empty".format(
            token, channel))
        return False

    # append the user to mention to the title
    # unless explicitly set to empty string
    mention_text = ""
    if mention_user is None:
        mention_user = cfg.get_expanded("notifications", "slack_mention_user")
    if mention_user:
        mention_text = " (@{})".format(mention_user)

    # request data for the API call
    request = {
        "channel": channel,
        "as_user": True,
        "parse": "full",
    }

    # standard or attachment content?
    if isinstance(content, six.string_types):
        request["text"] = "{}{}\n\n{}".format(title, mention_text, content)
    else:
        # content is a dict, send its data as an attachment
        request["text"] = "{} {}".format(title, mention_text)
        request["attachments"] = at = {
            "color": attachment_color,
            "fields": [],
            "fallback": "{}{}\n\n".format(title, mention_text),
        }

        # fill the attachment fields and extend the fallback
        for key, value in content.items():
            at["fields"].append({
                "title": key,
                "value": value,
                "short": len(value) <= short_threshold,
            })
            at["fallback"] += "_{}_: {}\n".format(key, value)

    # extend by arbitrary kwargs
    request.update(kwargs)

    # threaded, non-blocking API communication
    thread = threading.Thread(target=_notify_slack, args=(token, request))
    thread.start()

    return True
Sends a Slack notification and returns *True* on success. The communication with the Slack API
might have some delays and is therefore handled by a thread. The format of the notification
depends on *content*. If it is a string, a simple text notification is sent. Otherwise, it
should be a dictionary whose fields are used to build a message attachment with two-column
formatting.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/slack/notification.py#L19-L87
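A hedged usage sketch; token and channel are hypothetical placeholders, and passing a dict as
*content* triggers the two-column attachment layout described above:

.. code-block:: python

    from law.contrib.slack.notification import notify_slack

    notify_slack(
        "my_task finished",
        {"status": "done", "duration": "1 hour, 2 minutes"},
        token="xoxb-not-a-real-token",  # hypothetical token
        channel="#workflows",           # hypothetical channel
    )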
riga/law
examples/workflows/tasks.py
maybe_wait
def maybe_wait(func):
    """
    Wrapper around run() methods that reads the *slow* flag to decide whether to wait some
    seconds for illustrative purposes. This is very straightforward, so there is no need for
    functools.wraps here.
    """
    def wrapper(self, *args, **kwargs):
        if self.slow:
            time.sleep(random.randint(5, 15))
        return func(self, *args, **kwargs)

    return wrapper
python
def maybe_wait(func):
    """
    Wrapper around run() methods that reads the *slow* flag to decide whether to wait some
    seconds for illustrative purposes. This is very straightforward, so there is no need for
    functools.wraps here.
    """
    def wrapper(self, *args, **kwargs):
        if self.slow:
            time.sleep(random.randint(5, 15))
        return func(self, *args, **kwargs)

    return wrapper
Wrapper around run() methods that reads the *slow* flag to decide whether to wait some seconds
for illustrative purposes. This is very straightforward, so there is no need for functools.wraps
here.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/examples/workflows/tasks.py#L19-L29
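A sketch of how the decorator is meant to be used, assuming a hypothetical task class that
defines a boolean *slow* parameter as in this example workflow:

.. code-block:: python

    import luigi
    import law

    class CreateChars(law.Task):  # hypothetical task

        slow = luigi.BoolParameter()

        @maybe_wait
        def run(self):
            # sleeps 5-15 seconds first when --slow is set
            ...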
riga/law
law/contrib/lsf/job.py
LSFJobManager.parse_query_output
def parse_query_output(cls, out):
    """
    Example output to parse:

        141914132 user_name DONE queue_name exec_host b63cee711a job_name Feb 8 14:54
    """
    query_data = {}

    for line in out.strip().split("\n"):
        parts = line.split()
        if len(parts) < 6:
            continue

        job_id = parts[0]
        status_flag = parts[2]

        # map the status
        status = cls.map_status(status_flag)

        # save the result
        query_data[job_id] = cls.job_status_dict(job_id=job_id, status=status)

    return query_data
python
def parse_query_output(cls, out):
    """
    Example output to parse:

        141914132 user_name DONE queue_name exec_host b63cee711a job_name Feb 8 14:54
    """
    query_data = {}

    for line in out.strip().split("\n"):
        parts = line.split()
        if len(parts) < 6:
            continue

        job_id = parts[0]
        status_flag = parts[2]

        # map the status
        status = cls.map_status(status_flag)

        # save the result
        query_data[job_id] = cls.job_status_dict(job_id=job_id, status=status)

    return query_data
Example output to parse:

    141914132 user_name DONE queue_name exec_host b63cee711a job_name Feb 8 14:54
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/lsf/job.py#L144-L165
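Feeding the example line from the docstring through the parser illustrates the result structure;
the exact status value depends on cls.map_status:

.. code-block:: python

    from law.contrib.lsf.job import LSFJobManager  # assumed import path

    out = "141914132 user_name DONE queue_name exec_host b63cee711a job_name Feb 8 14:54"
    data = LSFJobManager.parse_query_output(out)
    # {"141914132": {"job_id": "141914132", "status": <mapped from "DONE">, ...}}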
riga/law
law/logger.py
setup_logging
def setup_logging():
    """
    Sets up the internal logging mechanism, i.e., it creates the :py:attr:`console_handler`, sets
    its formatting, and mounts it on the main ``"law"`` logger. It also sets the levels of all
    loggers that are given in the law config.
    """
    global console_handler

    # make sure logging is set up only once
    if console_handler:
        return

    # set the handler of the law root logger
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(LogFormatter())
    logging.getLogger("law").addHandler(console_handler)

    # set levels for all loggers
    for name, level in Config.instance().items("logging"):
        level = level.upper()
        if hasattr(logging, level):
            logger = logging.getLogger(name)
            logger.setLevel(getattr(logging, level))
            logger.debug("registered logger with level '{}'".format(level))
python
def setup_logging():
    """
    Sets up the internal logging mechanism, i.e., it creates the :py:attr:`console_handler`, sets
    its formatting, and mounts it on the main ``"law"`` logger. It also sets the levels of all
    loggers that are given in the law config.
    """
    global console_handler

    # make sure logging is set up only once
    if console_handler:
        return

    # set the handler of the law root logger
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(LogFormatter())
    logging.getLogger("law").addHandler(console_handler)

    # set levels for all loggers
    for name, level in Config.instance().items("logging"):
        level = level.upper()
        if hasattr(logging, level):
            logger = logging.getLogger(name)
            logger.setLevel(getattr(logging, level))
            logger.debug("registered logger with level '{}'".format(level))
Sets up the internal logging mechanism, i.e., it creates the :py:attr:`console_handler`, sets
its formatting, and mounts it on the main ``"law"`` logger. It also sets the levels of all
loggers that are given in the law config.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/logger.py#L22-L45
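A minimal sketch; the call is idempotent, and logger levels are taken from [logging] config
entries that map logger names to level names (e.g. a hypothetical ``law.workflow: DEBUG``):

.. code-block:: python

    from law.logger import setup_logging

    setup_logging()
    setup_logging()  # second call returns early, console_handler is already set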
riga/law
law/cli/config.py
execute
def execute(args):
    """
    Executes the *config* subprogram with parsed commandline *args*.
    """
    # just print the file location?
    if args.location:
        print(Config.instance().config_file)
        return

    # every option below requires the name to be set
    if not args.name:
        abort("please give the name of the config in the format <section>[.<option>]")

    # removal
    if args.remove:
        abort("config removal not yet implemented")

    # setting
    if args.value:
        abort("config setting not yet implemented")

    # getting
    print(get_config(args.name, expand=args.expand))
python
def execute(args):
    """
    Executes the *config* subprogram with parsed commandline *args*.
    """
    # just print the file location?
    if args.location:
        print(Config.instance().config_file)
        return

    # every option below requires the name to be set
    if not args.name:
        abort("please give the name of the config in the format <section>[.<option>]")

    # removal
    if args.remove:
        abort("config removal not yet implemented")

    # setting
    if args.value:
        abort("config setting not yet implemented")

    # getting
    print(get_config(args.name, expand=args.expand))
Executes the *config* subprogram with parsed commandline *args*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/config.py#L30-L52
riga/law
law/cli/config.py
get_config
def get_config(name, expand=False):
    """
    Returns the config value that corresponds to *name*, which must have the format
    ``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are
    expanded in the returned value.
    """
    cfg = Config.instance()

    only_section = "." not in name

    # when only the section is given, print all keys
    if only_section:
        return "\n".join(cfg.keys(name))
    else:
        section, option = name.split(".", 1)
        func = cfg.get_expanded if expand else cfg.get
        return func(section, option)
python
def get_config(name, expand=False):
    """
    Returns the config value that corresponds to *name*, which must have the format
    ``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are
    expanded in the returned value.
    """
    cfg = Config.instance()

    only_section = "." not in name

    # when only the section is given, print all keys
    if only_section:
        return "\n".join(cfg.keys(name))
    else:
        section, option = name.split(".", 1)
        func = cfg.get_expanded if expand else cfg.get
        return func(section, option)
Returns the config value that corresponds to *name*, which must have the format
``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are expanded
in the returned value.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/config.py#L55-L71
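A short sketch of both lookup modes, assuming a config that defines a [core] section:

.. code-block:: python

    from law.cli.config import get_config

    # only a section: returns all keys, one per line
    print(get_config("core"))

    # section plus option: returns a single value, here with variables expanded
    print(get_config("core.index_file", expand=True))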
riga/law
law/cli/run.py
setup_parser
def setup_parser(sub_parsers):
    """
    Sets up the command line parser for the *run* subprogram and adds it to *sub_parsers*.
    """
    parser = sub_parsers.add_parser("run", prog="law run", description="Run a task with"
        " configurable parameters. See http://luigi.rtfd.io/en/stable/running_luigi.html for more"
        " info.")

    parser.add_argument("task_family", help="a task family registered in the task database file"
        " or a module and task class in the format <module>.<class>")
    parser.add_argument("parameter", nargs="*", help="task parameters")
python
def setup_parser(sub_parsers):
    """
    Sets up the command line parser for the *run* subprogram and adds it to *sub_parsers*.
    """
    parser = sub_parsers.add_parser("run", prog="law run", description="Run a task with"
        " configurable parameters. See http://luigi.rtfd.io/en/stable/running_luigi.html for more"
        " info.")

    parser.add_argument("task_family", help="a task family registered in the task database file"
        " or a module and task class in the format <module>.<class>")
    parser.add_argument("parameter", nargs="*", help="task parameters")
Sets up the command line parser for the *run* subprogram and adds it to *sub_parsers*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/run.py#L22-L32
riga/law
law/cli/run.py
execute
def execute(args):
    """
    Executes the *run* subprogram with parsed commandline *args*.
    """
    task_family = None
    error = None

    # try to infer the task module from the passed task family and import it
    parts = args.task_family.rsplit(".", 1)
    if len(parts) == 2:
        modid, cls_name = parts
        try:
            mod = __import__(modid, globals(), locals(), [cls_name])
            if hasattr(mod, cls_name):
                task_cls = getattr(mod, cls_name)
                if not issubclass(task_cls, Task):
                    abort("object '{}' is not a Task".format(args.task_family))
                task_family = task_cls.task_family
        except ImportError as e:
            logger.warning("import error in module {}: {}".format(modid, e))
            error = e

    # read task info from the index file and import it
    if task_family is None:
        index_file = Config.instance().get_expanded("core", "index_file")
        if os.path.exists(index_file):
            info = read_task_from_index(args.task_family, index_file)
            if not info:
                abort("task family '{}' not found in index".format(args.task_family))
            modid, task_family, _ = info
            __import__(modid, globals(), locals())

    # complain when no task could be found
    if task_family is None:
        if error:
            raise error
        else:
            abort("task '{}' not found".format(args.task_family))

    # import the module and run luigi
    luigi_run([task_family] + sys.argv[3:])
python
def execute(args):
    """
    Executes the *run* subprogram with parsed commandline *args*.
    """
    task_family = None
    error = None

    # try to infer the task module from the passed task family and import it
    parts = args.task_family.rsplit(".", 1)
    if len(parts) == 2:
        modid, cls_name = parts
        try:
            mod = __import__(modid, globals(), locals(), [cls_name])
            if hasattr(mod, cls_name):
                task_cls = getattr(mod, cls_name)
                if not issubclass(task_cls, Task):
                    abort("object '{}' is not a Task".format(args.task_family))
                task_family = task_cls.task_family
        except ImportError as e:
            logger.warning("import error in module {}: {}".format(modid, e))
            error = e

    # read task info from the index file and import it
    if task_family is None:
        index_file = Config.instance().get_expanded("core", "index_file")
        if os.path.exists(index_file):
            info = read_task_from_index(args.task_family, index_file)
            if not info:
                abort("task family '{}' not found in index".format(args.task_family))
            modid, task_family, _ = info
            __import__(modid, globals(), locals())

    # complain when no task could be found
    if task_family is None:
        if error:
            raise error
        else:
            abort("task '{}' not found".format(args.task_family))

    # import the module and run luigi
    luigi_run([task_family] + sys.argv[3:])
Executes the *run* subprogram with parsed commandline *args*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/run.py#L35-L75
riga/law
law/cli/run.py
read_task_from_index
def read_task_from_index(task_family, index_file=None):
    """
    Returns module id, task family and space-separated parameters in a tuple for a task given by
    *task_family* from the *index_file*. When *None*, the *index_file* refers to the default as
    defined in :py:mod:`law.config`. Returns *None* when the task could not be found.
    """
    # read task information from the index file given a task family
    if index_file is None:
        index_file = Config.instance().get_expanded("core", "index_file")

    # open and go through lines
    with open(index_file, "r") as f:
        for line in f.readlines():
            line = line.strip()
            if line.count(":") >= 2:
                modid, family, params = line.split(":", 2)
                if family == task_family:
                    return modid, family, params

    return None
python
def read_task_from_index(task_family, index_file=None):
    """
    Returns module id, task family and space-separated parameters in a tuple for a task given by
    *task_family* from the *index_file*. When *None*, the *index_file* refers to the default as
    defined in :py:mod:`law.config`. Returns *None* when the task could not be found.
    """
    # read task information from the index file given a task family
    if index_file is None:
        index_file = Config.instance().get_expanded("core", "index_file")

    # open and go through lines
    with open(index_file, "r") as f:
        for line in f.readlines():
            line = line.strip()
            if line.count(":") >= 2:
                modid, family, params = line.split(":", 2)
                if family == task_family:
                    return modid, family, params

    return None
Returns module id, task family and space-separated parameters in a tuple for a task given by
*task_family* from the *index_file*. When *None*, the *index_file* refers to the default as
defined in :py:mod:`law.config`. Returns *None* when the task could not be found.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/run.py#L78-L97
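The code implies one index entry per line in the format ``<module>:<task_family>:<params>``; a
sketch with a hypothetical file and task:

.. code-block:: python

    from law.cli.run import read_task_from_index

    # hypothetical index file line:
    #   my_module:MyTask:--my-param
    info = read_task_from_index("MyTask", "/tmp/law_index")  # hypothetical path
    if info:
        modid, family, params = info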
riga/law
law/cli/cli.py
run
def run():
    """
    Entry point to the law cli. Sets up all parsers, parses all arguments, and executes the
    requested subprogram.
    """
    # setup the main parser and sub parsers
    parser = ArgumentParser(prog="law", description="The law command line tool.")
    sub_parsers = parser.add_subparsers(help="subcommands", dest="command")

    # add main arguments
    parser.add_argument("--version", "-V", action="version", version=law.__version__)

    # setup all progs
    mods = {}
    for prog in progs:
        mods[prog] = import_module("law.cli." + prog)
        mods[prog].setup_parser(sub_parsers)

    # parse args and dispatch execution
    if len(sys.argv) >= 2 and sys.argv[1] in forward_progs:
        args = parser.parse_args(sys.argv[1:3])
    else:
        args = parser.parse_args()

    if args.command:
        mods[args.command].execute(args)
    else:
        parser.print_help()
python
def run():
    """
    Entry point to the law cli. Sets up all parsers, parses all arguments, and executes the
    requested subprogram.
    """
    # setup the main parser and sub parsers
    parser = ArgumentParser(prog="law", description="The law command line tool.")
    sub_parsers = parser.add_subparsers(help="subcommands", dest="command")

    # add main arguments
    parser.add_argument("--version", "-V", action="version", version=law.__version__)

    # setup all progs
    mods = {}
    for prog in progs:
        mods[prog] = import_module("law.cli." + prog)
        mods[prog].setup_parser(sub_parsers)

    # parse args and dispatch execution
    if len(sys.argv) >= 2 and sys.argv[1] in forward_progs:
        args = parser.parse_args(sys.argv[1:3])
    else:
        args = parser.parse_args()

    if args.command:
        mods[args.command].execute(args)
    else:
        parser.print_help()
Entry point to the law cli. Sets up all parsers, parses all arguments, and executes the
requested subprogram.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/cli.py#L22-L49
riga/law
law/job/dashboard.py
cache_by_status
def cache_by_status(func):
    """
    Decorator for :py:meth:`BaseJobDashboard.publish` (and inheriting classes) that caches the
    last published status to decide whether a new publication is necessary or not. When the
    status did not change since the last call, the actual publish method is not invoked and
    *None* is returned.
    """
    @functools.wraps(func)
    def wrapper(self, job_data, event, job_num, *args, **kwargs):
        job_id = job_data["job_id"]
        dashboard_status = self.map_status(job_data.get("status"), event)

        # nothing to do when the status is invalid or did not change
        if not dashboard_status or self._last_states.get(job_id) == dashboard_status:
            return None

        # set the new status
        self._last_states[job_id] = dashboard_status

        return func(self, job_data, event, job_num, *args, **kwargs)

    return wrapper
python
def cache_by_status(func):
    """
    Decorator for :py:meth:`BaseJobDashboard.publish` (and inheriting classes) that caches the
    last published status to decide whether a new publication is necessary or not. When the
    status did not change since the last call, the actual publish method is not invoked and
    *None* is returned.
    """
    @functools.wraps(func)
    def wrapper(self, job_data, event, job_num, *args, **kwargs):
        job_id = job_data["job_id"]
        dashboard_status = self.map_status(job_data.get("status"), event)

        # nothing to do when the status is invalid or did not change
        if not dashboard_status or self._last_states.get(job_id) == dashboard_status:
            return None

        # set the new status
        self._last_states[job_id] = dashboard_status

        return func(self, job_data, event, job_num, *args, **kwargs)

    return wrapper
Decorator for :py:meth:`BaseJobDashboard.publish` (and inheriting classes) that caches the last
published status to decide whether a new publication is necessary or not. When the status did
not change since the last call, the actual publish method is not invoked and *None* is returned.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/job/dashboard.py#L19-L39
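A minimal sketch of a dashboard applying the decorator; the publish body is only reached when
the mapped status actually changed:

.. code-block:: python

    class MyDashboard(BaseJobDashboard):  # hypothetical subclass

        @cache_by_status
        def publish(self, job_data, event, job_num, *args, **kwargs):
            ...  # send the new status somewhere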
riga/law
law/cli/software.py
setup_parser
def setup_parser(sub_parsers):
    """
    Sets up the command line parser for the *software* subprogram and adds it to *sub_parsers*.
    """
    parser = sub_parsers.add_parser("software", prog="law software", description="Create or"
        " update the law software cache ({}). This is only required for some sandboxes that need"
        " to forward software into containers.".format(get_sw_dir()))

    parser.add_argument("--remove", "-r", action="store_true", help="remove the software cache"
        " directory and exit")
    parser.add_argument("--location", "-l", action="store_true", help="print the location of the"
        " software cache directory and exit")
python
def setup_parser(sub_parsers):
    """
    Sets up the command line parser for the *software* subprogram and adds it to *sub_parsers*.
    """
    parser = sub_parsers.add_parser("software", prog="law software", description="Create or"
        " update the law software cache ({}). This is only required for some sandboxes that need"
        " to forward software into containers.".format(get_sw_dir()))

    parser.add_argument("--remove", "-r", action="store_true", help="remove the software cache"
        " directory and exit")
    parser.add_argument("--location", "-l", action="store_true", help="print the location of the"
        " software cache directory and exit")
Sets up the command line parser for the *software* subprogram and adds it to *sub_parsers*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/software.py#L30-L41
riga/law
law/cli/software.py
execute
def execute(args):
    """
    Executes the *software* subprogram with parsed commandline *args*.
    """
    sw_dir = get_sw_dir()

    # just print the cache location?
    if args.location:
        print(sw_dir)
        return

    # just remove the current software cache?
    if args.remove:
        remove_software_cache(sw_dir)
        return

    # rebuild the software cache
    build_software_cache(sw_dir)
python
def execute(args):
    """
    Executes the *software* subprogram with parsed commandline *args*.
    """
    sw_dir = get_sw_dir()

    # just print the cache location?
    if args.location:
        print(sw_dir)
        return

    # just remove the current software cache?
    if args.remove:
        remove_software_cache(sw_dir)
        return

    # rebuild the software cache
    build_software_cache(sw_dir)
Executes the *software* subprogram with parsed commandline *args*.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/software.py#L44-L61
riga/law
law/cli/software.py
build_software_cache
def build_software_cache(sw_dir=None):
    """
    Builds up the software cache directory at *sw_dir* by simply copying all required python
    modules. *sw_dir* is evaluated with :py:func:`get_sw_dir`.
    """
    # ensure the cache is empty
    sw_dir = get_sw_dir(sw_dir)
    remove_software_cache(sw_dir)
    os.makedirs(sw_dir)

    # reload dependencies to find the proper module paths
    reload_dependencies(force=True)

    for mod in deps:
        path = os.path.dirname(mod.__file__)
        name, ext = os.path.splitext(os.path.basename(mod.__file__))
        # single file or module?
        if name == "__init__":
            # copy the entire module
            name = os.path.basename(path)
            shutil.copytree(path, os.path.join(sw_dir, name))
        else:
            shutil.copy2(os.path.join(path, name + ".py"), sw_dir)
python
def build_software_cache(sw_dir=None):
    """
    Builds up the software cache directory at *sw_dir* by simply copying all required python
    modules. *sw_dir* is evaluated with :py:func:`get_sw_dir`.
    """
    # ensure the cache is empty
    sw_dir = get_sw_dir(sw_dir)
    remove_software_cache(sw_dir)
    os.makedirs(sw_dir)

    # reload dependencies to find the proper module paths
    reload_dependencies(force=True)

    for mod in deps:
        path = os.path.dirname(mod.__file__)
        name, ext = os.path.splitext(os.path.basename(mod.__file__))
        # single file or module?
        if name == "__init__":
            # copy the entire module
            name = os.path.basename(path)
            shutil.copytree(path, os.path.join(sw_dir, name))
        else:
            shutil.copy2(os.path.join(path, name + ".py"), sw_dir)
Builds up the software cache directory at *sw_dir* by simply copying all required python
modules. *sw_dir* is evaluated with :py:func:`get_sw_dir`.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/software.py#L64-L86
riga/law
law/cli/software.py
remove_software_cache
def remove_software_cache(sw_dir=None):
    """
    Removes the software cache directory at *sw_dir* which is evaluated with
    :py:func:`get_sw_dir`.
    """
    sw_dir = get_sw_dir(sw_dir)
    if os.path.exists(sw_dir):
        shutil.rmtree(sw_dir)
python
def remove_software_cache(sw_dir=None):
    """
    Removes the software cache directory at *sw_dir* which is evaluated with
    :py:func:`get_sw_dir`.
    """
    sw_dir = get_sw_dir(sw_dir)
    if os.path.exists(sw_dir):
        shutil.rmtree(sw_dir)
Removes the software cache directory at *sw_dir* which is evaluated with :py:func:`get_sw_dir`.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/software.py#L89-L95
riga/law
law/cli/software.py
reload_dependencies
def reload_dependencies(force=False):
    """
    Reloads all python modules that law depends on. Currently, this is just *luigi* and *six*.
    Unless *force* is *True*, multiple calls to this function will not have any effect.
    """
    global _reloaded_deps

    if _reloaded_deps and not force:
        return
    _reloaded_deps = True

    for mod in deps:
        six.moves.reload_module(mod)
        logger.debug("reloaded module '{}'".format(mod))
python
def reload_dependencies(force=False):
    """
    Reloads all python modules that law depends on. Currently, this is just *luigi* and *six*.
    Unless *force* is *True*, multiple calls to this function will not have any effect.
    """
    global _reloaded_deps

    if _reloaded_deps and not force:
        return
    _reloaded_deps = True

    for mod in deps:
        six.moves.reload_module(mod)
        logger.debug("reloaded module '{}'".format(mod))
Reloads all python modules that law depends on. Currently, this is just *luigi* and *six*.
Unless *force* is *True*, multiple calls to this function will not have any effect.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/software.py#L98-L111
riga/law
law/cli/software.py
use_software_cache
def use_software_cache(sw_dir=None, reload_deps=False):
    """
    Adjusts ``sys.path`` so that the cached software at *sw_dir* is used. *sw_dir* is evaluated
    with :py:func:`get_sw_dir`. When *reload_deps* is *True*, :py:func:`reload_dependencies` is
    invoked.
    """
    sw_dir = get_sw_dir(sw_dir)
    if os.path.exists(sw_dir):
        sys.path.insert(1, sw_dir)

        if reload_deps:
            reload_dependencies()
python
def use_software_cache(sw_dir=None, reload_deps=False):
    """
    Adjusts ``sys.path`` so that the cached software at *sw_dir* is used. *sw_dir* is evaluated
    with :py:func:`get_sw_dir`. When *reload_deps* is *True*, :py:func:`reload_dependencies` is
    invoked.
    """
    sw_dir = get_sw_dir(sw_dir)
    if os.path.exists(sw_dir):
        sys.path.insert(1, sw_dir)

        if reload_deps:
            reload_dependencies()
Adjusts ``sys.path`` so that the cached software at *sw_dir* is used. *sw_dir* is evaluated with
:py:func:`get_sw_dir`. When *reload_deps* is *True*, :py:func:`reload_dependencies` is invoked.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/software.py#L114-L124
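A minimal sketch, e.g. inside a container that received the forwarded software:

.. code-block:: python

    from law.cli.software import use_software_cache

    # prepend the cache to sys.path and reload luigi and six from there
    use_software_cache(reload_deps=True)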
riga/law
law/cli/software.py
get_sw_dir
def get_sw_dir(sw_dir=None):
    """
    Returns the software directory defined in the ``core.software_dir`` config. When *sw_dir* is
    not *None*, it is expanded and returned instead.
    """
    if sw_dir is None:
        sw_dir = Config.instance().get("core", "software_dir")

    sw_dir = os.path.expandvars(os.path.expanduser(sw_dir))

    return sw_dir
python
def get_sw_dir(sw_dir=None):
    """
    Returns the software directory defined in the ``core.software_dir`` config. When *sw_dir* is
    not *None*, it is expanded and returned instead.
    """
    if sw_dir is None:
        sw_dir = Config.instance().get("core", "software_dir")

    sw_dir = os.path.expandvars(os.path.expanduser(sw_dir))

    return sw_dir
Returns the software directory defined in the ``core.software_dir`` config. When *sw_dir* is not
*None*, it is expanded and returned instead.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/cli/software.py#L127-L137
riga/law
law/contrib/root/formatter.py
GuardedTFile
def GuardedTFile(*args, **kwargs):
    """
    Factory function that lazily creates the guarded TFile class, and creates and returns an
    instance with all passed *args* and *kwargs*. This is required as we do not want to import
    ROOT in the global scope.
    """
    global guarded_tfile_cls

    if not guarded_tfile_cls:
        import ROOT

        class GuardedTFile(ROOT.TFile):

            def __enter__(self):
                return self

            def __exit__(self, exc_type, exc_value, traceback):
                if self.IsOpen():
                    self.Close()

        guarded_tfile_cls = GuardedTFile

    return guarded_tfile_cls(*args, **kwargs)
python
def GuardedTFile(*args, **kwargs):
    """
    Factory function that lazily creates the guarded TFile class, and creates and returns an
    instance with all passed *args* and *kwargs*. This is required as we do not want to import
    ROOT in the global scope.
    """
    global guarded_tfile_cls

    if not guarded_tfile_cls:
        import ROOT

        class GuardedTFile(ROOT.TFile):

            def __enter__(self):
                return self

            def __exit__(self, exc_type, exc_value, traceback):
                if self.IsOpen():
                    self.Close()

        guarded_tfile_cls = GuardedTFile

    return guarded_tfile_cls(*args, **kwargs)
Factory function that lazily creates the guarded TFile class, and creates and returns an
instance with all passed *args* and *kwargs*. This is required as we do not want to import ROOT
in the global scope.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/contrib/root/formatter.py#L15-L37
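A usage sketch with a hypothetical file; the context manager guarantees the file is closed on
exit:

.. code-block:: python

    from law.contrib.root.formatter import GuardedTFile  # assumed import path

    with GuardedTFile("histograms.root", "READ") as f:  # hypothetical file
        hist = f.Get("h_pt")  # hypothetical histogram name

    # f is closed here, even when an exception was raised inside the block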
riga/law
law/workflow/base.py
workflow_property
def workflow_property(func):
    """
    Decorator to declare a property that is stored only on a workflow but makes it also
    accessible from branch tasks. Internally, branch tasks are re-instantiated with
    ``branch=-1``, and the decorated property is invoked. You might want to use this decorator in
    case of a property that is common (and mutable) to a workflow and all its branch tasks, e.g.
    for static data. Example:

    .. code-block:: python

        class MyTask(Workflow):

            def __init__(self, *args, **kwargs):
                super(MyTask, self).__init__(*args, **kwargs)

                if self.is_workflow():
                    self._common_data = some_demanding_computation()

            @workflow_property
            def common_data(self):
                # this method is always called with *self* being the *workflow*
                return self._common_data
    """
    @functools.wraps(func)
    def wrapper(self):
        return func(self.as_workflow())

    return property(wrapper)
python
def workflow_property(func):
    """
    Decorator to declare a property that is stored only on a workflow but makes it also
    accessible from branch tasks. Internally, branch tasks are re-instantiated with
    ``branch=-1``, and the decorated property is invoked. You might want to use this decorator in
    case of a property that is common (and mutable) to a workflow and all its branch tasks, e.g.
    for static data. Example:

    .. code-block:: python

        class MyTask(Workflow):

            def __init__(self, *args, **kwargs):
                super(MyTask, self).__init__(*args, **kwargs)

                if self.is_workflow():
                    self._common_data = some_demanding_computation()

            @workflow_property
            def common_data(self):
                # this method is always called with *self* being the *workflow*
                return self._common_data
    """
    @functools.wraps(func)
    def wrapper(self):
        return func(self.as_workflow())

    return property(wrapper)
Decorator to declare a property that is stored only on a workflow but makes it also accessible
from branch tasks. Internally, branch tasks are re-instantiated with ``branch=-1``, and the
decorated property is invoked. You might want to use this decorator in case of a property that
is common (and mutable) to a workflow and all its branch tasks, e.g. for static data. Example:

.. code-block:: python

    class MyTask(Workflow):

        def __init__(self, *args, **kwargs):
            super(MyTask, self).__init__(*args, **kwargs)

            if self.is_workflow():
                self._common_data = some_demanding_computation()

        @workflow_property
        def common_data(self):
            # this method is always called with *self* being the *workflow*
            return self._common_data
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L129-L155
riga/law
law/workflow/base.py
cached_workflow_property
def cached_workflow_property(func=None, attr=None, setter=True):
    """
    Decorator to declare an attribute that is stored only on a workflow and also cached for
    subsequent calls. Therefore, the decorated method is expected to (lazily) provide the value
    to cache. The resulting value is stored as ``_workflow_cached_<func.__name__>`` on the
    workflow, which can be overwritten by setting the *attr* argument. By default, a setter is
    provided to overwrite the cached value. Set *setter* to *False* to disable this feature.
    Example:

    .. code-block:: python

        class MyTask(Workflow):

            @cached_workflow_property
            def common_data(self):
                # this method is always called with *self* being the *workflow*
                return some_demanding_computation()

            @cached_workflow_property(attr="my_own_property", setter=False)
            def common_data2(self):
                return some_other_computation()
    """
    def wrapper(func):
        _attr = attr or "_workflow_cached_" + func.__name__

        @functools.wraps(func)
        def getter(self):
            wf = self.as_workflow()
            if not hasattr(wf, _attr):
                setattr(wf, _attr, func(wf))
            return getattr(wf, _attr)

        _setter = None
        if setter:
            def _setter(self, value):
                wf = self.as_workflow()
                setattr(wf, _attr, value)

            _setter.__name__ = func.__name__

        return property(fget=getter, fset=_setter)

    return wrapper if not func else wrapper(func)
python
def cached_workflow_property(func=None, attr=None, setter=True):
    """
    Decorator to declare an attribute that is stored only on a workflow and also cached for
    subsequent calls. Therefore, the decorated method is expected to (lazily) provide the value
    to cache. The resulting value is stored as ``_workflow_cached_<func.__name__>`` on the
    workflow, which can be overwritten by setting the *attr* argument. By default, a setter is
    provided to overwrite the cached value. Set *setter* to *False* to disable this feature.
    Example:

    .. code-block:: python

        class MyTask(Workflow):

            @cached_workflow_property
            def common_data(self):
                # this method is always called with *self* being the *workflow*
                return some_demanding_computation()

            @cached_workflow_property(attr="my_own_property", setter=False)
            def common_data2(self):
                return some_other_computation()
    """
    def wrapper(func):
        _attr = attr or "_workflow_cached_" + func.__name__

        @functools.wraps(func)
        def getter(self):
            wf = self.as_workflow()
            if not hasattr(wf, _attr):
                setattr(wf, _attr, func(wf))
            return getattr(wf, _attr)

        _setter = None
        if setter:
            def _setter(self, value):
                wf = self.as_workflow()
                setattr(wf, _attr, value)

            _setter.__name__ = func.__name__

        return property(fget=getter, fset=_setter)

    return wrapper if not func else wrapper(func)
Decorator to declare an attribute that is stored only on a workflow and also cached for
subsequent calls. Therefore, the decorated method is expected to (lazily) provide the value to
cache. The resulting value is stored as ``_workflow_cached_<func.__name__>`` on the workflow,
which can be overwritten by setting the *attr* argument. By default, a setter is provided to
overwrite the cached value. Set *setter* to *False* to disable this feature. Example:

.. code-block:: python

    class MyTask(Workflow):

        @cached_workflow_property
        def common_data(self):
            # this method is always called with *self* being the *workflow*
            return some_demanding_computation()

        @cached_workflow_property(attr="my_own_property", setter=False)
        def common_data2(self):
            return some_other_computation()
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L158-L199
riga/law
law/workflow/base.py
BaseWorkflowProxy.complete
def complete(self):
    """
    Custom completion check that invokes the task's *workflow_complete* if it is callable, or
    just does the default completion check otherwise.
    """
    if callable(self.task.workflow_complete):
        return self.task.workflow_complete()
    else:
        return super(BaseWorkflowProxy, self).complete()
python
def complete(self):
    """
    Custom completion check that invokes the task's *workflow_complete* if it is callable, or
    just does the default completion check otherwise.
    """
    if callable(self.task.workflow_complete):
        return self.task.workflow_complete()
    else:
        return super(BaseWorkflowProxy, self).complete()
Custom completion check that invokes the task's *workflow_complete* if it is callable, or just
does the default completion check otherwise.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L68-L76
riga/law
law/workflow/base.py
BaseWorkflowProxy.requires
def requires(self):
    """
    Returns the default workflow requirements in an ordered dictionary, which is updated with
    the return value of the task's *workflow_requires* method.
    """
    reqs = OrderedDict()
    reqs.update(self.task.workflow_requires())
    return reqs
python
def requires(self):
    """
    Returns the default workflow requirements in an ordered dictionary, which is updated with
    the return value of the task's *workflow_requires* method.
    """
    reqs = OrderedDict()
    reqs.update(self.task.workflow_requires())
    return reqs
Returns the default workflow requirements in an ordered dictionary, which is updated with the
return value of the task's *workflow_requires* method.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L78-L85
riga/law
law/workflow/base.py
BaseWorkflowProxy.output
def output(self):
    """
    Returns the default workflow outputs in an ordered dictionary. At the moment this is just
    the collection of outputs of the branch tasks, stored with the key ``"collection"``.
    """
    if self.task.target_collection_cls is not None:
        cls = self.task.target_collection_cls
    elif self.task.outputs_siblings:
        cls = SiblingFileCollection
    else:
        cls = TargetCollection

    targets = luigi.task.getpaths(self.task.get_branch_tasks())
    collection = cls(targets, threshold=self.threshold(len(targets)))

    return OrderedDict([("collection", collection)])
python
def output(self):
    """
    Returns the default workflow outputs in an ordered dictionary. At the moment this is just
    the collection of outputs of the branch tasks, stored with the key ``"collection"``.
    """
    if self.task.target_collection_cls is not None:
        cls = self.task.target_collection_cls
    elif self.task.outputs_siblings:
        cls = SiblingFileCollection
    else:
        cls = TargetCollection

    targets = luigi.task.getpaths(self.task.get_branch_tasks())
    collection = cls(targets, threshold=self.threshold(len(targets)))

    return OrderedDict([("collection", collection)])
Returns the default workflow outputs in an ordered dictionary. At the moment this is just the
collection of outputs of the branch tasks, stored with the key ``"collection"``.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L87-L102
riga/law
law/workflow/base.py
BaseWorkflowProxy.threshold
def threshold(self, n=None):
    """
    Returns the threshold number of tasks that need to be complete in order to consider the
    workflow as being complete itself. This takes into account the
    :py:attr:`law.BaseWorkflow.acceptance` parameter of the workflow. The threshold is passed to
    the :py:class:`law.TargetCollection` (or :py:class:`law.SiblingFileCollection`) within
    :py:meth:`output`. By default, the maximum number of tasks is taken from the length of the
    branch map. For performance purposes, you can set this value, *n*, directly.
    """
    if n is None:
        n = len(self.task.branch_map())

    acceptance = self.task.acceptance
    return (acceptance * n) if acceptance <= 1 else acceptance
python
def threshold(self, n=None):
    """
    Returns the threshold number of tasks that need to be complete in order to consider the
    workflow as being complete itself. This takes into account the
    :py:attr:`law.BaseWorkflow.acceptance` parameter of the workflow. The threshold is passed to
    the :py:class:`law.TargetCollection` (or :py:class:`law.SiblingFileCollection`) within
    :py:meth:`output`. By default, the maximum number of tasks is taken from the length of the
    branch map. For performance purposes, you can set this value, *n*, directly.
    """
    if n is None:
        n = len(self.task.branch_map())

    acceptance = self.task.acceptance
    return (acceptance * n) if acceptance <= 1 else acceptance
Returns the threshold number of tasks that need to be complete in order to consider the
workflow as being complete itself. This takes into account the
:py:attr:`law.BaseWorkflow.acceptance` parameter of the workflow. The threshold is passed to the
:py:class:`law.TargetCollection` (or :py:class:`law.SiblingFileCollection`) within
:py:meth:`output`. By default, the maximum number of tasks is taken from the length of the
branch map. For performance purposes, you can set this value, *n*, directly.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L104-L117
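The two interpretations of *acceptance* can be made explicit with a small standalone sketch that
mirrors the formula above:

.. code-block:: python

    def example_threshold(n, acceptance):
        # mirrors BaseWorkflowProxy.threshold for a known branch count n
        return (acceptance * n) if acceptance <= 1 else acceptance

    example_threshold(100, 0.9)  # 90.0, a fraction of the branch tasks
    example_threshold(100, 95)   # 95, an absolute number of tasks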
riga/law
law/workflow/base.py
BaseWorkflowProxy.get_prefixed_config
def get_prefixed_config(self, section, option, **kwargs):
    """
    Returns the config value for *option* in *section*, where the option name is first looked up
    with a ``"<workflow_type>_"`` prefix and the plain *option* value serves as the default.
    """
    cfg = Config.instance()
    default = cfg.get_expanded(section, option, **kwargs)
    return cfg.get_expanded(section, "{}_{}".format(self.workflow_type, option),
        default=default, **kwargs)
python
def get_prefixed_config(self, section, option, **kwargs):
    """
    Returns the config value for *option* in *section*, where the option name is first looked up
    with a ``"<workflow_type>_"`` prefix and the plain *option* value serves as the default.
    """
    cfg = Config.instance()
    default = cfg.get_expanded(section, option, **kwargs)
    return cfg.get_expanded(section, "{}_{}".format(self.workflow_type, option),
        default=default, **kwargs)
Returns the config value for *option* in *section*, where the option name is first looked up
with a ``"<workflow_type>_"`` prefix and the plain *option* value serves as the default.
https://github.com/riga/law/blob/479f84ce06ecf3bafe9d33cb7b8fc52e39fb80a1/law/workflow/base.py#L119-L126
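A sketch of the resulting lookup order, using a hypothetical section, option and workflow type:

.. code-block:: python

    class MyRemoteWorkflowProxy(BaseWorkflowProxy):  # hypothetical subclass
        workflow_type = "htcondor"

        def poll_interval(self):
            # reads "htcondor_poll_interval" from the [job] section,
            # falling back to the plain "poll_interval" value
            return self.get_prefixed_config("job", "poll_interval")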
alexandrovteam/pyimzML
pyimzml/ImzMLParser.py
getionimage
def getionimage(p, mz_value, tol=0.1, z=1, reduce_func=sum):
    """
    Get an image representation of the intensity distribution of the ion with specified m/z
    value. By default, the intensity values within the tolerance region are summed.

    :param p:
        the ImzMLParser (or anything else with similar attributes) for the desired dataset
    :param mz_value:
        m/z value for which the ion image shall be returned
    :param tol:
        Absolute tolerance for the m/z value, such that all ions with values
        mz_value-|tol| <= x <= mz_value+|tol| are included. Defaults to 0.1
    :param z:
        z Value if spectrogram is 3-dimensional.
    :param reduce_func:
        the behaviour for reducing the intensities between mz_value-|tol| and mz_value+|tol| to
        a single value. Must be a function that takes a sequence as input and outputs a number.
        By default, the values are summed.
    :return:
        numpy matrix with each element representing the ion intensity in this pixel. Can be
        easily plotted with matplotlib
    """
    tol = abs(tol)
    im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"]))
    for i, (x, y, z_) in enumerate(p.coordinates):
        if z_ == 0:
            # warn about z coordinates of 0, which typically indicate a wrong z argument
            warn(UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)"))
        if z_ == z:
            mzs, ints = map(lambda x: np.asarray(x), p.getspectrum(i))
            min_i, max_i = _bisect_spectrum(mzs, mz_value, tol)
            im[y - 1, x - 1] = reduce_func(ints[min_i:max_i + 1])
    return im
python
def getionimage(p, mz_value, tol=0.1, z=1, reduce_func=sum):
    """
    Get an image representation of the intensity distribution of the ion with specified m/z
    value. By default, the intensity values within the tolerance region are summed.

    :param p:
        the ImzMLParser (or anything else with similar attributes) for the desired dataset
    :param mz_value:
        m/z value for which the ion image shall be returned
    :param tol:
        Absolute tolerance for the m/z value, such that all ions with values
        mz_value-|tol| <= x <= mz_value+|tol| are included. Defaults to 0.1
    :param z:
        z Value if spectrogram is 3-dimensional.
    :param reduce_func:
        the behaviour for reducing the intensities between mz_value-|tol| and mz_value+|tol| to
        a single value. Must be a function that takes a sequence as input and outputs a number.
        By default, the values are summed.
    :return:
        numpy matrix with each element representing the ion intensity in this pixel. Can be
        easily plotted with matplotlib
    """
    tol = abs(tol)
    im = np.zeros((p.imzmldict["max count of pixels y"], p.imzmldict["max count of pixels x"]))
    for i, (x, y, z_) in enumerate(p.coordinates):
        if z_ == 0:
            # warn about z coordinates of 0, which typically indicate a wrong z argument
            warn(UserWarning("z coordinate = 0 present, if you're getting blank images set getionimage(.., .., z=0)"))
        if z_ == z:
            mzs, ints = map(lambda x: np.asarray(x), p.getspectrum(i))
            min_i, max_i = _bisect_spectrum(mzs, mz_value, tol)
            im[y - 1, x - 1] = reduce_func(ints[min_i:max_i + 1])
    return im
Get an image representation of the intensity distribution of the ion with specified m/z value.
By default, the intensity values within the tolerance region are summed.

:param p:
    the ImzMLParser (or anything else with similar attributes) for the desired dataset
:param mz_value:
    m/z value for which the ion image shall be returned
:param tol:
    Absolute tolerance for the m/z value, such that all ions with values
    mz_value-|tol| <= x <= mz_value+|tol| are included. Defaults to 0.1
:param z:
    z Value if spectrogram is 3-dimensional.
:param reduce_func:
    the behaviour for reducing the intensities between mz_value-|tol| and mz_value+|tol| to a
    single value. Must be a function that takes a sequence as input and outputs a number. By
    default, the values are summed.
:return:
    numpy matrix with each element representing the ion intensity in this pixel. Can be easily
    plotted with matplotlib
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L335-L368
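A typical usage sketch with a hypothetical dataset, plotting the ion image with matplotlib:

.. code-block:: python

    import matplotlib.pyplot as plt
    from pyimzml.ImzMLParser import ImzMLParser, getionimage

    p = ImzMLParser("example.imzML")  # hypothetical file
    im = getionimage(p, 885.55, tol=0.25, reduce_func=max)
    plt.imshow(im)
    plt.show()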
alexandrovteam/pyimzML
pyimzml/ImzMLParser.py
ImzMLParser.__iter_read_spectrum_meta
def __iter_read_spectrum_meta(self):
    """
    This method should only be called by __init__. Reads the data formats, coordinates and
    offsets from the .imzML file and initializes the respective attributes. While traversing the
    XML tree, the per-spectrum metadata is pruned, i.e. the <spectrumList> element(s) are left
    behind empty.

    Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141"
    or "IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer",
    "64-bit integer".
    """
    mz_group = int_group = None
    slist = None
    elem_iterator = self.iterparse(self.filename, events=("start", "end"))

    if sys.version_info > (3,):
        _, self.root = next(elem_iterator)
    else:
        _, self.root = elem_iterator.next()

    for event, elem in elem_iterator:
        if elem.tag == self.sl + "spectrumList" and event == "start":
            slist = elem
        elif elem.tag == self.sl + "spectrum" and event == "end":
            self.__process_spectrum(elem)
            slist.remove(elem)
        elif elem.tag == self.sl + "referenceableParamGroup" and event == "end":
            for param in elem:
                if param.attrib["name"] == "m/z array":
                    self.mzGroupId = elem.attrib['id']
                    mz_group = elem
                elif param.attrib["name"] == "intensity array":
                    self.intGroupId = elem.attrib['id']
                    int_group = elem
    self.__assign_precision(int_group, mz_group)
    self.__fix_offsets()
python
def __iter_read_spectrum_meta(self):
    """
    This method should only be called by __init__. Reads the data formats, coordinates and
    offsets from the .imzML file and initializes the respective attributes. While traversing the
    XML tree, the per-spectrum metadata is pruned, i.e. the <spectrumList> element(s) are left
    behind empty.

    Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141"
    or "IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer",
    "64-bit integer".
    """
    mz_group = int_group = None
    slist = None
    elem_iterator = self.iterparse(self.filename, events=("start", "end"))

    if sys.version_info > (3,):
        _, self.root = next(elem_iterator)
    else:
        _, self.root = elem_iterator.next()

    for event, elem in elem_iterator:
        if elem.tag == self.sl + "spectrumList" and event == "start":
            slist = elem
        elif elem.tag == self.sl + "spectrum" and event == "end":
            self.__process_spectrum(elem)
            slist.remove(elem)
        elif elem.tag == self.sl + "referenceableParamGroup" and event == "end":
            for param in elem:
                if param.attrib["name"] == "m/z array":
                    self.mzGroupId = elem.attrib['id']
                    mz_group = elem
                elif param.attrib["name"] == "intensity array":
                    self.intGroupId = elem.attrib['id']
                    int_group = elem
    self.__assign_precision(int_group, mz_group)
    self.__fix_offsets()
This method should only be called by __init__. Reads the data formats, coordinates and offsets
from the .imzML file and initializes the respective attributes. While traversing the XML tree,
the per-spectrum metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty.

Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or
"IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit
integer".
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L115-L148
alexandrovteam/pyimzML
pyimzml/ImzMLParser.py
ImzMLParser.__readimzmlmeta
def __readimzmlmeta(self):
    """
    This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
    the .imzML file.

    This method reads only a subset of the available meta information and may be extended in the future. The keys
    are named similarly to the imzML names. Currently supported keys: "max dimension x", "max dimension y",
    "pixel size x", "pixel size y", "matrix solution concentration", "wavelength", "focus diameter x",
    "focus diameter y", "pulse energy", "pulse duration", "attenuation".

    If a key is not found in the XML tree, it will not be in the dict either.

    :return d: dict containing above mentioned meta data
    :rtype: dict
    :raises Warning: if an xml attribute has a number format different from the imzML specification
    """
    d = {}
    scan_settings_list_elem = self.root.find('%sscanSettingsList' % self.sl)
    instrument_config_list_elem = self.root.find('%sinstrumentConfigurationList' % self.sl)
    supportedparams1 = [("max count of pixels x", int), ("max count of pixels y", int),
                        ("max dimension x", int), ("max dimension y", int), ("pixel size x", float),
                        ("pixel size y", float), ("matrix solution concentration", float)]
    supportedparams2 = [("wavelength", float), ("focus diameter x", float), ("focus diameter y", float),
                        ("pulse energy", float), ("pulse duration", float), ("attenuation", float)]
    supportedaccessions1 = [("IMS:1000042", "value"), ("IMS:1000043", "value"),
                            ("IMS:1000044", "value"), ("IMS:1000045", "value"),
                            ("IMS:1000046", "value"), ("IMS:1000047", "value"), ("MS:1000835", "value")]
    supportedaccessions2 = [("MS:1000843", "value"), ("MS:1000844", "value"), ("MS:1000845", "value"),
                            ("MS:1000846", "value"), ("MS:1000847", "value"), ("MS:1000848", "value")]
    for i in range(len(supportedparams1)):
        acc, attr = supportedaccessions1[i]
        elem = scan_settings_list_elem.find('.//%scvParam[@accession="%s"]' % (self.sl, acc))
        if elem is None:
            break
        name, T = supportedparams1[i]
        try:
            d[name] = T(elem.attrib[attr])
        except ValueError:
            warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))
    for i in range(len(supportedparams2)):
        acc, attr = supportedaccessions2[i]
        elem = instrument_config_list_elem.find('.//%scvParam[@accession="%s"]' % (self.sl, acc))
        if elem is None:
            break
        name, T = supportedparams2[i]
        try:
            d[name] = T(elem.attrib[attr])
        except ValueError:
            warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))
    return d
python
def __readimzmlmeta(self):
    """
    This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
    the .imzML file.

    This method reads only a subset of the available meta information and may be extended in the future. The keys
    are named similarly to the imzML names. Currently supported keys: "max dimension x", "max dimension y",
    "pixel size x", "pixel size y", "matrix solution concentration", "wavelength", "focus diameter x",
    "focus diameter y", "pulse energy", "pulse duration", "attenuation".

    If a key is not found in the XML tree, it will not be in the dict either.

    :return d: dict containing above mentioned meta data
    :rtype: dict
    :raises Warning: if an xml attribute has a number format different from the imzML specification
    """
    d = {}
    scan_settings_list_elem = self.root.find('%sscanSettingsList' % self.sl)
    instrument_config_list_elem = self.root.find('%sinstrumentConfigurationList' % self.sl)
    supportedparams1 = [("max count of pixels x", int), ("max count of pixels y", int),
                        ("max dimension x", int), ("max dimension y", int), ("pixel size x", float),
                        ("pixel size y", float), ("matrix solution concentration", float)]
    supportedparams2 = [("wavelength", float), ("focus diameter x", float), ("focus diameter y", float),
                        ("pulse energy", float), ("pulse duration", float), ("attenuation", float)]
    supportedaccessions1 = [("IMS:1000042", "value"), ("IMS:1000043", "value"),
                            ("IMS:1000044", "value"), ("IMS:1000045", "value"),
                            ("IMS:1000046", "value"), ("IMS:1000047", "value"), ("MS:1000835", "value")]
    supportedaccessions2 = [("MS:1000843", "value"), ("MS:1000844", "value"), ("MS:1000845", "value"),
                            ("MS:1000846", "value"), ("MS:1000847", "value"), ("MS:1000848", "value")]
    for i in range(len(supportedparams1)):
        acc, attr = supportedaccessions1[i]
        elem = scan_settings_list_elem.find('.//%scvParam[@accession="%s"]' % (self.sl, acc))
        if elem is None:
            break
        name, T = supportedparams1[i]
        try:
            d[name] = T(elem.attrib[attr])
        except ValueError:
            warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))
    for i in range(len(supportedparams2)):
        acc, attr = supportedaccessions2[i]
        elem = instrument_config_list_elem.find('.//%scvParam[@accession="%s"]' % (self.sl, acc))
        if elem is None:
            break
        name, T = supportedparams2[i]
        try:
            d[name] = T(elem.attrib[attr])
        except ValueError:
            warn(Warning('Wrong data type in XML file. Skipped attribute "%s"' % name))
    return d
This method should only be called by __init__. Initializes the imzmldict with frequently used metadata from
the .imzML file.

This method reads only a subset of the available meta information and may be extended in the future. The keys
are named similarly to the imzML names. Currently supported keys: "max dimension x", "max dimension y",
"pixel size x", "pixel size y", "matrix solution concentration", "wavelength", "focus diameter x",
"focus diameter y", "pulse energy", "pulse duration", "attenuation".

If a key is not found in the XML tree, it will not be in the dict either.

:return d: dict containing above mentioned meta data
:rtype: dict
:raises Warning: if an xml attribute has a number format different from the imzML specification
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L210-L264
alexandrovteam/pyimzML
pyimzml/ImzMLParser.py
ImzMLParser.get_physical_coordinates
def get_physical_coordinates(self, i):
    """
    For a pixel index i, return the real-world coordinates in nanometers.

    This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.

    :param i: the pixel index
    :return: a tuple of x and y coordinates.
    :rtype: Tuple[float]
    :raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
    """
    try:
        pixel_size_x = self.imzmldict["pixel size x"]
        pixel_size_y = self.imzmldict["pixel size y"]
    except KeyError:
        raise KeyError("Could not find all pixel size attributes in imzML file")
    image_x, image_y = self.coordinates[i][:2]
    return image_x * pixel_size_x, image_y * pixel_size_y
python
def get_physical_coordinates(self, i):
    """
    For a pixel index i, return the real-world coordinates in nanometers.

    This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.

    :param i: the pixel index
    :return: a tuple of x and y coordinates.
    :rtype: Tuple[float]
    :raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
    """
    try:
        pixel_size_x = self.imzmldict["pixel size x"]
        pixel_size_y = self.imzmldict["pixel size y"]
    except KeyError:
        raise KeyError("Could not find all pixel size attributes in imzML file")
    image_x, image_y = self.coordinates[i][:2]
    return image_x * pixel_size_x, image_y * pixel_size_y
For a pixel index i, return the real-world coordinates in nanometers.

This is equivalent to multiplying the image coordinates of the given pixel with the pixel size.

:param i: the pixel index
:return: a tuple of x and y coordinates.
:rtype: Tuple[float]
:raises KeyError: if the .imzML file does not specify the attributes "pixel size x" and "pixel size y"
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L266-L283
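A brief usage sketch (the file path is hypothetical; as documented above, the file must define the "pixel size x"/"pixel size y" attributes or a KeyError is raised):

from pyimzml.ImzMLParser import ImzMLParser

p = ImzMLParser('example.imzML')             # hypothetical input file
x_nm, y_nm = p.get_physical_coordinates(0)   # image coordinates of pixel 0 scaled by pixel size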
alexandrovteam/pyimzML
pyimzml/ImzMLParser.py
ImzMLParser.getspectrum
def getspectrum(self, index):
    """
    Reads the spectrum at specified index from the .ibd file.

    :param index:
        Index of the desired spectrum in the .imzML file

    Output:

    mz_array: numpy.ndarray
        Sequence of m/z values representing the horizontal axis of the desired mass
        spectrum
    intensity_array: numpy.ndarray
        Sequence of intensity values corresponding to mz_array
    """
    mz_bytes, intensity_bytes = self.get_spectrum_as_string(index)
    mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
    intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
    return mz_array, intensity_array
python
def getspectrum(self, index):
    """
    Reads the spectrum at specified index from the .ibd file.

    :param index:
        Index of the desired spectrum in the .imzML file

    Output:

    mz_array: numpy.ndarray
        Sequence of m/z values representing the horizontal axis of the desired mass
        spectrum
    intensity_array: numpy.ndarray
        Sequence of intensity values corresponding to mz_array
    """
    mz_bytes, intensity_bytes = self.get_spectrum_as_string(index)
    mz_array = np.frombuffer(mz_bytes, dtype=self.mzPrecision)
    intensity_array = np.frombuffer(intensity_bytes, dtype=self.intensityPrecision)
    return mz_array, intensity_array
Reads the spectrum at specified index from the .ibd file.

:param index:
    Index of the desired spectrum in the .imzML file

Output:

mz_array: numpy.ndarray
    Sequence of m/z values representing the horizontal axis of the desired mass
    spectrum
intensity_array: numpy.ndarray
    Sequence of intensity values corresponding to mz_array
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L285-L303
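A usage sketch computing a per-pixel total ion current, assuming the hypothetical input file 'example.imzML'; coordinates are sliced to (x, y) since entries may also carry a z value:

import numpy as np
from pyimzml.ImzMLParser import ImzMLParser

p = ImzMLParser('example.imzML')
for i, coord in enumerate(p.coordinates):
    x, y = coord[:2]                      # drop z if present
    mzs, intensities = p.getspectrum(i)   # numpy arrays decoded from the .ibd file
    print(x, y, np.sum(intensities))      # total ion current for this pixel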
alexandrovteam/pyimzML
pyimzml/ImzMLParser.py
ImzMLParser.get_spectrum_as_string
def get_spectrum_as_string(self, index):
    """
    Reads m/z array and intensity array of the spectrum at specified location
    from the binary file as a byte string. The string can be unpacked by the struct module.
    To get the arrays as numbers, use getspectrum

    :param index:
        Index of the desired spectrum in the .imzML file
    :rtype: Tuple[str, str]

    Output:

    mz_string: string
        where each character represents a byte of the mz array of the spectrum
    intensity_string: string
        where each character represents a byte of the intensity array of the spectrum
    """
    offsets = [self.mzOffsets[index], self.intensityOffsets[index]]
    lengths = [self.mzLengths[index], self.intensityLengths[index]]
    lengths[0] *= self.sizeDict[self.mzPrecision]
    lengths[1] *= self.sizeDict[self.intensityPrecision]
    self.m.seek(offsets[0])
    mz_string = self.m.read(lengths[0])
    self.m.seek(offsets[1])
    intensity_string = self.m.read(lengths[1])
    return mz_string, intensity_string
python
def get_spectrum_as_string(self, index):
    """
    Reads m/z array and intensity array of the spectrum at specified location
    from the binary file as a byte string. The string can be unpacked by the struct module.
    To get the arrays as numbers, use getspectrum

    :param index:
        Index of the desired spectrum in the .imzML file
    :rtype: Tuple[str, str]

    Output:

    mz_string: string
        where each character represents a byte of the mz array of the spectrum
    intensity_string: string
        where each character represents a byte of the intensity array of the spectrum
    """
    offsets = [self.mzOffsets[index], self.intensityOffsets[index]]
    lengths = [self.mzLengths[index], self.intensityLengths[index]]
    lengths[0] *= self.sizeDict[self.mzPrecision]
    lengths[1] *= self.sizeDict[self.intensityPrecision]
    self.m.seek(offsets[0])
    mz_string = self.m.read(lengths[0])
    self.m.seek(offsets[1])
    intensity_string = self.m.read(lengths[1])
    return mz_string, intensity_string
Reads m/z array and intensity array of the spectrum at specified location
from the binary file as a byte string. The string can be unpacked by the struct module.
To get the arrays as numbers, use getspectrum

:param index:
    Index of the desired spectrum in the .imzML file
:rtype: Tuple[str, str]

Output:

mz_string: string
    where each character represents a byte of the mz array of the spectrum
intensity_string: string
    where each character represents a byte of the intensity array of the spectrum
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLParser.py#L305-L332
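As the docstring notes, the returned byte strings can be unpacked with the struct module. A sketch under the assumption (hypothetical here) of 32-bit little-endian float m/z values:

import struct
from pyimzml.ImzMLParser import ImzMLParser

p = ImzMLParser('example.imzML')                 # hypothetical input file
mz_bytes, intensity_bytes = p.get_spectrum_as_string(0)
count = len(mz_bytes) // struct.calcsize('f')    # number of 32-bit floats
mzs = struct.unpack('<%df' % count, mz_bytes)    # tuple of m/z values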
alexandrovteam/pyimzML
pyimzml/ImzMLWriter.py
ImzMLWriter._read_mz
def _read_mz(self, mz_offset, mz_len, mz_enc_len):
    '''reads a mz array from the currently open ibd file'''
    self.ibd.seek(mz_offset)
    data = self.ibd.read(mz_enc_len)
    self.ibd.seek(0, 2)
    data = self.mz_compression.decompress(data)
    return tuple(np.fromstring(data, dtype=self.mz_dtype))
python
def _read_mz(self, mz_offset, mz_len, mz_enc_len):
    '''reads a mz array from the currently open ibd file'''
    self.ibd.seek(mz_offset)
    data = self.ibd.read(mz_enc_len)
    self.ibd.seek(0, 2)
    data = self.mz_compression.decompress(data)
    return tuple(np.fromstring(data, dtype=self.mz_dtype))
reads a mz array from the currently open ibd file
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLWriter.py#L241-L247
alexandrovteam/pyimzML
pyimzml/ImzMLWriter.py
ImzMLWriter._get_previous_mz
def _get_previous_mz(self, mzs):
    '''given an mz array, return the mz_data (disk location)
    if the mz array was not previously written, write to disk first'''
    mzs = tuple(mzs)  # must be hashable
    if mzs in self.lru_cache:
        return self.lru_cache[mzs]

    # mz not recognized ... check hash
    mz_hash = "%s-%s-%s" % (hash(mzs), sum(mzs), len(mzs))
    if mz_hash in self.hashes:
        for mz_data in self.hashes[mz_hash]:
            test_mz = self._read_mz(*mz_data)
            if mzs == test_mz:
                self.lru_cache[test_mz] = mz_data
                return mz_data

    # hash not recognized
    # must be a new mz array ... write it, add it to lru_cache and hashes
    mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
    self.hashes[mz_hash].append(mz_data)
    self.lru_cache[mzs] = mz_data
    return mz_data
python
def _get_previous_mz(self, mzs):
    '''given an mz array, return the mz_data (disk location)
    if the mz array was not previously written, write to disk first'''
    mzs = tuple(mzs)  # must be hashable
    if mzs in self.lru_cache:
        return self.lru_cache[mzs]

    # mz not recognized ... check hash
    mz_hash = "%s-%s-%s" % (hash(mzs), sum(mzs), len(mzs))
    if mz_hash in self.hashes:
        for mz_data in self.hashes[mz_hash]:
            test_mz = self._read_mz(*mz_data)
            if mzs == test_mz:
                self.lru_cache[test_mz] = mz_data
                return mz_data

    # hash not recognized
    # must be a new mz array ... write it, add it to lru_cache and hashes
    mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
    self.hashes[mz_hash].append(mz_data)
    self.lru_cache[mzs] = mz_data
    return mz_data
given an mz array, return the mz_data (disk location)
if the mz array was not previously written, write to disk first
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLWriter.py#L249-L269
alexandrovteam/pyimzML
pyimzml/ImzMLWriter.py
ImzMLWriter.addSpectrum
def addSpectrum(self, mzs, intensities, coords, userParams=[]):
    """
    Add a mass spectrum to the file.

    :param mzs: mz array
    :param intensities: intensity array
    :param coords:
        * 2-tuple of x and y position OR
        * 3-tuple of x, y, and z position

        note some applications want coords to be 1-indexed
    """
    # must be rounded now to allow comparisons to later data
    # but don't waste CPU time in continuous mode since the data will not be used anyway
    if self.mode != "continuous" or self.first_mz is None:
        mzs = self.mz_compression.rounding(mzs)
    intensities = self.intensity_compression.rounding(intensities)

    if self.mode == "continuous":
        if self.first_mz is None:
            self.first_mz = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
        mz_data = self.first_mz
    elif self.mode == "processed":
        mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
    elif self.mode == "auto":
        mz_data = self._get_previous_mz(mzs)
    else:
        raise TypeError("Unknown mode: %s" % self.mode)
    mz_offset, mz_len, mz_enc_len = mz_data

    int_offset, int_len, int_enc_len = self._encode_and_write(intensities, self.intensity_dtype, self.intensity_compression)
    mz_min = np.min(mzs)
    mz_max = np.max(mzs)
    ix_max = np.argmax(intensities)
    mz_base = mzs[ix_max]
    int_base = intensities[ix_max]
    int_tic = np.sum(intensities)
    s = _Spectrum(coords, mz_len, mz_offset, mz_enc_len, int_len, int_offset, int_enc_len,
                  mz_min, mz_max, mz_base, int_base, int_tic, userParams)
    self.spectra.append(s)
python
def addSpectrum(self, mzs, intensities, coords, userParams=[]):
    """
    Add a mass spectrum to the file.

    :param mzs: mz array
    :param intensities: intensity array
    :param coords:
        * 2-tuple of x and y position OR
        * 3-tuple of x, y, and z position

        note some applications want coords to be 1-indexed
    """
    # must be rounded now to allow comparisons to later data
    # but don't waste CPU time in continuous mode since the data will not be used anyway
    if self.mode != "continuous" or self.first_mz is None:
        mzs = self.mz_compression.rounding(mzs)
    intensities = self.intensity_compression.rounding(intensities)

    if self.mode == "continuous":
        if self.first_mz is None:
            self.first_mz = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
        mz_data = self.first_mz
    elif self.mode == "processed":
        mz_data = self._encode_and_write(mzs, self.mz_dtype, self.mz_compression)
    elif self.mode == "auto":
        mz_data = self._get_previous_mz(mzs)
    else:
        raise TypeError("Unknown mode: %s" % self.mode)
    mz_offset, mz_len, mz_enc_len = mz_data

    int_offset, int_len, int_enc_len = self._encode_and_write(intensities, self.intensity_dtype, self.intensity_compression)
    mz_min = np.min(mzs)
    mz_max = np.max(mzs)
    ix_max = np.argmax(intensities)
    mz_base = mzs[ix_max]
    int_base = intensities[ix_max]
    int_tic = np.sum(intensities)
    s = _Spectrum(coords, mz_len, mz_offset, mz_enc_len, int_len, int_offset, int_enc_len,
                  mz_min, mz_max, mz_base, int_base, int_tic, userParams)
    self.spectra.append(s)
Add a mass spectrum to the file.

:param mzs: mz array
:param intensities: intensity array
:param coords:
    * 2-tuple of x and y position OR
    * 3-tuple of x, y, and z position

    note some applications want coords to be 1-indexed
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLWriter.py#L271-L312
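A writer-side usage sketch (the output path 'output.imzML' is hypothetical): two spectra sharing one m/z axis are added with 1-indexed coordinates, then the file is finalized with finish() from the record below.

import numpy as np
from pyimzml.ImzMLWriter import ImzMLWriter

mzs = np.linspace(100.0, 1000.0, 500)
writer = ImzMLWriter('output.imzML')
writer.addSpectrum(mzs, np.random.rand(500), coords=(1, 1))
writer.addSpectrum(mzs, np.random.rand(500), coords=(2, 1))
writer.finish()  # flush the .ibd stream and write the XML index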
alexandrovteam/pyimzML
pyimzml/ImzMLWriter.py
ImzMLWriter.finish
def finish(self):
    '''alias of close()'''
    self.ibd.close()
    self._write_xml()
    self.xml.close()
python
def finish(self):
    '''alias of close()'''
    self.ibd.close()
    self._write_xml()
    self.xml.close()
alias of close()
https://github.com/alexandrovteam/pyimzML/blob/baae0bea7279f9439113d6b2f61be528c0462b3f/pyimzml/ImzMLWriter.py#L321-L325
michaeljoseph/changes
changes/__init__.py
initialise
def initialise():
    """
    Detects, prompts and initialises the project.

    Stores project and tool configuration in the `changes` module.
    """
    global settings, project_settings

    # Global changes settings
    settings = Changes.load()

    # Project specific settings
    project_settings = Project.load(GitHubRepository(auth_token=settings.auth_token))
python
def initialise():
    """
    Detects, prompts and initialises the project.

    Stores project and tool configuration in the `changes` module.
    """
    global settings, project_settings

    # Global changes settings
    settings = Changes.load()

    # Project specific settings
    project_settings = Project.load(GitHubRepository(auth_token=settings.auth_token))
Detects, prompts and initialises the project.

Stores project and tool configuration in the `changes` module.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/__init__.py#L21-L33
michaeljoseph/changes
changes/packaging.py
build_distributions
def build_distributions(context):
    """Builds package distributions"""
    rmtree('dist', ignore_errors=True)
    build_package_command = 'python setup.py clean sdist bdist_wheel'
    result = shell.dry_run(build_package_command, context.dry_run)
    packages = Path('dist').files() if not context.dry_run else "nothing"
    if not result:
        raise Exception('Error building packages: %s' % result)
    else:
        log.info('Built %s' % ', '.join(packages))
        return packages
python
def build_distributions(context):
    """Builds package distributions"""
    rmtree('dist', ignore_errors=True)
    build_package_command = 'python setup.py clean sdist bdist_wheel'
    result = shell.dry_run(build_package_command, context.dry_run)
    packages = Path('dist').files() if not context.dry_run else "nothing"
    if not result:
        raise Exception('Error building packages: %s' % result)
    else:
        log.info('Built %s' % ', '.join(packages))
        return packages
Builds package distributions
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L10-L22
michaeljoseph/changes
changes/packaging.py
install_package
def install_package(context):
    """Attempts to install the sdist and wheel."""
    if not context.dry_run and build_distributions(context):
        with util.mktmpdir() as tmp_dir:
            venv.create_venv(tmp_dir=tmp_dir)
            for distribution in Path('dist').files():
                try:
                    venv.install(distribution, tmp_dir)
                    log.info('Successfully installed %s', distribution)
                    if context.test_command and verification.run_test_command(context):
                        log.info(
                            'Successfully ran test command: %s', context.test_command
                        )
                except Exception as e:
                    raise Exception(
                        'Error installing distribution %s' % distribution, e
                    )
    else:
        log.info('Dry run, skipping installation')
python
def install_package(context):
    """Attempts to install the sdist and wheel."""
    if not context.dry_run and build_distributions(context):
        with util.mktmpdir() as tmp_dir:
            venv.create_venv(tmp_dir=tmp_dir)
            for distribution in Path('dist').files():
                try:
                    venv.install(distribution, tmp_dir)
                    log.info('Successfully installed %s', distribution)
                    if context.test_command and verification.run_test_command(context):
                        log.info(
                            'Successfully ran test command: %s', context.test_command
                        )
                except Exception as e:
                    raise Exception(
                        'Error installing distribution %s' % distribution, e
                    )
    else:
        log.info('Dry run, skipping installation')
Attempts to install the sdist and wheel.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L26-L45
michaeljoseph/changes
changes/packaging.py
upload_package
def upload_package(context):
    """Uploads your project packages to pypi with twine."""
    if not context.dry_run and build_distributions(context):
        upload_args = 'twine upload '
        upload_args += ' '.join(Path('dist').files())
        if context.pypi:
            upload_args += ' -r %s' % context.pypi

        upload_result = shell.dry_run(upload_args, context.dry_run)
        if not context.dry_run and not upload_result:
            raise Exception('Error uploading: %s' % upload_result)
        else:
            log.info(
                'Successfully uploaded %s:%s', context.module_name, context.new_version
            )
    else:
        log.info('Dry run, skipping package upload')
python
def upload_package(context):
    """Uploads your project packages to pypi with twine."""
    if not context.dry_run and build_distributions(context):
        upload_args = 'twine upload '
        upload_args += ' '.join(Path('dist').files())
        if context.pypi:
            upload_args += ' -r %s' % context.pypi

        upload_result = shell.dry_run(upload_args, context.dry_run)
        if not context.dry_run and not upload_result:
            raise Exception('Error uploading: %s' % upload_result)
        else:
            log.info(
                'Successfully uploaded %s:%s', context.module_name, context.new_version
            )
    else:
        log.info('Dry run, skipping package upload')
Uploads your project packages to pypi with twine.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L49-L66
michaeljoseph/changes
changes/packaging.py
install_from_pypi
def install_from_pypi(context):
    """Attempts to install your package from pypi."""
    tmp_dir = venv.create_venv()
    install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)

    package_index = 'pypi'
    if context.pypi:
        install_cmd += '-i %s' % context.pypi
        package_index = context.pypi

    try:
        result = shell.dry_run(install_cmd, context.dry_run)
        if not context.dry_run and not result:
            log.error(
                'Failed to install %s from %s', context.module_name, package_index
            )
        else:
            log.info(
                'Successfully installed %s from %s', context.module_name, package_index
            )
    except Exception as e:
        error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
        log.exception(error_msg)
        raise Exception(error_msg, e)
python
def install_from_pypi(context):
    """Attempts to install your package from pypi."""
    tmp_dir = venv.create_venv()
    install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)

    package_index = 'pypi'
    if context.pypi:
        install_cmd += '-i %s' % context.pypi
        package_index = context.pypi

    try:
        result = shell.dry_run(install_cmd, context.dry_run)
        if not context.dry_run and not result:
            log.error(
                'Failed to install %s from %s', context.module_name, package_index
            )
        else:
            log.info(
                'Successfully installed %s from %s', context.module_name, package_index
            )
    except Exception as e:
        error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
        log.exception(error_msg)
        raise Exception(error_msg, e)
Attempts to install your package from pypi.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L69-L94
michaeljoseph/changes
changes/probe.py
report_and_raise
def report_and_raise(probe_name, probe_result, failure_msg):
    """Logs the probe result and raises on failure"""
    log.info('%s? %s' % (probe_name, probe_result))
    if not probe_result:
        raise exceptions.ProbeException(failure_msg)
    else:
        return True
python
def report_and_raise(probe_name, probe_result, failure_msg):
    """Logs the probe result and raises on failure"""
    log.info('%s? %s' % (probe_name, probe_result))
    if not probe_result:
        raise exceptions.ProbeException(failure_msg)
    else:
        return True
Logs the probe result and raises on failure
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/probe.py#L30-L36
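A sketch of the probe pattern (the README check is a hypothetical example, not one of the module's actual probes):

import os
from changes import probe

probe.report_and_raise(
    'Has a README',                       # logged as "Has a README? True/False"
    os.path.exists('README.md'),          # the probe result
    'Your project needs a README.md',     # raised as ProbeException on failure
)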
michaeljoseph/changes
changes/probe.py
has_metadata
def has_metadata(python_module):
    """`<module_name>/__init__.py` with `__version__` and `__url__`"""
    init_path = '{}/__init__.py'.format(python_module)
    has_metadata = (
        exists(init_path)
        and attributes.has_attribute(python_module, '__version__')
        and attributes.has_attribute(python_module, '__url__')
    )
    return report_and_raise(
        'Has module metadata',
        has_metadata,
        'Your %s/__init__.py must contain __version__ and __url__ attributes',
    )
python
def has_metadata(python_module):
    """`<module_name>/__init__.py` with `__version__` and `__url__`"""
    init_path = '{}/__init__.py'.format(python_module)
    has_metadata = (
        exists(init_path)
        and attributes.has_attribute(python_module, '__version__')
        and attributes.has_attribute(python_module, '__url__')
    )
    return report_and_raise(
        'Has module metadata',
        has_metadata,
        'Your %s/__init__.py must contain __version__ and __url__ attributes',
    )
`<module_name>/__init__.py` with `__version__` and `__url__`
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/probe.py#L79-L91
michaeljoseph/changes
changes/probe.py
probe_project
def probe_project(python_module):
    """
    Check if the project meets `changes` requirements.
    Complain and exit otherwise.
    """
    log.info('Checking project for changes requirements.')
    return (
        has_tools()
        and has_setup()
        and has_metadata(python_module)
        and has_test_runner()
        and has_readme()
        and has_changelog()
    )
python
def probe_project(python_module):
    """
    Check if the project meets `changes` requirements.
    Complain and exit otherwise.
    """
    log.info('Checking project for changes requirements.')
    return (
        has_tools()
        and has_setup()
        and has_metadata(python_module)
        and has_test_runner()
        and has_readme()
        and has_changelog()
    )
Check if the project meets `changes` requirements. Complain and exit otherwise.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/probe.py#L98-L111
michaeljoseph/changes
changes/flow.py
publish
def publish(context):
    """Publishes the project"""
    commit_version_change(context)

    if context.github:
        # github token
        project_settings = project_config(context.module_name)
        if not project_settings['gh_token']:
            click.echo('You need a GitHub token for changes to create a release.')
            click.pause(
                'Press [enter] to launch the GitHub "New personal access '
                'token" page, to create a token for changes.'
            )
            click.launch('https://github.com/settings/tokens/new')
            project_settings['gh_token'] = click.prompt('Enter your changes token')
            store_settings(context.module_name, project_settings)
        description = click.prompt('Describe this release')

        upload_url = create_github_release(
            context, project_settings['gh_token'], description
        )

        upload_release_distributions(
            context,
            project_settings['gh_token'],
            build_distributions(context),
            upload_url,
        )

        click.pause('Press [enter] to review and update your new release')
        click.launch(
            '{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
        )
    else:
        tag_and_push(context)
python
def publish(context):
    """Publishes the project"""
    commit_version_change(context)

    if context.github:
        # github token
        project_settings = project_config(context.module_name)
        if not project_settings['gh_token']:
            click.echo('You need a GitHub token for changes to create a release.')
            click.pause(
                'Press [enter] to launch the GitHub "New personal access '
                'token" page, to create a token for changes.'
            )
            click.launch('https://github.com/settings/tokens/new')
            project_settings['gh_token'] = click.prompt('Enter your changes token')
            store_settings(context.module_name, project_settings)
        description = click.prompt('Describe this release')

        upload_url = create_github_release(
            context, project_settings['gh_token'], description
        )

        upload_release_distributions(
            context,
            project_settings['gh_token'],
            build_distributions(context),
            upload_url,
        )

        click.pause('Press [enter] to review and update your new release')
        click.launch(
            '{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
        )
    else:
        tag_and_push(context)
Publishes the project
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/flow.py#L25-L60
michaeljoseph/changes
changes/flow.py
perform_release
def perform_release(context):
    """Executes the release process."""
    try:
        run_tests()

        if not context.skip_changelog:
            generate_changelog(context)

        increment_version(context)

        build_distributions(context)

        install_package(context)

        upload_package(context)

        install_from_pypi(context)

        publish(context)
    except Exception:
        log.exception('Error releasing')
python
def perform_release(context):
    """Executes the release process."""
    try:
        run_tests()

        if not context.skip_changelog:
            generate_changelog(context)

        increment_version(context)

        build_distributions(context)

        install_package(context)

        upload_package(context)

        install_from_pypi(context)

        publish(context)
    except Exception:
        log.exception('Error releasing')
Executes the release process.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/flow.py#L63-L83
michaeljoseph/changes
changes/attributes.py
extract_attribute
def extract_attribute(module_name, attribute_name):
    """Extract a metadata property from a module"""
    with open('%s/__init__.py' % module_name) as input_file:
        for line in input_file:
            if line.startswith(attribute_name):
                return ast.literal_eval(line.split('=')[1].strip())
python
def extract_attribute(module_name, attribute_name):
    """Extract a metadata property from a module"""
    with open('%s/__init__.py' % module_name) as input_file:
        for line in input_file:
            if line.startswith(attribute_name):
                return ast.literal_eval(line.split('=')[1].strip())
Extract a metadata property from a module
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/attributes.py#L12-L17
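A sketch using a hypothetical package layout: ./mypackage/__init__.py containing the line __version__ = '0.1.0'.

from changes import attributes

version = attributes.extract_attribute('mypackage', '__version__')
print(version)  # -> '0.1.0'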
michaeljoseph/changes
changes/attributes.py
replace_attribute
def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
    """Update a metadata attribute"""
    init_file = '%s/__init__.py' % module_name
    _, tmp_file = tempfile.mkstemp()

    with open(init_file) as input_file:
        with open(tmp_file, 'w') as output_file:
            for line in input_file:
                if line.startswith(attribute_name):
                    line = "%s = '%s'\n" % (attribute_name, new_value)

                output_file.write(line)

    if not dry_run:
        Path(tmp_file).copy(init_file)
    else:
        log.info(diff(tmp_file, init_file, retcode=None))
python
def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
    """Update a metadata attribute"""
    init_file = '%s/__init__.py' % module_name
    _, tmp_file = tempfile.mkstemp()

    with open(init_file) as input_file:
        with open(tmp_file, 'w') as output_file:
            for line in input_file:
                if line.startswith(attribute_name):
                    line = "%s = '%s'\n" % (attribute_name, new_value)

                output_file.write(line)

    if not dry_run:
        Path(tmp_file).copy(init_file)
    else:
        log.info(diff(tmp_file, init_file, retcode=None))
Update a metadata attribute
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/attributes.py#L20-L36
michaeljoseph/changes
changes/attributes.py
has_attribute
def has_attribute(module_name, attribute_name):
    """Is this attribute present?"""
    init_file = '%s/__init__.py' % module_name
    return any(
        [attribute_name in init_line for init_line in open(init_file).readlines()]
    )
python
def has_attribute(module_name, attribute_name):
    """Is this attribute present?"""
    init_file = '%s/__init__.py' % module_name
    return any(
        [attribute_name in init_line for init_line in open(init_file).readlines()]
    )
Is this attribute present?
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/attributes.py#L39-L44
michaeljoseph/changes
changes/prompt.py
choose_labels
def choose_labels(alternatives):
    """
    Prompt the user to select several labels from the provided alternatives.

    At least one label must be selected.

    :param list alternatives: Sequence of options that are available to select from
    :return: Several selected labels
    """
    if not alternatives:
        raise ValueError

    if not isinstance(alternatives, list):
        raise TypeError

    choice_map = OrderedDict(
        ('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
    )
    # prepend a termination option
    input_terminator = '0'
    choice_map.update({input_terminator: '<done>'})
    choice_map.move_to_end('0', last=False)

    choice_indexes = choice_map.keys()

    choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
    prompt = '\n'.join(
        (
            'Select labels:',
            '\n'.join(choice_lines),
            'Choose from {}'.format(', '.join(choice_indexes)),
        )
    )

    user_choices = set()
    user_choice = None

    while not user_choice == input_terminator:
        if user_choices:
            note('Selected labels: [{}]'.format(', '.join(user_choices)))

        user_choice = click.prompt(
            prompt, type=click.Choice(choice_indexes), default=input_terminator
        )
        done = user_choice == input_terminator
        new_selection = user_choice not in user_choices
        nothing_selected = not user_choices

        if not done and new_selection:
            user_choices.add(choice_map[user_choice])

        if done and nothing_selected:
            error('Please select at least one label')
            user_choice = None

    return user_choices
python
def choose_labels(alternatives):
    """
    Prompt the user to select several labels from the provided alternatives.

    At least one label must be selected.

    :param list alternatives: Sequence of options that are available to select from
    :return: Several selected labels
    """
    if not alternatives:
        raise ValueError

    if not isinstance(alternatives, list):
        raise TypeError

    choice_map = OrderedDict(
        ('{}'.format(i), value) for i, value in enumerate(alternatives, 1)
    )
    # prepend a termination option
    input_terminator = '0'
    choice_map.update({input_terminator: '<done>'})
    choice_map.move_to_end('0', last=False)

    choice_indexes = choice_map.keys()

    choice_lines = ['{} - {}'.format(*c) for c in choice_map.items()]
    prompt = '\n'.join(
        (
            'Select labels:',
            '\n'.join(choice_lines),
            'Choose from {}'.format(', '.join(choice_indexes)),
        )
    )

    user_choices = set()
    user_choice = None

    while not user_choice == input_terminator:
        if user_choices:
            note('Selected labels: [{}]'.format(', '.join(user_choices)))

        user_choice = click.prompt(
            prompt, type=click.Choice(choice_indexes), default=input_terminator
        )
        done = user_choice == input_terminator
        new_selection = user_choice not in user_choices
        nothing_selected = not user_choices

        if not done and new_selection:
            user_choices.add(choice_map[user_choice])

        if done and nothing_selected:
            error('Please select at least one label')
            user_choice = None

    return user_choices
Prompt the user to select several labels from the provided alternatives.

At least one label must be selected.

:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/prompt.py#L8-L63
michaeljoseph/changes
changes/cli.py
work_in
def work_in(dirname=None):
    """
    Context manager version of os.chdir.
    When exited, returns to the working directory prior to entering.
    """
    curdir = os.getcwd()
    try:
        if dirname is not None:
            os.chdir(dirname)

        requests_cache.configure(expire_after=60 * 10 * 10)
        changes.initialise()

        yield
    finally:
        os.chdir(curdir)
python
def work_in(dirname=None):
    """
    Context manager version of os.chdir.
    When exited, returns to the working directory prior to entering.
    """
    curdir = os.getcwd()
    try:
        if dirname is not None:
            os.chdir(dirname)

        requests_cache.configure(expire_after=60 * 10 * 10)
        changes.initialise()

        yield
    finally:
        os.chdir(curdir)
Context manager version of os.chdir.
When exited, returns to the working directory prior to entering.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/cli.py#L20-L36
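A usage sketch, assuming work_in is wrapped with contextlib.contextmanager in the source module (the decorator sits outside this record) and a hypothetical repository directory:

from changes.cli import work_in

with work_in('path/to/my-project'):
    pass  # cwd is the project here and changes.initialise() has run
# the previous working directory is restored on exit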
michaeljoseph/changes
changes/cli.py
stage
def stage(draft, discard, repo_directory, release_name, release_description):
    """
    Stages a release
    """
    with work_in(repo_directory):
        if discard:
            stage_command.discard(release_name, release_description)
        else:
            stage_command.stage(draft, release_name, release_description)
python
def stage(draft, discard, repo_directory, release_name, release_description):
    """
    Stages a release
    """
    with work_in(repo_directory):
        if discard:
            stage_command.discard(release_name, release_description)
        else:
            stage_command.stage(draft, release_name, release_description)
Stages a release
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/cli.py#L85-L93
michaeljoseph/changes
changes/changelog.py
generate_changelog
def generate_changelog(context):
    """Generates an automatic changelog from your commit messages."""
    changelog_content = [
        '\n## [%s](%s/compare/%s...%s)\n\n'
        % (
            context.new_version,
            context.repo_url,
            context.current_version,
            context.new_version,
        )
    ]

    git_log_content = None
    git_log = 'log --oneline --no-merges --no-color'.split(' ')
    try:
        git_log_tag = git_log + ['%s..master' % context.current_version]
        git_log_content = git(git_log_tag)
        log.debug('content: %s' % git_log_content)
    except Exception:
        log.warn('Error diffing previous version, initial release')
        git_log_content = git(git_log)

    git_log_content = replace_sha_with_commit_link(context.repo_url, git_log_content)

    # turn change log entries into markdown bullet points
    if git_log_content:
        [
            changelog_content.append('* %s\n' % line) if line else line
            for line in git_log_content[:-1]
        ]

    write_new_changelog(
        context.repo_url, 'CHANGELOG.md', changelog_content, dry_run=context.dry_run
    )
    log.info('Added content to CHANGELOG.md')

    context.changelog_content = changelog_content
python
def generate_changelog(context):
    """Generates an automatic changelog from your commit messages."""
    changelog_content = [
        '\n## [%s](%s/compare/%s...%s)\n\n'
        % (
            context.new_version,
            context.repo_url,
            context.current_version,
            context.new_version,
        )
    ]

    git_log_content = None
    git_log = 'log --oneline --no-merges --no-color'.split(' ')
    try:
        git_log_tag = git_log + ['%s..master' % context.current_version]
        git_log_content = git(git_log_tag)
        log.debug('content: %s' % git_log_content)
    except Exception:
        log.warn('Error diffing previous version, initial release')
        git_log_content = git(git_log)

    git_log_content = replace_sha_with_commit_link(context.repo_url, git_log_content)

    # turn change log entries into markdown bullet points
    if git_log_content:
        [
            changelog_content.append('* %s\n' % line) if line else line
            for line in git_log_content[:-1]
        ]

    write_new_changelog(
        context.repo_url, 'CHANGELOG.md', changelog_content, dry_run=context.dry_run
    )
    log.info('Added content to CHANGELOG.md')

    context.changelog_content = changelog_content
Generates an automatic changelog from your commit messages.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/changelog.py#L48-L83
michaeljoseph/changes
changes/util.py
extract
def extract(dictionary, keys):
    """
    Extract only the specified keys from a dict

    :param dictionary: source dictionary
    :param keys: list of keys to extract
    :return dict: extracted dictionary
    """
    return dict((k, dictionary[k]) for k in keys if k in dictionary)
python
def extract(dictionary, keys):
    """
    Extract only the specified keys from a dict

    :param dictionary: source dictionary
    :param keys: list of keys to extract
    :return dict: extracted dictionary
    """
    return dict((k, dictionary[k]) for k in keys if k in dictionary)
Extract only the specified keys from a dict

:param dictionary: source dictionary
:param keys: list of keys to extract
:return dict: extracted dictionary
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/util.py#L6-L14
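A minimal sketch of the helper; missing keys are silently skipped:

from changes.util import extract

settings = {'name': 'changes', 'version': '0.7.0', 'debug': True}
print(extract(settings, ['name', 'version', 'missing']))
# -> {'name': 'changes', 'version': '0.7.0'}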
michaeljoseph/changes
changes/util.py
extract_arguments
def extract_arguments(arguments, long_keys, key_prefix='--'):
    """
    :param arguments: dict of command line arguments
    """
    long_arguments = extract(arguments, long_keys)
    return dict(
        [(key.replace(key_prefix, ''), value) for key, value in long_arguments.items()]
    )
python
def extract_arguments(arguments, long_keys, key_prefix='--'):
    """
    :param arguments: dict of command line arguments
    """
    long_arguments = extract(arguments, long_keys)
    return dict(
        [(key.replace(key_prefix, ''), value) for key, value in long_arguments.items()]
    )
:param arguments: dict of command line arguments
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/util.py#L17-L25
michaeljoseph/changes
changes/vcs.py
tag_and_push
def tag_and_push(context):
    """Tags your git repo with the new version number"""
    tag_option = '--annotate'
    if probe.has_signing_key(context):
        tag_option = '--sign'

    shell.dry_run(
        TAG_TEMPLATE % (tag_option, context.new_version, context.new_version),
        context.dry_run,
    )
    shell.dry_run('git push --tags', context.dry_run)
python
def tag_and_push(context):
    """Tags your git repo with the new version number"""
    tag_option = '--annotate'
    if probe.has_signing_key(context):
        tag_option = '--sign'

    shell.dry_run(
        TAG_TEMPLATE % (tag_option, context.new_version, context.new_version),
        context.dry_run,
    )
    shell.dry_run('git push --tags', context.dry_run)
Tags your git repo with the new version number
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/vcs.py#L30-L41
michaeljoseph/changes
changes/shell.py
dry_run
def dry_run(command, dry_run):
    """Executes a shell command unless the dry run option is set"""
    if not dry_run:
        cmd_parts = command.split(' ')
        # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
        return local[cmd_parts[0]](cmd_parts[1:])
    else:
        log.info('Dry run of %s, skipping' % command)
    return True
python
def dry_run(command, dry_run):
    """Executes a shell command unless the dry run option is set"""
    if not dry_run:
        cmd_parts = command.split(' ')
        # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
        return local[cmd_parts[0]](cmd_parts[1:])
    else:
        log.info('Dry run of %s, skipping' % command)
    return True
Executes a shell command unless the dry run option is set
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/shell.py#L8-L16
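A sketch showing both branches (the git command is an arbitrary example):

from changes import shell

shell.dry_run('git status', True)            # logs "Dry run of git status, skipping" and returns True
output = shell.dry_run('git status', False)  # executes via plumbum and returns its output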
michaeljoseph/changes
changes/config.py
project_config
def project_config():
    """Deprecated"""
    project_name = curdir

    config_path = Path(join(project_name, PROJECT_CONFIG_FILE))

    if not exists(config_path):
        store_settings(DEFAULTS.copy())
        return DEFAULTS

    return toml.load(io.open(config_path)) or {}
python
def project_config():
    """Deprecated"""
    project_name = curdir

    config_path = Path(join(project_name, PROJECT_CONFIG_FILE))

    if not exists(config_path):
        store_settings(DEFAULTS.copy())
        return DEFAULTS

    return toml.load(io.open(config_path)) or {}
Deprecated
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/config.py#L195-L205
michaeljoseph/changes
changes/version.py
increment
def increment(version, major=False, minor=False, patch=True):
    """
    Increment a semantic version

    :param version: str of the version to increment
    :param major: bool specifying major level version increment
    :param minor: bool specifying minor level version increment
    :param patch: bool specifying patch level version increment

    :return: str of the incremented version
    """
    version = semantic_version.Version(version)
    if major:
        version.major += 1
        version.minor = 0
        version.patch = 0
    elif minor:
        version.minor += 1
        version.patch = 0
    elif patch:
        version.patch += 1

    return str(version)
python
def increment(version, major=False, minor=False, patch=True):
    """
    Increment a semantic version

    :param version: str of the version to increment
    :param major: bool specifying major level version increment
    :param minor: bool specifying minor level version increment
    :param patch: bool specifying patch level version increment

    :return: str of the incremented version
    """
    version = semantic_version.Version(version)
    if major:
        version.major += 1
        version.minor = 0
        version.patch = 0
    elif minor:
        version.minor += 1
        version.patch = 0
    elif patch:
        version.patch += 1

    return str(version)
Increment a semantic version

:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment

:return: str of the incremented version
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/version.py#L35-L56
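A worked sketch of the bump rules above (patch is the default level):

from changes.version import increment

print(increment('1.2.3'))              # -> '1.2.4'
print(increment('1.2.3', minor=True))  # -> '1.3.0'
print(increment('1.2.3', major=True))  # -> '2.0.0'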
michaeljoseph/changes
changes/version.py
increment_version
def increment_version(context):
    """Increments the __version__ attribute of your module's __init__."""
    attributes.replace_attribute(
        context.module_name, '__version__', context.new_version, dry_run=context.dry_run
    )
    log.info(
        'Bumped version from %s to %s' % (context.current_version, context.new_version)
    )
python
def increment_version(context):
    """Increments the __version__ attribute of your module's __init__."""
    attributes.replace_attribute(
        context.module_name, '__version__', context.new_version, dry_run=context.dry_run
    )
    log.info(
        'Bumped version from %s to %s' % (context.current_version, context.new_version)
    )
Increments the __version__ attribute of your module's __init__.
https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/version.py#L59-L67