repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
---|---|---|---|---|---|---|---|---|---|---|---|
Erotemic/utool | utool/util_progress.py | ProgressIter.ensure_newline | def ensure_newline(self):
"""
use before any custom printing when using the progress iter to ensure
your print statement starts on a new line instead of at the end of a
progress line
"""
DECTCEM_SHOW = '\033[?25h' # show cursor
AT_END = DECTCEM_SHOW + '\n'
if not self._cursor_at_newline:
self.write(AT_END)
self._cursor_at_newline = True | python | def ensure_newline(self):
"""
use before any custom printing when using the progress iter to ensure
your print statement starts on a new line instead of at the end of a
progress line
"""
DECTCEM_SHOW = '\033[?25h' # show cursor
AT_END = DECTCEM_SHOW + '\n'
if not self._cursor_at_newline:
self.write(AT_END)
self._cursor_at_newline = True | [
"def",
"ensure_newline",
"(",
"self",
")",
":",
"DECTCEM_SHOW",
"=",
"'\\033[?25h'",
"# show cursor",
"AT_END",
"=",
"DECTCEM_SHOW",
"+",
"'\\n'",
"if",
"not",
"self",
".",
"_cursor_at_newline",
":",
"self",
".",
"write",
"(",
"AT_END",
")",
"self",
".",
"_cursor_at_newline",
"=",
"True"
] | use before any custom printing when using the progress iter to ensure
your print statement starts on a new line instead of at the end of a
progress line | [
"use",
"before",
"any",
"custom",
"printing",
"when",
"using",
"the",
"progress",
"iter",
"to",
"ensure",
"your",
"print",
"statement",
"starts",
"on",
"a",
"new",
"line",
"instead",
"of",
"at",
"the",
"end",
"of",
"a",
"progress",
"line"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L796-L806 | train |
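The escape code above is the ANSI DECTCEM sequence: `\033[?25h` shows the terminal cursor (and `\033[?25l` hides it). A minimal standalone sketch of the same cursor/newline bookkeeping — a hypothetical toy class, not the real `ProgressIter`:

```python
import sys

HIDE = '\033[?25l'  # ANSI DECTCEM: hide cursor
SHOW = '\033[?25h'  # ANSI DECTCEM: show cursor

class TinyProgress:
    """Toy mirror of the cursor/newline bookkeeping in ProgressIter."""
    def __init__(self):
        self._cursor_at_newline = True

    def update(self, msg):
        # Overwrite the current progress line in place.
        sys.stdout.write(HIDE + '\r' + msg)
        sys.stdout.flush()
        self._cursor_at_newline = False

    def ensure_newline(self):
        if not self._cursor_at_newline:
            sys.stdout.write(SHOW + '\n')  # restore cursor, start a fresh line
            self._cursor_at_newline = True

prog = TinyProgress()
prog.update('step 1/3 ...')
prog.ensure_newline()
print('custom output that now starts on its own line')
```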
Erotemic/utool | utool/util_progress.py | ProgressIter._get_timethresh_heuristics | def _get_timethresh_heuristics(self):
"""
reasonably decent heuristics for how much time to wait before
updating progress.
"""
if self.length > 1E5:
time_thresh = 2.5
elif self.length > 1E4:
time_thresh = 2.0
elif self.length > 1E3:
time_thresh = 1.0
else:
time_thresh = 0.5
return time_thresh | python | def _get_timethresh_heuristics(self):
"""
reasonably decent heuristics for how much time to wait before
updating progress.
"""
if self.length > 1E5:
time_thresh = 2.5
elif self.length > 1E4:
time_thresh = 2.0
elif self.length > 1E3:
time_thresh = 1.0
else:
time_thresh = 0.5
return time_thresh | [
"def",
"_get_timethresh_heuristics",
"(",
"self",
")",
":",
"if",
"self",
".",
"length",
">",
"1E5",
":",
"time_thresh",
"=",
"2.5",
"elif",
"self",
".",
"length",
">",
"1E4",
":",
"time_thresh",
"=",
"2.0",
"elif",
"self",
".",
"length",
">",
"1E3",
":",
"time_thresh",
"=",
"1.0",
"else",
":",
"time_thresh",
"=",
"0.5",
"return",
"time_thresh"
reasonably decent heuristics for how much time to wait before
updating progress. | [
"reasonably",
"decent",
"heuristics",
"for",
"how",
"much",
"time",
"to",
"wait",
"before",
"updating",
"progress",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L808-L821 | train |
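The thresholds mean a progress line is refreshed at most roughly every 0.5–2.5 seconds, with longer iterations updating less often to keep printing overhead negligible. A self-contained restatement with quick sanity checks:

```python
def time_thresh_for(length):
    # Mirrors the heuristic above: more items -> less frequent updates.
    if length > 1e5:
        return 2.5
    elif length > 1e4:
        return 2.0
    elif length > 1e3:
        return 1.0
    return 0.5

assert time_thresh_for(500) == 0.5
assert time_thresh_for(50_000) == 2.0
assert time_thresh_for(2_000_000) == 2.5
```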
timedata-org/loady | loady/code.py | load_code | def load_code(name, base_path=None, recurse=False):
"""Load executable code from a URL or a path"""
if '/' in name:
return load_location(name, base_path, module=False)
return importer.import_code(name, base_path, recurse=recurse) | python | def load_code(name, base_path=None, recurse=False):
"""Load executable code from a URL or a path"""
if '/' in name:
return load_location(name, base_path, module=False)
return importer.import_code(name, base_path, recurse=recurse) | [
"def",
"load_code",
"(",
"name",
",",
"base_path",
"=",
"None",
",",
"recurse",
"=",
"False",
")",
":",
"if",
"'/'",
"in",
"name",
":",
"return",
"load_location",
"(",
"name",
",",
"base_path",
",",
"module",
"=",
"False",
")",
"return",
"importer",
".",
"import_code",
"(",
"name",
",",
"base_path",
",",
"recurse",
"=",
"recurse",
")"
] | Load executable code from a URL or a path | [
"Load",
"executable",
"code",
"from",
"a",
"URL",
"or",
"a",
"path"
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/code.py#L64-L69 | train |
timedata-org/loady | loady/code.py | load | def load(name, base_path=None):
"""Load a module from a URL or a path"""
if '/' in name:
return load_location(name, base_path, module=True)
return importer.import_symbol(name, base_path) | python | def load(name, base_path=None):
"""Load a module from a URL or a path"""
if '/' in name:
return load_location(name, base_path, module=True)
return importer.import_symbol(name, base_path) | [
"def",
"load",
"(",
"name",
",",
"base_path",
"=",
"None",
")",
":",
"if",
"'/'",
"in",
"name",
":",
"return",
"load_location",
"(",
"name",
",",
"base_path",
",",
"module",
"=",
"True",
")",
"return",
"importer",
".",
"import_symbol",
"(",
"name",
",",
"base_path",
")"
] | Load a module from a URL or a path | [
"Load",
"a",
"module",
"from",
"a",
"URL",
"or",
"a",
"path"
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/code.py#L73-L78 | train |
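Both `load_code` and `load` share one dispatch rule: a `/` in the name marks a filesystem path or URL (handled by `load_location`), while anything else is treated as a dotted module/symbol name for the import machinery. A hypothetical stand-in for just that rule:

```python
def dispatch(name):
    # '/' => path-or-URL location; otherwise a dotted import name.
    return 'location' if '/' in name else 'import'

assert dispatch('github.com/user/repo/module.py') == 'location'
assert dispatch('os.path') == 'import'
```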
timedata-org/loady | loady/sys_path.py | extend | def extend(path=None, cache=None):
"""Extend sys.path by a list of git paths."""
if path is None:
path = config.PATH
try:
path = path.split(':')
except:
pass
sys.path.extend([library.to_path(p, cache) for p in path]) | python | def extend(path=None, cache=None):
"""Extend sys.path by a list of git paths."""
if path is None:
path = config.PATH
try:
path = path.split(':')
except:
pass
sys.path.extend([library.to_path(p, cache) for p in path]) | [
"def",
"extend",
"(",
"path",
"=",
"None",
",",
"cache",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"config",
".",
"PATH",
"try",
":",
"path",
"=",
"path",
".",
"split",
"(",
"':'",
")",
"except",
":",
"pass",
"sys",
".",
"path",
".",
"extend",
"(",
"[",
"library",
".",
"to_path",
"(",
"p",
",",
"cache",
")",
"for",
"p",
"in",
"path",
"]",
")"
] | Extend sys.path by a list of git paths. | [
"Extend",
"sys",
".",
"path",
"by",
"a",
"list",
"of",
"git",
"paths",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/sys_path.py#L5-L14 | train |
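The `try`/`except` around `path.split(':')` lets `extend` accept either a PATH-style colon-separated string or an already-split list: a list has no usable `.split`, so the error is swallowed by the bare `except` and the list passes through unchanged. The normalization in isolation (with a narrowed `AttributeError` for clarity):

```python
def normalize(path):
    # 'a:b' -> ['a', 'b']; a list passes through unchanged.
    try:
        return path.split(':')
    except AttributeError:
        return path

assert normalize('repo1:repo2') == ['repo1', 'repo2']
assert normalize(['repo1', 'repo2']) == ['repo1', 'repo2']
```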
timedata-org/loady | loady/sys_path.py | extender | def extender(path=None, cache=None):
"""A context that temporarily extends sys.path and reverts it after the
context is complete."""
old_path = sys.path[:]
extend(path, cache=cache)
try:
yield
finally:
sys.path = old_path | python | def extender(path=None, cache=None):
"""A context that temporarily extends sys.path and reverts it after the
context is complete."""
old_path = sys.path[:]
extend(path, cache=cache)
try:
yield
finally:
sys.path = old_path | [
"def",
"extender",
"(",
"path",
"=",
"None",
",",
"cache",
"=",
"None",
")",
":",
"old_path",
"=",
"sys",
".",
"path",
"[",
":",
"]",
"extend",
"(",
"path",
",",
"cache",
"=",
"cache",
")",
"try",
":",
"yield",
"finally",
":",
"sys",
".",
"path",
"=",
"old_path"
] | A context that temporarily extends sys.path and reverts it after the
context is complete. | [
"A",
"context",
"that",
"temporarily",
"extends",
"sys",
".",
"path",
"and",
"reverts",
"it",
"after",
"the",
"context",
"is",
"complete",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/sys_path.py#L18-L27 | train |
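The same save/extend/restore shape, written as a self-contained context manager (minus the git-path resolution that `library.to_path` performs):

```python
import sys
from contextlib import contextmanager

@contextmanager
def sys_path_extender(extra):
    old_path = sys.path[:]          # snapshot
    sys.path.extend(extra)
    try:
        yield
    finally:
        sys.path = old_path         # revert even on error

with sys_path_extender(['/tmp/plugins']):
    pass  # imports resolved here can see /tmp/plugins
assert '/tmp/plugins' not in sys.path
```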
LEMS/pylems | lems/model/dynamics.py | ConditionalDerivedVariable.add | def add(self, child):
"""
Adds a typed child object to the conditional derived variable.
@param child: Child object to be added.
"""
if isinstance(child, Case):
self.add_case(child)
else:
raise ModelError('Unsupported child element') | python | def add(self, child):
"""
Adds a typed child object to the conditional derived variable.
@param child: Child object to be added.
"""
if isinstance(child, Case):
self.add_case(child)
else:
raise ModelError('Unsupported child element') | [
"def",
"add",
"(",
"self",
",",
"child",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"Case",
")",
":",
"self",
".",
"add_case",
"(",
"child",
")",
"else",
":",
"raise",
"ModelError",
"(",
"'Unsupported child element'",
")"
] | Adds a typed child object to the conditional derived variable.
@param child: Child object to be added. | [
"Adds",
"a",
"typed",
"child",
"object",
"to",
"the",
"conditional",
"derived",
"variable",
"."
] | 4eeb719d2f23650fe16c38626663b69b5c83818b | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/dynamics.py#L203-L213 | train |
LEMS/pylems | lems/model/dynamics.py | EventHandler.add | def add(self, child):
"""
Adds a typed child object to the event handler.
@param child: Child object to be added.
"""
if isinstance(child, Action):
self.add_action(child)
else:
raise ModelError('Unsupported child element') | python | def add(self, child):
"""
Adds a typed child object to the event handler.
@param child: Child object to be added.
"""
if isinstance(child, Action):
self.add_action(child)
else:
raise ModelError('Unsupported child element') | [
"def",
"add",
"(",
"self",
",",
"child",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"Action",
")",
":",
"self",
".",
"add_action",
"(",
"child",
")",
"else",
":",
"raise",
"ModelError",
"(",
"'Unsupported child element'",
")"
] | Adds a typed child object to the event handler.
@param child: Child object to be added. | [
"Adds",
"a",
"typed",
"child",
"object",
"to",
"the",
"event",
"handler",
"."
] | 4eeb719d2f23650fe16c38626663b69b5c83818b | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/dynamics.py#L402-L412 | train |
LEMS/pylems | lems/model/dynamics.py | Behavioral.add | def add(self, child):
"""
Adds a typed child object to the behavioral object.
@param child: Child object to be added.
"""
if isinstance(child, StateVariable):
self.add_state_variable(child)
elif isinstance(child, DerivedVariable):
self.add_derived_variable(child)
elif isinstance(child, ConditionalDerivedVariable):
self.add_conditional_derived_variable(child)
elif isinstance(child, TimeDerivative):
self.add_time_derivative(child)
elif isinstance(child, EventHandler):
self.add_event_handler(child)
elif isinstance(child, KineticScheme):
self.add_kinetic_scheme(child)
else:
raise ModelError('Unsupported child element') | python | def add(self, child):
"""
Adds a typed child object to the behavioral object.
@param child: Child object to be added.
"""
if isinstance(child, StateVariable):
self.add_state_variable(child)
elif isinstance(child, DerivedVariable):
self.add_derived_variable(child)
elif isinstance(child, ConditionalDerivedVariable):
self.add_conditional_derived_variable(child)
elif isinstance(child, TimeDerivative):
self.add_time_derivative(child)
elif isinstance(child, EventHandler):
self.add_event_handler(child)
elif isinstance(child, KineticScheme):
self.add_kinetic_scheme(child)
else:
raise ModelError('Unsupported child element') | [
"def",
"add",
"(",
"self",
",",
"child",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"StateVariable",
")",
":",
"self",
".",
"add_state_variable",
"(",
"child",
")",
"elif",
"isinstance",
"(",
"child",
",",
"DerivedVariable",
")",
":",
"self",
".",
"add_derived_variable",
"(",
"child",
")",
"elif",
"isinstance",
"(",
"child",
",",
"ConditionalDerivedVariable",
")",
":",
"self",
".",
"add_conditional_derived_variable",
"(",
"child",
")",
"elif",
"isinstance",
"(",
"child",
",",
"TimeDerivative",
")",
":",
"self",
".",
"add_time_derivative",
"(",
"child",
")",
"elif",
"isinstance",
"(",
"child",
",",
"EventHandler",
")",
":",
"self",
".",
"add_event_handler",
"(",
"child",
")",
"elif",
"isinstance",
"(",
"child",
",",
"KineticScheme",
")",
":",
"self",
".",
"add_kinetic_scheme",
"(",
"child",
")",
"else",
":",
"raise",
"ModelError",
"(",
"'Unsupported child element'",
")"
] | Adds a typed child object to the behavioral object.
@param child: Child object to be added. | [
"Adds",
"a",
"typed",
"child",
"object",
"to",
"the",
"behavioral",
"object",
"."
] | 4eeb719d2f23650fe16c38626663b69b5c83818b | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/dynamics.py#L757-L777 | train |
LEMS/pylems | lems/model/dynamics.py | Dynamics.add | def add(self, child):
"""
Adds a typed child object to the dynamics object.
@param child: Child object to be added.
"""
if isinstance(child, Regime):
self.add_regime(child)
else:
Behavioral.add(self, child) | python | def add(self, child):
"""
Adds a typed child object to the dynamics object.
@param child: Child object to be added.
"""
if isinstance(child, Regime):
self.add_regime(child)
else:
Behavioral.add(self, child) | [
"def",
"add",
"(",
"self",
",",
"child",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"Regime",
")",
":",
"self",
".",
"add_regime",
"(",
"child",
")",
"else",
":",
"Behavioral",
".",
"add",
"(",
"self",
",",
"child",
")"
] | Adds a typed child object to the dynamics object.
@param child: Child object to be added. | [
"Adds",
"a",
"typed",
"child",
"object",
"to",
"the",
"dynamics",
"object",
"."
] | 4eeb719d2f23650fe16c38626663b69b5c83818b | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/dynamics.py#L876-L886 | train |
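The four `add` methods above all follow one `isinstance`-dispatch idiom, with `Dynamics.add` delegating unrecognized children to its parent's `Behavioral.add`. A hypothetical two-level mirror of that shape:

```python
class ModelError(Exception): ...
class StateVariable: ...
class Regime: ...

class Behavioral:
    def __init__(self):
        self.state_variables = []
    def add(self, child):
        if isinstance(child, StateVariable):
            self.state_variables.append(child)
        else:
            raise ModelError('Unsupported child element')

class Dynamics(Behavioral):
    def __init__(self):
        super().__init__()
        self.regimes = []
    def add(self, child):
        if isinstance(child, Regime):
            self.regimes.append(child)
        else:
            Behavioral.add(self, child)  # fall through to the parent dispatch

d = Dynamics()
d.add(Regime())          # handled locally
d.add(StateVariable())   # delegated to Behavioral.add
```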
glormph/msstitch | src/app/actions/mslookup/biosets.py | create_bioset_lookup | def create_bioset_lookup(lookupdb, spectrafns, set_names):
"""Fills lookup database with biological set names"""
unique_setnames = set(set_names)
lookupdb.store_biosets(((x,) for x in unique_setnames))
set_id_map = lookupdb.get_setnames()
mzmlfiles = ((os.path.basename(fn), set_id_map[setname])
for fn, setname in zip(spectrafns, set_names))
lookupdb.store_mzmlfiles(mzmlfiles)
lookupdb.index_biosets() | python | def create_bioset_lookup(lookupdb, spectrafns, set_names):
"""Fills lookup database with biological set names"""
unique_setnames = set(set_names)
lookupdb.store_biosets(((x,) for x in unique_setnames))
set_id_map = lookupdb.get_setnames()
mzmlfiles = ((os.path.basename(fn), set_id_map[setname])
for fn, setname in zip(spectrafns, set_names))
lookupdb.store_mzmlfiles(mzmlfiles)
lookupdb.index_biosets() | [
"def",
"create_bioset_lookup",
"(",
"lookupdb",
",",
"spectrafns",
",",
"set_names",
")",
":",
"unique_setnames",
"=",
"set",
"(",
"set_names",
")",
"lookupdb",
".",
"store_biosets",
"(",
"(",
"(",
"x",
",",
")",
"for",
"x",
"in",
"unique_setnames",
")",
")",
"set_id_map",
"=",
"lookupdb",
".",
"get_setnames",
"(",
")",
"mzmlfiles",
"=",
"(",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"fn",
")",
",",
"set_id_map",
"[",
"setname",
"]",
")",
"for",
"fn",
",",
"setname",
"in",
"zip",
"(",
"spectrafns",
",",
"set_names",
")",
")",
"lookupdb",
".",
"store_mzmlfiles",
"(",
"mzmlfiles",
")",
"lookupdb",
".",
"index_biosets",
"(",
")"
] | Fills lookup database with biological set names | [
"Fills",
"lookup",
"database",
"with",
"biological",
"set",
"names"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/biosets.py#L4-L12 | train |
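The core of the function is pairing each spectra file's basename with the integer id of its set. With hypothetical data (and a plain dict standing in for what `lookupdb.get_setnames()` would return):

```python
import os

spectrafns = ['/data/run1/a.mzML', '/data/run2/b.mzML']
set_names = ['setA', 'setA']
set_id_map = {'setA': 1}   # stand-in for lookupdb.get_setnames()

mzmlfiles = [(os.path.basename(fn), set_id_map[sn])
             for fn, sn in zip(spectrafns, set_names)]
assert mzmlfiles == [('a.mzML', 1), ('b.mzML', 1)]
```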
Erotemic/utool | utool/util_import.py | get_modpath_from_modname | def get_modpath_from_modname(modname, prefer_pkg=False, prefer_main=False):
"""
Same as get_modpath but doesn't import directly
SeeAlso:
get_modpath
"""
from os.path import dirname, basename, join, exists
initname = '__init__.py'
mainname = '__main__.py'
if modname in sys.modules:
modpath = sys.modules[modname].__file__.replace('.pyc', '.py')
else:
import pkgutil
loader = pkgutil.find_loader(modname)
modpath = loader.filename.replace('.pyc', '.py')
if '.' not in basename(modpath):
modpath = join(modpath, initname)
if prefer_pkg:
if modpath.endswith(initname) or modpath.endswith(mainname):
modpath = dirname(modpath)
if prefer_main:
if modpath.endswith(initname):
main_modpath = modpath[:-len(initname)] + mainname
if exists(main_modpath):
modpath = main_modpath
return modpath | python | def get_modpath_from_modname(modname, prefer_pkg=False, prefer_main=False):
"""
Same as get_modpath but doesn't import directly
SeeAlso:
get_modpath
"""
from os.path import dirname, basename, join, exists
initname = '__init__.py'
mainname = '__main__.py'
if modname in sys.modules:
modpath = sys.modules[modname].__file__.replace('.pyc', '.py')
else:
import pkgutil
loader = pkgutil.find_loader(modname)
modpath = loader.filename.replace('.pyc', '.py')
if '.' not in basename(modpath):
modpath = join(modpath, initname)
if prefer_pkg:
if modpath.endswith(initname) or modpath.endswith(mainname):
modpath = dirname(modpath)
if prefer_main:
if modpath.endswith(initname):
main_modpath = modpath[:-len(initname)] + mainname
if exists(main_modpath):
modpath = main_modpath
return modpath | [
"def",
"get_modpath_from_modname",
"(",
"modname",
",",
"prefer_pkg",
"=",
"False",
",",
"prefer_main",
"=",
"False",
")",
":",
"from",
"os",
".",
"path",
"import",
"dirname",
",",
"basename",
",",
"join",
",",
"exists",
"initname",
"=",
"'__init__.py'",
"mainname",
"=",
"'__main__.py'",
"if",
"modname",
"in",
"sys",
".",
"modules",
":",
"modpath",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
".",
"__file__",
".",
"replace",
"(",
"'.pyc'",
",",
"'.py'",
")",
"else",
":",
"import",
"pkgutil",
"loader",
"=",
"pkgutil",
".",
"find_loader",
"(",
"modname",
")",
"modpath",
"=",
"loader",
".",
"filename",
".",
"replace",
"(",
"'.pyc'",
",",
"'.py'",
")",
"if",
"'.'",
"not",
"in",
"basename",
"(",
"modpath",
")",
":",
"modpath",
"=",
"join",
"(",
"modpath",
",",
"initname",
")",
"if",
"prefer_pkg",
":",
"if",
"modpath",
".",
"endswith",
"(",
"initname",
")",
"or",
"modpath",
".",
"endswith",
"(",
"mainname",
")",
":",
"modpath",
"=",
"dirname",
"(",
"modpath",
")",
"if",
"prefer_main",
":",
"if",
"modpath",
".",
"endswith",
"(",
"initname",
")",
":",
"main_modpath",
"=",
"modpath",
"[",
":",
"-",
"len",
"(",
"initname",
")",
"]",
"+",
"mainname",
"if",
"exists",
"(",
"main_modpath",
")",
":",
"modpath",
"=",
"main_modpath",
"return",
"modpath"
] | Same as get_modpath but doesn't import directly
SeeAlso:
get_modpath | [
"Same",
"as",
"get_modpath",
"but",
"doesn't",
"import",
"directly"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_import.py#L243-L269 | train |
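`pkgutil.find_loader` and `loader.filename` are legacy APIs (deprecated on modern Pythons); a sketch of the same lookup using `importlib` instead:

```python
import importlib.util
from os.path import basename, dirname

spec = importlib.util.find_spec('json')   # locates without executing the module body
modpath = spec.origin                     # e.g. .../lib/python3.x/json/__init__.py
if basename(modpath) == '__init__.py':    # the prefer_pkg=True analogue
    modpath = dirname(modpath)
print(modpath)
```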
Erotemic/utool | utool/util_import.py | check_module_installed | def check_module_installed(modname):
"""
Check if a python module is installed without attempting to import it.
Note that if ``modname`` indicates a child module, the parent module is
always loaded.
Args:
modname (str): module name
Returns:
bool: found
References:
http://stackoverflow.com/questions/14050281/module-exists-without-importing
CommandLine:
python -m utool.util_import check_module_installed --show --verbimp --modname=this
python -m utool.util_import check_module_installed --show --verbimp --modname=guitool
python -m utool.util_import check_module_installed --show --verbimp --modname=guitool.__PYQT__
python -m utool.util_import check_module_installed --show --verbimp --modname=ibeis.scripts.iccv
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_import import * # NOQA
>>> import utool as ut
>>> modname = ut.get_argval('--modname', default='this')
>>> is_installed = check_module_installed(modname)
>>> is_imported = modname in sys.modules
>>> print('module(%r).is_installed = %r' % (modname, is_installed))
>>> print('module(%r).is_imported = %r' % (modname, is_imported))
>>> assert 'this' not in sys.modules, 'module(this) should not have ever been imported'
"""
import pkgutil
if '.' in modname:
# Prevent explicit import if possible
parts = modname.split('.')
base = parts[0]
submods = parts[1:]
loader = pkgutil.find_loader(base)
if loader is not None:
# TODO: check to see if path to the submod exists
submods
return True
loader = pkgutil.find_loader(modname)
is_installed = loader is not None
return is_installed | python | def check_module_installed(modname):
"""
Check if a python module is installed without attempting to import it.
Note that if ``modname`` indicates a child module, the parent module is
always loaded.
Args:
modname (str): module name
Returns:
bool: found
References:
http://stackoverflow.com/questions/14050281/module-exists-without-importing
CommandLine:
python -m utool.util_import check_module_installed --show --verbimp --modname=this
python -m utool.util_import check_module_installed --show --verbimp --modname=guitool
python -m utool.util_import check_module_installed --show --verbimp --modname=guitool.__PYQT__
python -m utool.util_import check_module_installed --show --verbimp --modname=ibeis.scripts.iccv
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_import import * # NOQA
>>> import utool as ut
>>> modname = ut.get_argval('--modname', default='this')
>>> is_installed = check_module_installed(modname)
>>> is_imported = modname in sys.modules
>>> print('module(%r).is_installed = %r' % (modname, is_installed))
>>> print('module(%r).is_imported = %r' % (modname, is_imported))
>>> assert 'this' not in sys.modules, 'module(this) should not have ever been imported'
"""
import pkgutil
if '.' in modname:
# Prevent explicit import if possible
parts = modname.split('.')
base = parts[0]
submods = parts[1:]
loader = pkgutil.find_loader(base)
if loader is not None:
# TODO: check to see if path to the submod exists
submods
return True
loader = pkgutil.find_loader(modname)
is_installed = loader is not None
return is_installed | [
"def",
"check_module_installed",
"(",
"modname",
")",
":",
"import",
"pkgutil",
"if",
"'.'",
"in",
"modname",
":",
"# Prevent explicit import if possible",
"parts",
"=",
"modname",
".",
"split",
"(",
"'.'",
")",
"base",
"=",
"parts",
"[",
"0",
"]",
"submods",
"=",
"parts",
"[",
"1",
":",
"]",
"loader",
"=",
"pkgutil",
".",
"find_loader",
"(",
"base",
")",
"if",
"loader",
"is",
"not",
"None",
":",
"# TODO: check to see if path to the submod exists",
"submods",
"return",
"True",
"loader",
"=",
"pkgutil",
".",
"find_loader",
"(",
"modname",
")",
"is_installed",
"=",
"loader",
"is",
"not",
"None",
"return",
"is_installed"
] | Check if a python module is installed without attempting to import it.
Note that if ``modname`` indicates a child module, the parent module is
always loaded.
Args:
modname (str): module name
Returns:
bool: found
References:
http://stackoverflow.com/questions/14050281/module-exists-without-importing
CommandLine:
python -m utool.util_import check_module_installed --show --verbimp --modname=this
python -m utool.util_import check_module_installed --show --verbimp --modname=guitool
python -m utool.util_import check_module_installed --show --verbimp --modname=guitool.__PYQT__
python -m utool.util_import check_module_installed --show --verbimp --modname=ibeis.scripts.iccv
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_import import * # NOQA
>>> import utool as ut
>>> modname = ut.get_argval('--modname', default='this')
>>> is_installed = check_module_installed(modname)
>>> is_imported = modname in sys.modules
>>> print('module(%r).is_installed = %r' % (modname, is_installed))
>>> print('module(%r).is_imported = %r' % (modname, is_imported))
>>> assert 'this' not in sys.modules, 'module(this) should not have ever been imported' | [
"Check",
"if",
"a",
"python",
"module",
"is",
"installed",
"without",
"attempting",
"to",
"import",
"it",
".",
"Note",
"that",
"modname",
"indicates",
"a",
"child",
"module",
"the",
"parent",
"module",
"is",
"always",
"loaded",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_import.py#L272-L317 | train |
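A modern equivalent sketch using `importlib.util.find_spec` (the caveat about dotted names still applies: locating `a.b` imports the parent package `a`):

```python
import importlib.util

def installed(modname):
    try:
        return importlib.util.find_spec(modname) is not None
    except ModuleNotFoundError:
        # raised when a parent package of a dotted name is missing
        return False

assert installed('json')
assert not installed('definitely_not_a_module_xyz')
```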
Erotemic/utool | utool/util_import.py | import_module_from_fpath | def import_module_from_fpath(module_fpath):
r""" imports module from a file path
Args:
module_fpath (str):
Returns:
module: module
CommandLine:
python -m utool.util_import --test-import_module_from_fpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_import import * # NOQA
>>> import utool
>>> module_fpath = utool.__file__
>>> module = import_module_from_fpath(module_fpath)
>>> result = ('module = %s' % (str(module),))
>>> print(result)
Ignore:
import shutil
import ubelt as ub
test_root = ub.ensure_app_cache_dir('test_fpath_import')
# Clear the directory
shutil.rmtree(test_root)
test_root = ub.ensure_app_cache_dir('test_fpath_import')
# -----
# Define two temporary modules with the same name that are not in sys.path
import sys, os, os.path
from os.path import join
# Even though they have the same name they have different values
mod1_fpath = ub.ensuredir((test_root, 'path1', 'testmod'))
ub.writeto(join(mod1_fpath, '__init__.py'), 'version = 1\nfrom . import sibling\na1 = 1')
ub.writeto(join(mod1_fpath, 'sibling.py'), 'spam = \"ham\"\nb1 = 2')
# Even though they have the same name they have different values
mod2_fpath = ub.ensuredir((test_root, 'path2', 'testmod'))
ub.writeto(join(mod2_fpath, '__init__.py'), 'version = 2\nfrom . import sibling\na2 = 3')
ub.writeto(join(mod2_fpath, 'sibling.py'), 'spam = \"jam\"\nb2 = 4')
# -----
# Neither module should be importable through the normal mechanism
try:
import testmod
assert False, 'should fail'
except ImportError as ex:
pass
mod1 = ut.import_module_from_fpath(mod1_fpath)
print('mod1.version = {!r}'.format(mod1.version))
print('mod1.version = {!r}'.format(mod1.version))
print(mod1.version == 1, 'mod1 version is 1')
print('mod1.a1 = {!r}'.format(mod1.a1))
mod2 = ut.import_module_from_fpath(mod2_fpath)
print('mod2.version = {!r}'.format(mod2.version))
print(mod2.version == 2, 'mod2 version is 2')
print('mod2.a2 = {!r}'.format(mod1.a2))
# BUT Notice how mod1 is mod2
print(mod1 is mod2)
# mod1 has attributes from mod1 and mod2
print('mod1.a1 = {!r}'.format(mod1.a1))
print('mod1.a2 = {!r}'.format(mod1.a2))
print('mod2.a1 = {!r}'.format(mod2.a1))
print('mod2.a2 = {!r}'.format(mod2.a2))
# Both are version 2
print('mod1.version = {!r}'.format(mod1.version))
print('mod2.version = {!r}'.format(mod2.version))
# However sibling always remains at version1 (ham)
print('mod2.sibling.spam = {!r}'.format(mod2.sibling.spam))
# now importing testmod works because it reads from sys.modules
import testmod
# reloading mod1 overwrites attrs again
mod1 = ut.import_module_from_fpath(mod1_fpath)
# Removing both from sys.modules
del sys.modules['testmod']
del sys.modules['testmod.sibling']
mod2 = ut.import_module_from_fpath(mod2_fpath)
print(not hasattr(mod2, 'a1'),
'mod2 no longer has a1 and it reloads itself correctly')
# -------
del sys.modules['testmod']
del sys.modules['testmod.sibling']
mod1 = ut.import_module_from_fpath(mod1_fpath)
# third test
mod3_fpath = ub.ensuredir((test_root, 'path3', 'testmod'))
ub.writeto(join(mod3_fpath, '__init__.py'), 'version = 3')
module_fpath = mod3_fpath
modname = 'testmod'
# fourth test
mod4_fpath = ub.ensuredir((test_root, 'path3', 'novelmod'))
ub.writeto(join(mod4_fpath, '__init__.py'), 'version = 4')
"""
from os.path import basename, splitext, isdir, join, exists, dirname, split
import platform
if isdir(module_fpath):
module_fpath = join(module_fpath, '__init__.py')
print('module_fpath = {!r}'.format(module_fpath))
if not exists(module_fpath):
raise ImportError('module_fpath={!r} does not exist'.format(
module_fpath))
python_version = platform.python_version()
modname = splitext(basename(module_fpath))[0]
if modname == '__init__':
modname = split(dirname(module_fpath))[1]
if util_inject.PRINT_INJECT_ORDER:
if modname not in sys.argv:
util_inject.noinject(modname, N=2, via='ut.import_module_from_fpath')
if python_version.startswith('2.7'):
import imp
module = imp.load_source(modname, module_fpath)
elif python_version.startswith('3'):
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
module = loader.load_module()
# module = loader.exec_module(modname)
else:
raise AssertionError('invalid python version={!r}'.format(
python_version))
return module | python | def import_module_from_fpath(module_fpath):
r""" imports module from a file path
Args:
module_fpath (str):
Returns:
module: module
CommandLine:
python -m utool.util_import --test-import_module_from_fpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_import import * # NOQA
>>> import utool
>>> module_fpath = utool.__file__
>>> module = import_module_from_fpath(module_fpath)
>>> result = ('module = %s' % (str(module),))
>>> print(result)
Ignore:
import shutil
import ubelt as ub
test_root = ub.ensure_app_cache_dir('test_fpath_import')
# Clear the directory
shutil.rmtree(test_root)
test_root = ub.ensure_app_cache_dir('test_fpath_import')
# -----
# Define two temporary modules with the same name that are not in sys.path
import sys, os, os.path
from os.path import join
# Even though they have the same name they have different values
mod1_fpath = ub.ensuredir((test_root, 'path1', 'testmod'))
ub.writeto(join(mod1_fpath, '__init__.py'), 'version = 1\nfrom . import sibling\na1 = 1')
ub.writeto(join(mod1_fpath, 'sibling.py'), 'spam = \"ham\"\nb1 = 2')
# Even though they have the same name they have different values
mod2_fpath = ub.ensuredir((test_root, 'path2', 'testmod'))
ub.writeto(join(mod2_fpath, '__init__.py'), 'version = 2\nfrom . import sibling\na2 = 3')
ub.writeto(join(mod2_fpath, 'sibling.py'), 'spam = \"jam\"\nb2 = 4')
# -----
# Neither module should be importable through the normal mechanism
try:
import testmod
assert False, 'should fail'
except ImportError as ex:
pass
mod1 = ut.import_module_from_fpath(mod1_fpath)
print('mod1.version = {!r}'.format(mod1.version))
print('mod1.version = {!r}'.format(mod1.version))
print(mod1.version == 1, 'mod1 version is 1')
print('mod1.a1 = {!r}'.format(mod1.a1))
mod2 = ut.import_module_from_fpath(mod2_fpath)
print('mod2.version = {!r}'.format(mod2.version))
print(mod2.version == 2, 'mod2 version is 2')
print('mod2.a2 = {!r}'.format(mod1.a2))
# BUT Notice how mod1 is mod2
print(mod1 is mod2)
# mod1 has attributes from mod1 and mod2
print('mod1.a1 = {!r}'.format(mod1.a1))
print('mod1.a2 = {!r}'.format(mod1.a2))
print('mod2.a1 = {!r}'.format(mod2.a1))
print('mod2.a2 = {!r}'.format(mod2.a2))
# Both are version 2
print('mod1.version = {!r}'.format(mod1.version))
print('mod2.version = {!r}'.format(mod2.version))
# However sibling always remains at version1 (ham)
print('mod2.sibling.spam = {!r}'.format(mod2.sibling.spam))
# now importing testmod works because it reads from sys.modules
import testmod
# reloading mod1 overwrites attrs again
mod1 = ut.import_module_from_fpath(mod1_fpath)
# Removing both from sys.modules
del sys.modules['testmod']
del sys.modules['testmod.sibling']
mod2 = ut.import_module_from_fpath(mod2_fpath)
print(not hasattr(mod2, 'a1'),
'mod2 no longer has a1 and it reloads itself correctly')
# -------
del sys.modules['testmod']
del sys.modules['testmod.sibling']
mod1 = ut.import_module_from_fpath(mod1_fpath)
# third test
mod3_fpath = ub.ensuredir((test_root, 'path3', 'testmod'))
ub.writeto(join(mod3_fpath, '__init__.py'), 'version = 3')
module_fpath = mod3_fpath
modname = 'testmod'
# fourth test
mod4_fpath = ub.ensuredir((test_root, 'path3', 'novelmod'))
ub.writeto(join(mod4_fpath, '__init__.py'), 'version = 4')
"""
from os.path import basename, splitext, isdir, join, exists, dirname, split
import platform
if isdir(module_fpath):
module_fpath = join(module_fpath, '__init__.py')
print('module_fpath = {!r}'.format(module_fpath))
if not exists(module_fpath):
raise ImportError('module_fpath={!r} does not exist'.format(
module_fpath))
python_version = platform.python_version()
modname = splitext(basename(module_fpath))[0]
if modname == '__init__':
modname = split(dirname(module_fpath))[1]
if util_inject.PRINT_INJECT_ORDER:
if modname not in sys.argv:
util_inject.noinject(modname, N=2, via='ut.import_module_from_fpath')
if python_version.startswith('2.7'):
import imp
module = imp.load_source(modname, module_fpath)
elif python_version.startswith('3'):
import importlib.machinery
loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
module = loader.load_module()
# module = loader.exec_module(modname)
else:
raise AssertionError('invalid python version={!r}'.format(
python_version))
return module | [
"def",
"import_module_from_fpath",
"(",
"module_fpath",
")",
":",
"from",
"os",
".",
"path",
"import",
"basename",
",",
"splitext",
",",
"isdir",
",",
"join",
",",
"exists",
",",
"dirname",
",",
"split",
"import",
"platform",
"if",
"isdir",
"(",
"module_fpath",
")",
":",
"module_fpath",
"=",
"join",
"(",
"module_fpath",
",",
"'__init__.py'",
")",
"print",
"(",
"'module_fpath = {!r}'",
".",
"format",
"(",
"module_fpath",
")",
")",
"if",
"not",
"exists",
"(",
"module_fpath",
")",
":",
"raise",
"ImportError",
"(",
"'module_fpath={!r} does not exist'",
".",
"format",
"(",
"module_fpath",
")",
")",
"python_version",
"=",
"platform",
".",
"python_version",
"(",
")",
"modname",
"=",
"splitext",
"(",
"basename",
"(",
"module_fpath",
")",
")",
"[",
"0",
"]",
"if",
"modname",
"==",
"'__init__'",
":",
"modname",
"=",
"split",
"(",
"dirname",
"(",
"module_fpath",
")",
")",
"[",
"1",
"]",
"if",
"util_inject",
".",
"PRINT_INJECT_ORDER",
":",
"if",
"modname",
"not",
"in",
"sys",
".",
"argv",
":",
"util_inject",
".",
"noinject",
"(",
"modname",
",",
"N",
"=",
"2",
",",
"via",
"=",
"'ut.import_module_from_fpath'",
")",
"if",
"python_version",
".",
"startswith",
"(",
"'2.7'",
")",
":",
"import",
"imp",
"module",
"=",
"imp",
".",
"load_source",
"(",
"modname",
",",
"module_fpath",
")",
"elif",
"python_version",
".",
"startswith",
"(",
"'3'",
")",
":",
"import",
"importlib",
".",
"machinery",
"loader",
"=",
"importlib",
".",
"machinery",
".",
"SourceFileLoader",
"(",
"modname",
",",
"module_fpath",
")",
"module",
"=",
"loader",
".",
"load_module",
"(",
")",
"# module = loader.exec_module(modname)",
"else",
":",
"raise",
"AssertionError",
"(",
"'invalid python version={!r}'",
".",
"format",
"(",
"python_version",
")",
")",
"return",
"module"
] | r""" imports module from a file path
Args:
module_fpath (str):
Returns:
module: module
CommandLine:
python -m utool.util_import --test-import_module_from_fpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_import import * # NOQA
>>> import utool
>>> module_fpath = utool.__file__
>>> module = import_module_from_fpath(module_fpath)
>>> result = ('module = %s' % (str(module),))
>>> print(result)
Ignore:
import shutil
import ubelt as ub
test_root = ub.ensure_app_cache_dir('test_fpath_import')
# Clear the directory
shutil.rmtree(test_root)
test_root = ub.ensure_app_cache_dir('test_fpath_import')
# -----
# Define two temporary modules with the same name that are not in sys.path
import sys, os, os.path
from os.path import join
# Even though they have the same name they have different values
mod1_fpath = ub.ensuredir((test_root, 'path1', 'testmod'))
ub.writeto(join(mod1_fpath, '__init__.py'), 'version = 1\nfrom . import sibling\na1 = 1')
ub.writeto(join(mod1_fpath, 'sibling.py'), 'spam = \"ham\"\nb1 = 2')
# Even though they have the same name they have different values
mod2_fpath = ub.ensuredir((test_root, 'path2', 'testmod'))
ub.writeto(join(mod2_fpath, '__init__.py'), 'version = 2\nfrom . import sibling\na2 = 3')
ub.writeto(join(mod2_fpath, 'sibling.py'), 'spam = \"jam\"\nb2 = 4')
# -----
# Neither module should be importable through the normal mechanism
try:
import testmod
assert False, 'should fail'
except ImportError as ex:
pass
mod1 = ut.import_module_from_fpath(mod1_fpath)
print('mod1.version = {!r}'.format(mod1.version))
print('mod1.version = {!r}'.format(mod1.version))
print(mod1.version == 1, 'mod1 version is 1')
print('mod1.a1 = {!r}'.format(mod1.a1))
mod2 = ut.import_module_from_fpath(mod2_fpath)
print('mod2.version = {!r}'.format(mod2.version))
print(mod2.version == 2, 'mod2 version is 2')
print('mod2.a2 = {!r}'.format(mod1.a2))
# BUT Notice how mod1 is mod2
print(mod1 is mod2)
# mod1 has attributes from mod1 and mod2
print('mod1.a1 = {!r}'.format(mod1.a1))
print('mod1.a2 = {!r}'.format(mod1.a2))
print('mod2.a1 = {!r}'.format(mod2.a1))
print('mod2.a2 = {!r}'.format(mod2.a2))
# Both are version 2
print('mod1.version = {!r}'.format(mod1.version))
print('mod2.version = {!r}'.format(mod2.version))
# However sibling always remains at version1 (ham)
print('mod2.sibling.spam = {!r}'.format(mod2.sibling.spam))
# now importing testmod works because it reads from sys.modules
import testmod
# reloading mod1 overwrites attrs again
mod1 = ut.import_module_from_fpath(mod1_fpath)
# Removing both from sys.modules
del sys.modules['testmod']
del sys.modules['testmod.sibling']
mod2 = ut.import_module_from_fpath(mod2_fpath)
print(not hasattr(mod2, 'a1'),
'mod2 no longer has a1 and it reloads itself correctly')
# -------
del sys.modules['testmod']
del sys.modules['testmod.sibling']
mod1 = ut.import_module_from_fpath(mod1_fpath)
# third test
mod3_fpath = ub.ensuredir((test_root, 'path3', 'testmod'))
ub.writeto(join(mod3_fpath, '__init__.py'), 'version = 3')
module_fpath = mod3_fpath
modname = 'testmod'
# fourth test
mod4_fpath = ub.ensuredir((test_root, 'path3', 'novelmod'))
ub.writeto(join(mod4_fpath, '__init__.py'), 'version = 4') | [
"r",
"imports",
"module",
"from",
"a",
"file",
"path"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_import.py#L469-L609 | train |
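The `imp` module is removed in Python 3.12 and `loader.load_module()` is deprecated; the documented modern recipe covering both branches is `spec_from_file_location` plus `exec_module`. A self-contained sketch:

```python
import importlib.util
import os
import sys
import tempfile

def import_from_fpath(modname, fpath):
    spec = importlib.util.spec_from_file_location(modname, fpath)
    module = importlib.util.module_from_spec(spec)
    sys.modules[modname] = module   # register first so relative imports resolve
    spec.loader.exec_module(module)
    return module

with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
    f.write('version = 1\n')
mod = import_from_fpath('tmpmod_demo', f.name)
assert mod.version == 1
os.unlink(f.name)
```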
Erotemic/utool | utool/util_print.py | print_locals | def print_locals(*args, **kwargs):
"""
Prints local variables in function.
If no arguments are given, all locals are printed.
Variables can be specified directly (variable values passed in) as varargs
or indirectly (variable names passed in) in kwargs by using keys and a list
of strings.
"""
from utool import util_str
from utool import util_dbg
from utool import util_dict
locals_ = util_dbg.get_parent_frame().f_locals
keys = kwargs.get('keys', None if len(args) == 0 else [])
to_print = {}
for arg in args:
varname = util_dbg.get_varname_from_locals(arg, locals_)
to_print[varname] = arg
if keys is not None:
to_print.update(util_dict.dict_take(locals_, keys))
if not to_print:
to_print = locals_
locals_str = util_str.repr4(to_print)
print(locals_str) | python | def print_locals(*args, **kwargs):
"""
Prints local variables in function.
If no arguments are given, all locals are printed.
Variables can be specified directly (variable values passed in) as varargs
or indirectly (variable names passed in) in kwargs by using keys and a list
of strings.
"""
from utool import util_str
from utool import util_dbg
from utool import util_dict
locals_ = util_dbg.get_parent_frame().f_locals
keys = kwargs.get('keys', None if len(args) == 0 else [])
to_print = {}
for arg in args:
varname = util_dbg.get_varname_from_locals(arg, locals_)
to_print[varname] = arg
if keys is not None:
to_print.update(util_dict.dict_take(locals_, keys))
if not to_print:
to_print = locals_
locals_str = util_str.repr4(to_print)
print(locals_str) | [
"def",
"print_locals",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"utool",
"import",
"util_str",
"from",
"utool",
"import",
"util_dbg",
"from",
"utool",
"import",
"util_dict",
"locals_",
"=",
"util_dbg",
".",
"get_parent_frame",
"(",
")",
".",
"f_locals",
"keys",
"=",
"kwargs",
".",
"get",
"(",
"'keys'",
",",
"None",
"if",
"len",
"(",
"args",
")",
"==",
"0",
"else",
"[",
"]",
")",
"to_print",
"=",
"{",
"}",
"for",
"arg",
"in",
"args",
":",
"varname",
"=",
"util_dbg",
".",
"get_varname_from_locals",
"(",
"arg",
",",
"locals_",
")",
"to_print",
"[",
"varname",
"]",
"=",
"arg",
"if",
"keys",
"is",
"not",
"None",
":",
"to_print",
".",
"update",
"(",
"util_dict",
".",
"dict_take",
"(",
"locals_",
",",
"keys",
")",
")",
"if",
"not",
"to_print",
":",
"to_print",
"=",
"locals_",
"locals_str",
"=",
"util_str",
".",
"repr4",
"(",
"to_print",
")",
"print",
"(",
"locals_str",
")"
] | Prints local variables in function.
If no arguments are given, all locals are printed.
Variables can be specified directly (variable values passed in) as varargs
or indirectly (variable names passed in) in kwargs by using keys and a list
of strings. | [
"Prints",
"local",
"variables",
"in",
"function",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_print.py#L329-L353 | train |
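A usage sketch (assuming `utool` is importable; exact output formatting comes from `ut.repr4`):

```python
import utool as ut

def demo():
    x, y = 1, 'two'
    ut.print_locals()             # no args: prints all locals, roughly {'x': 1, 'y': 'two'}
    ut.print_locals(x)            # direct: value passed in, name recovered from locals
    ut.print_locals(keys=['y'])   # indirect: names passed as strings

demo()
```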
Erotemic/utool | utool/util_grabdata.py | _extract_archive | def _extract_archive(archive_fpath, archive_file, archive_namelist, output_dir,
force_commonprefix=True, prefix=None,
dryrun=False, verbose=not QUIET, overwrite=None):
"""
archive_fpath = zip_fpath
archive_file = zip_file
"""
# force extracted components into a subdirectory if force_commonprefix is
# on; return_path = output_dir
# FIXME: path doesn't work right
if prefix is not None:
output_dir = join(output_dir, prefix)
util_path.ensurepath(output_dir)
archive_basename, ext = split_archive_ext(basename(archive_fpath))
if force_commonprefix and commonprefix(archive_namelist) == '':
# use the archivename as the default common prefix
output_dir = join(output_dir, archive_basename)
util_path.ensurepath(output_dir)
for member in archive_namelist:
(dname, fname) = split(member)
dpath = join(output_dir, dname)
util_path.ensurepath(dpath)
if verbose:
print('[utool] Unarchive ' + fname + ' in ' + dpath)
if not dryrun:
if overwrite is False:
if exists(join(output_dir, member)):
continue
archive_file.extract(member, path=output_dir)
return output_dir | python | def _extract_archive(archive_fpath, archive_file, archive_namelist, output_dir,
force_commonprefix=True, prefix=None,
dryrun=False, verbose=not QUIET, overwrite=None):
"""
archive_fpath = zip_fpath
archive_file = zip_file
"""
# force extracted components into a subdirectory if force_commonprefix is
# on; return_path = output_dir
# FIXME: path doesn't work right
if prefix is not None:
output_dir = join(output_dir, prefix)
util_path.ensurepath(output_dir)
archive_basename, ext = split_archive_ext(basename(archive_fpath))
if force_commonprefix and commonprefix(archive_namelist) == '':
# use the archivename as the default common prefix
output_dir = join(output_dir, archive_basename)
util_path.ensurepath(output_dir)
for member in archive_namelist:
(dname, fname) = split(member)
dpath = join(output_dir, dname)
util_path.ensurepath(dpath)
if verbose:
print('[utool] Unarchive ' + fname + ' in ' + dpath)
if not dryrun:
if overwrite is False:
if exists(join(output_dir, member)):
continue
archive_file.extract(member, path=output_dir)
return output_dir | [
"def",
"_extract_archive",
"(",
"archive_fpath",
",",
"archive_file",
",",
"archive_namelist",
",",
"output_dir",
",",
"force_commonprefix",
"=",
"True",
",",
"prefix",
"=",
"None",
",",
"dryrun",
"=",
"False",
",",
"verbose",
"=",
"not",
"QUIET",
",",
"overwrite",
"=",
"None",
")",
":",
"# force extracted components into a subdirectory if force_commonprefix is",
"# on; return_path = output_dir",
"# FIXME: path doesn't work right",
"if",
"prefix",
"is",
"not",
"None",
":",
"output_dir",
"=",
"join",
"(",
"output_dir",
",",
"prefix",
")",
"util_path",
".",
"ensurepath",
"(",
"output_dir",
")",
"archive_basename",
",",
"ext",
"=",
"split_archive_ext",
"(",
"basename",
"(",
"archive_fpath",
")",
")",
"if",
"force_commonprefix",
"and",
"commonprefix",
"(",
"archive_namelist",
")",
"==",
"''",
":",
"# use the archivename as the default common prefix",
"output_dir",
"=",
"join",
"(",
"output_dir",
",",
"archive_basename",
")",
"util_path",
".",
"ensurepath",
"(",
"output_dir",
")",
"for",
"member",
"in",
"archive_namelist",
":",
"(",
"dname",
",",
"fname",
")",
"=",
"split",
"(",
"member",
")",
"dpath",
"=",
"join",
"(",
"output_dir",
",",
"dname",
")",
"util_path",
".",
"ensurepath",
"(",
"dpath",
")",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Unarchive '",
"+",
"fname",
"+",
"' in '",
"+",
"dpath",
")",
"if",
"not",
"dryrun",
":",
"if",
"overwrite",
"is",
"False",
":",
"if",
"exists",
"(",
"join",
"(",
"output_dir",
",",
"member",
")",
")",
":",
"continue",
"archive_file",
".",
"extract",
"(",
"member",
",",
"path",
"=",
"output_dir",
")",
"return",
"output_dir"
] | archive_fpath = zip_fpath
archive_file = zip_file | [
"archive_fpath",
"=",
"zip_fpath",
"archive_file",
"=",
"zip_file"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L164-L196 | train |
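How the arguments fit together for a zip archive — a sketch of the caller's side (utool wires this up inside its `grab_zipped_url` helper); the archive is built first so the example is self-contained:

```python
import zipfile

with zipfile.ZipFile('example.zip', 'w') as zf:
    zf.writestr('example/data.txt', 'hello')   # tiny throwaway archive

with zipfile.ZipFile('example.zip') as zip_file:
    namelist = zip_file.namelist()             # ['example/data.txt']
    # _extract_archive('example.zip', zip_file, namelist, 'out', dryrun=True)
```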
Erotemic/utool | utool/util_grabdata.py | open_url_in_browser | def open_url_in_browser(url, browsername=None, fallback=False):
r"""
Opens a url in the specified or default browser
Args:
url (str): web url
CommandLine:
python -m utool.util_grabdata --test-open_url_in_browser
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_grabdata import * # NOQA
>>> url = 'http://www.jrsoftware.org/isdl.php'
>>> open_url_in_browser(url, 'chrome')
"""
import webbrowser
print('[utool] Opening url=%r in browser' % (url,))
if browsername is None:
browser = webbrowser.open(url)
else:
browser = get_prefered_browser(pref_list=[browsername], fallback=fallback)
return browser.open(url) | python | def open_url_in_browser(url, browsername=None, fallback=False):
r"""
Opens a url in the specified or default browser
Args:
url (str): web url
CommandLine:
python -m utool.util_grabdata --test-open_url_in_browser
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_grabdata import * # NOQA
>>> url = 'http://www.jrsoftware.org/isdl.php'
>>> open_url_in_browser(url, 'chrome')
"""
import webbrowser
print('[utool] Opening url=%r in browser' % (url,))
if browsername is None:
browser = webbrowser.open(url)
else:
browser = get_prefered_browser(pref_list=[browsername], fallback=fallback)
return browser.open(url) | [
"def",
"open_url_in_browser",
"(",
"url",
",",
"browsername",
"=",
"None",
",",
"fallback",
"=",
"False",
")",
":",
"import",
"webbrowser",
"print",
"(",
"'[utool] Opening url=%r in browser'",
"%",
"(",
"url",
",",
")",
")",
"if",
"browsername",
"is",
"None",
":",
"browser",
"=",
"webbrowser",
".",
"open",
"(",
"url",
")",
"else",
":",
"browser",
"=",
"get_prefered_browser",
"(",
"pref_list",
"=",
"[",
"browsername",
"]",
",",
"fallback",
"=",
"fallback",
")",
"return",
"browser",
".",
"open",
"(",
"url",
")"
] | r"""
Opens a url in the specified or default browser
Args:
url (str): web url
CommandLine:
python -m utool.util_grabdata --test-open_url_in_browser
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_grabdata import * # NOQA
>>> url = 'http://www.jrsoftware.org/isdl.php'
>>> open_url_in_browser(url, 'chrome') | [
"r",
"Opens",
"a",
"url",
"in",
"the",
"specified",
"or",
"default",
"browser"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L199-L222 | train |
Erotemic/utool | utool/util_grabdata.py | url_read | def url_read(url, verbose=True):
r"""
Directly reads data from url
"""
if url.find('://') == -1:
url = 'http://' + url
if verbose:
print('Reading data from url=%r' % (url,))
try:
file_ = _urllib.request.urlopen(url)
#file_ = _urllib.urlopen(url)
except IOError:
raise
data = file_.read()
file_.close()
return data | python | def url_read(url, verbose=True):
r"""
Directly reads data from url
"""
if url.find('://') == -1:
url = 'http://' + url
if verbose:
print('Reading data from url=%r' % (url,))
try:
file_ = _urllib.request.urlopen(url)
#file_ = _urllib.urlopen(url)
except IOError:
raise
data = file_.read()
file_.close()
return data | [
"def",
"url_read",
"(",
"url",
",",
"verbose",
"=",
"True",
")",
":",
"if",
"url",
".",
"find",
"(",
"'://'",
")",
"==",
"-",
"1",
":",
"url",
"=",
"'http://'",
"+",
"url",
"if",
"verbose",
":",
"print",
"(",
"'Reading data from url=%r'",
"%",
"(",
"url",
",",
")",
")",
"try",
":",
"file_",
"=",
"_urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
")",
"#file_ = _urllib.urlopen(url)",
"except",
"IOError",
":",
"raise",
"data",
"=",
"file_",
".",
"read",
"(",
")",
"file_",
".",
"close",
"(",
")",
"return",
"data"
] | r"""
Directly reads data from url | [
"r",
"Directly",
"reads",
"data",
"from",
"url"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L402-L417 | train |
Erotemic/utool | utool/util_grabdata.py | url_read_text | def url_read_text(url, verbose=True):
r"""
Directly reads text data from url
"""
data = url_read(url, verbose)
text = data.decode('utf8')
return text | python | def url_read_text(url, verbose=True):
r"""
Directly reads text data from url
"""
data = url_read(url, verbose)
text = data.decode('utf8')
return text | [
"def",
"url_read_text",
"(",
"url",
",",
"verbose",
"=",
"True",
")",
":",
"data",
"=",
"url_read",
"(",
"url",
",",
"verbose",
")",
"text",
"=",
"data",
".",
"decode",
"(",
"'utf8'",
")",
"return",
"text"
] | r"""
Directly reads text data from url | [
"r",
"Directly",
"reads",
"text",
"data",
"from",
"url"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L420-L426 | train |
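`url_read_text` is `url_read` plus a UTF-8 decode; an equivalent stdlib sketch (requires network access):

```python
from urllib import request

with request.urlopen('http://example.com') as resp:
    data = resp.read()        # bytes, as url_read returns
text = data.decode('utf8')    # the extra step url_read_text adds
```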
Erotemic/utool | utool/util_grabdata.py | clean_dropbox_link | def clean_dropbox_link(dropbox_url):
"""
Dropbox links should be downloaded en masse from dl.dropbox
DEPRECATE?
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> dropbox_url = 'www.dropbox.com/s/123456789abcdef/foobar.zip?dl=0'
>>> cleaned_url = clean_dropbox_link(dropbox_url)
>>> result = str(cleaned_url)
>>> print(result)
dl.dropbox.com/s/123456789abcdef/foobar.zip
"""
cleaned_url = dropbox_url.replace('www.dropbox', 'dl.dropbox')
postfix_list = [
'?dl=0'
]
for postfix in postfix_list:
if cleaned_url.endswith(postfix):
cleaned_url = cleaned_url[:-1 * len(postfix)]
# cleaned_url = cleaned_url.rstrip('?dl=0')
return cleaned_url | python | def clean_dropbox_link(dropbox_url):
"""
Dropbox links should be downloaded en masse from dl.dropbox
DEPRECATE?
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> dropbox_url = 'www.dropbox.com/s/123456789abcdef/foobar.zip?dl=0'
>>> cleaned_url = clean_dropbox_link(dropbox_url)
>>> result = str(cleaned_url)
>>> print(result)
dl.dropbox.com/s/123456789abcdef/foobar.zip
"""
cleaned_url = dropbox_url.replace('www.dropbox', 'dl.dropbox')
postfix_list = [
'?dl=0'
]
for postfix in postfix_list:
if cleaned_url.endswith(postfix):
cleaned_url = cleaned_url[:-1 * len(postfix)]
# cleaned_url = cleaned_url.rstrip('?dl=0')
return cleaned_url | [
"def",
"clean_dropbox_link",
"(",
"dropbox_url",
")",
":",
"cleaned_url",
"=",
"dropbox_url",
".",
"replace",
"(",
"'www.dropbox'",
",",
"'dl.dropbox'",
")",
"postfix_list",
"=",
"[",
"'?dl=0'",
"]",
"for",
"postfix",
"in",
"postfix_list",
":",
"if",
"cleaned_url",
".",
"endswith",
"(",
"postfix",
")",
":",
"cleaned_url",
"=",
"cleaned_url",
"[",
":",
"-",
"1",
"*",
"len",
"(",
"postfix",
")",
"]",
"# cleaned_url = cleaned_url.rstrip('?dl=0')",
"return",
"cleaned_url"
] | Dropbox links should be downloaded en masse from dl.dropbox
DEPRECATE?
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> dropbox_url = 'www.dropbox.com/s/123456789abcdef/foobar.zip?dl=0'
>>> cleaned_url = clean_dropbox_link(dropbox_url)
>>> result = str(cleaned_url)
>>> print(result)
dl.dropbox.com/s/123456789abcdef/foobar.zip | [
"Dropbox",
"links",
"should",
"be",
"downloaded",
"en",
"masse",
"from",
"dl",
".",
"dropbox"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L503-L526 | train |
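The `endswith` + slice dance (rather than the commented-out `rstrip`) matters because `str.rstrip` strips a *character set*, not a suffix, so it can eat trailing filename characters:

```python
assert 'foo.zip?dl=0'[:-len('?dl=0')] == 'foo.zip'   # suffix removal
assert 'foo10'.rstrip('?dl=0') == 'foo1'             # char-set stripping surprise
```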
Erotemic/utool | utool/util_grabdata.py | grab_selenium_chromedriver | def grab_selenium_chromedriver(redownload=False):
r"""
Automatically download selenium chrome driver if needed
CommandLine:
python -m utool.util_grabdata --test-grab_selenium_chromedriver:1
Example:
>>> # DISABLE_DOCTEST
>>> ut.grab_selenium_chromedriver()
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Chrome()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
Example1:
>>> # DISABLE_DOCTEST
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Firefox()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
"""
import utool as ut
import os
import stat
# TODO: use a better download dir (but it must be in the PATH or selenium freaks out)
chromedriver_dpath = ut.ensuredir(ut.truepath('~/bin'))
chromedriver_fpath = join(chromedriver_dpath, 'chromedriver')
if not ut.checkpath(chromedriver_fpath) or redownload:
assert chromedriver_dpath in os.environ['PATH'].split(os.pathsep)
# TODO: make this work for windows as well
if ut.LINUX and ut.util_cplat.is64bit_python():
import requests
rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE', timeout=TIMEOUT)
assert rsp.status_code == 200
url = 'http://chromedriver.storage.googleapis.com/' + rsp.text.strip() + '/chromedriver_linux64.zip'
ut.grab_zipped_url(url, download_dir=chromedriver_dpath, redownload=True)
else:
raise AssertionError('unsupported chrome driver getter script')
if not ut.WIN32:
st = os.stat(chromedriver_fpath)
os.chmod(chromedriver_fpath, st.st_mode | stat.S_IEXEC)
ut.assert_exists(chromedriver_fpath)
os.environ['webdriver.chrome.driver'] = chromedriver_fpath
return chromedriver_fpath | python | def grab_selenium_chromedriver(redownload=False):
r"""
Automatically download selenium chrome driver if needed
CommandLine:
python -m utool.util_grabdata --test-grab_selenium_chromedriver:1
Example:
>>> # DISABLE_DOCTEST
>>> ut.grab_selenium_chromedriver()
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Chrome()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
Example1:
>>> # DISABLE_DOCTEST
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Firefox()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
"""
import utool as ut
import os
import stat
# TODO: use a better download dir (but it must be in the PATH or selenium freaks out)
chromedriver_dpath = ut.ensuredir(ut.truepath('~/bin'))
chromedriver_fpath = join(chromedriver_dpath, 'chromedriver')
if not ut.checkpath(chromedriver_fpath) or redownload:
assert chromedriver_dpath in os.environ['PATH'].split(os.pathsep)
# TODO: make this work for windows as well
if ut.LINUX and ut.util_cplat.is64bit_python():
import requests
rsp = requests.get('http://chromedriver.storage.googleapis.com/LATEST_RELEASE', timeout=TIMEOUT)
assert rsp.status_code == 200
url = 'http://chromedriver.storage.googleapis.com/' + rsp.text.strip() + '/chromedriver_linux64.zip'
ut.grab_zipped_url(url, download_dir=chromedriver_dpath, redownload=True)
else:
raise AssertionError('unsupported chrome driver getter script')
if not ut.WIN32:
st = os.stat(chromedriver_fpath)
os.chmod(chromedriver_fpath, st.st_mode | stat.S_IEXEC)
ut.assert_exists(chromedriver_fpath)
os.environ['webdriver.chrome.driver'] = chromedriver_fpath
return chromedriver_fpath | [
"def",
"grab_selenium_chromedriver",
"(",
"redownload",
"=",
"False",
")",
":",
"import",
"utool",
"as",
"ut",
"import",
"os",
"import",
"stat",
"# TODO: use a better download dir (but it must be in the PATH or selenium freaks out)",
"chromedriver_dpath",
"=",
"ut",
".",
"ensuredir",
"(",
"ut",
".",
"truepath",
"(",
"'~/bin'",
")",
")",
"chromedriver_fpath",
"=",
"join",
"(",
"chromedriver_dpath",
",",
"'chromedriver'",
")",
"if",
"not",
"ut",
".",
"checkpath",
"(",
"chromedriver_fpath",
")",
"or",
"redownload",
":",
"assert",
"chromedriver_dpath",
"in",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"# TODO: make this work for windows as well",
"if",
"ut",
".",
"LINUX",
"and",
"ut",
".",
"util_cplat",
".",
"is64bit_python",
"(",
")",
":",
"import",
"requests",
"rsp",
"=",
"requests",
".",
"get",
"(",
"'http://chromedriver.storage.googleapis.com/LATEST_RELEASE'",
",",
"timeout",
"=",
"TIMEOUT",
")",
"assert",
"rsp",
".",
"status_code",
"==",
"200",
"url",
"=",
"'http://chromedriver.storage.googleapis.com/'",
"+",
"rsp",
".",
"text",
".",
"strip",
"(",
")",
"+",
"'/chromedriver_linux64.zip'",
"ut",
".",
"grab_zipped_url",
"(",
"url",
",",
"download_dir",
"=",
"chromedriver_dpath",
",",
"redownload",
"=",
"True",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"'unsupported chrome driver getter script'",
")",
"if",
"not",
"ut",
".",
"WIN32",
":",
"st",
"=",
"os",
".",
"stat",
"(",
"chromedriver_fpath",
")",
"os",
".",
"chmod",
"(",
"chromedriver_fpath",
",",
"st",
".",
"st_mode",
"|",
"stat",
".",
"S_IEXEC",
")",
"ut",
".",
"assert_exists",
"(",
"chromedriver_fpath",
")",
"os",
".",
"environ",
"[",
"'webdriver.chrome.driver'",
"]",
"=",
"chromedriver_fpath",
"return",
"chromedriver_fpath"
] | r"""
Automatically download selenium chrome driver if needed
CommandLine:
python -m utool.util_grabdata --test-grab_selenium_chromedriver:1
Example:
>>> # DISABLE_DOCTEST
>>> ut.grab_selenium_chromedriver()
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Chrome()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER)
Example1:
>>> # DISABLE_DOCTEST
>>> import selenium.webdriver
>>> driver = selenium.webdriver.Firefox()
>>> driver.get('http://www.google.com')
>>> search_field = driver.find_element_by_name('q')
>>> search_field.send_keys('puppies')
>>> search_field.send_keys(selenium.webdriver.common.keys.Keys.ENTER) | [
"r",
"Automatically",
"download",
"selenium",
"chrome",
"driver",
"if",
"needed"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L627-L675 | train |
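The function above hinges on a two-step URL scheme: query LATEST_RELEASE for a bare version string, then fetch the versioned linux64 zip. A minimal sketch of that resolution in isolation, assuming only the googleapis endpoint already shown in the code (the helper name is hypothetical):

import requests

CHROMEDRIVER_BASE = 'http://chromedriver.storage.googleapis.com'

def latest_chromedriver_zip_url(timeout=30):
    # hypothetical helper, not part of utool
    # LATEST_RELEASE returns a bare version string, e.g. '2.46'
    rsp = requests.get(CHROMEDRIVER_BASE + '/LATEST_RELEASE', timeout=timeout)
    rsp.raise_for_status()
    version = rsp.text.strip()
    # the zip for that version lives at <base>/<version>/chromedriver_linux64.zip
    return '%s/%s/chromedriver_linux64.zip' % (CHROMEDRIVER_BASE, version)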
Erotemic/utool | utool/util_grabdata.py | grab_selenium_driver | def grab_selenium_driver(driver_name=None):
"""
pip install selenium -U
"""
from selenium import webdriver
if driver_name is None:
driver_name = 'firefox'
if driver_name.lower() == 'chrome':
grab_selenium_chromedriver()
return webdriver.Chrome()
elif driver_name.lower() == 'firefox':
# grab_selenium_chromedriver()
return webdriver.Firefox()
else:
raise AssertionError('unknown name = %r' % (driver_name,)) | python | def grab_selenium_driver(driver_name=None):
"""
pip install selenium -U
"""
from selenium import webdriver
if driver_name is None:
driver_name = 'firefox'
if driver_name.lower() == 'chrome':
grab_selenium_chromedriver()
return webdriver.Chrome()
elif driver_name.lower() == 'firefox':
# grab_selenium_chromedriver()
return webdriver.Firefox()
else:
raise AssertionError('unknown name = %r' % (driver_name,)) | [
"def",
"grab_selenium_driver",
"(",
"driver_name",
"=",
"None",
")",
":",
"from",
"selenium",
"import",
"webdriver",
"if",
"driver_name",
"is",
"None",
":",
"driver_name",
"=",
"'firefox'",
"if",
"driver_name",
".",
"lower",
"(",
")",
"==",
"'chrome'",
":",
"grab_selenium_chromedriver",
"(",
")",
"return",
"webdriver",
".",
"Chrome",
"(",
")",
"elif",
"driver_name",
".",
"lower",
"(",
")",
"==",
"'firefox'",
":",
"# grab_selenium_chromedriver()",
"return",
"webdriver",
".",
"Firefox",
"(",
")",
"else",
":",
"raise",
"AssertionError",
"(",
"'unknown name = %r'",
"%",
"(",
"driver_name",
",",
")",
")"
] | pip install selenium -U | [
"pip",
"install",
"selenium",
"-",
"U"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L678-L692 | train |
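Call-site sketch for the dispatcher above: 'chrome' triggers the chromedriver download first, and any unknown name raises an AssertionError. A short usage sketch:

driver = grab_selenium_driver('chrome')  # or 'firefox' (the default)
driver.get('http://www.google.com')
driver.quit()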
Erotemic/utool | utool/util_grabdata.py | grab_file_url | def grab_file_url(file_url, appname='utool', download_dir=None, delay=None,
spoof=False, fname=None, verbose=True, redownload=False,
check_hash=False):
r"""
Downloads a file and returns the local path of the file.
The resulting file is cached, so multiple calls to this function do not
result in multiple downloads.
Args:
file_url (str): url to the file
appname (str): (default = 'utool')
download_dir (str): custom download directory (default = None)
delay (None): delay time before download (default = None)
spoof (bool): (default = False)
fname (str): custom file name (default = None)
verbose (bool): verbosity flag (default = True)
redownload (bool): if True forces redownload of the file
(default = False)
check_hash (bool or iterable): if True, defaults to checking 4 hashes
(in order): custom, md5, sha1, sha256. These hashes are checked
for remote copies and, if found, will check the local file. You may
also specify a list of hashes to check, for example ['md5', 'sha256']
in the specified order. The first verified hash to be found is used
(default = False)
Returns:
str: fpath - file path string
CommandLine:
python -m utool.util_grabdata --test-grab_file_url:0
python -m utool.util_grabdata --test-grab_file_url:1
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut # NOQA
>>> from os.path import basename
>>> ut.exec_funckw(ut.grab_file_url, locals())
>>> file_url = 'http://i.imgur.com/JGrqMnV.png'
>>> redownload = True
>>> fname = 'lena.png'
>>> lena_fpath = ut.grab_file_url(file_url, fname=fname,
>>> redownload=redownload)
>>> result = basename(lena_fpath)
>>> print(result)
lena.png
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut # NOQA
>>> ut.exec_funckw(ut.grab_file_url, locals())
>>> file_url = 'https://lev.cs.rpi.edu/public/models/detect.yolo.12.classes'
>>> fname = 'detect.yolo.12.classes'
>>> check_hash = True
>>> fpath = ut.grab_file_url(file_url, fname=fname, check_hash=check_hash)
"""
file_url = clean_dropbox_link(file_url)
if fname is None:
fname = basename(file_url)
# Download zipfile to
if download_dir is None:
download_dir = util_cplat.get_app_cache_dir(appname)
# Zipfile should unzip to:
fpath = join(download_dir, fname)
# If check hash, get remote hash and assert local copy is the same
if check_hash:
if isinstance(check_hash, (list, tuple)):
hash_list = check_hash
else:
hash_list = ['md5']
# hash_list = ['sha1.custom', 'md5', 'sha1', 'sha256']
# Get expected remote file
hash_remote, hash_tag_remote = grab_file_remote_hash(file_url, hash_list, verbose=verbose)
hash_list = [hash_tag_remote]
# We have a valid candidate hash from remote, check for same hash locally
hash_local, hash_tag_local = get_file_local_hash(fpath, hash_list, verbose=verbose)
if verbose:
print('[utool] Pre Local Hash: %r' % (hash_local, ))
print('[utool] Pre Remote Hash: %r' % (hash_remote, ))
# Check all 4 hash conditions
if hash_remote is None:
# No remote hash provided, turn off post-download hash check
check_hash = False
elif hash_local is None:
if verbose:
print('[utool] Remote hash provided but local hash missing, redownloading.')
redownload = True
elif hash_local == hash_remote:
assert hash_tag_local == hash_tag_remote, ('hash tag disagreement')
else:
if verbose:
print('[utool] Both hashes provided, but they disagree, redownloading.')
redownload = True
# Download
util_path.ensurepath(download_dir)
if redownload or not exists(fpath):
# Download testdata
if verbose:
print('[utool] Downloading file %s' % fpath)
if delay is not None:
print('[utool] delay download by %r seconds' % (delay,))
time.sleep(delay)
download_url(file_url, fpath, spoof=spoof)
else:
if verbose:
print('[utool] Already have file %s' % fpath)
util_path.assert_exists(fpath)
# Post-download local hash verification
if check_hash:
# File has been successfully downloaded, write remote hash to local hash file
hash_fpath = '%s.%s' % (fpath, hash_tag_remote, )
with open(hash_fpath, 'w') as hash_file:
hash_file.write(hash_remote)
# For sanity check (custom) and file verification (hashing), get local hash again
hash_local, hash_tag_local = get_file_local_hash(fpath, hash_list, verbose=verbose)
if verbose:
print('[utool] Post Local Hash: %r' % (hash_local, ))
assert hash_local == hash_remote, 'Post-download hash disagreement'
assert hash_tag_local == hash_tag_remote, 'Post-download hash tag disagreement'
return fpath | python | def grab_file_url(file_url, appname='utool', download_dir=None, delay=None,
spoof=False, fname=None, verbose=True, redownload=False,
check_hash=False):
r"""
Downloads a file and returns the local path of the file.
The resulting file is cached, so multiple calls to this function do not
result in multiple downloads.
Args:
file_url (str): url to the file
appname (str): (default = 'utool')
download_dir (str): custom download directory (default = None)
delay (None): delay time before download (default = None)
spoof (bool): (default = False)
fname (str): custom file name (default = None)
verbose (bool): verbosity flag (default = True)
redownload (bool): if True forces redownload of the file
(default = False)
check_hash (bool or iterable): if True, defaults to checking 4 hashes
(in order): custom, md5, sha1, sha256. These hashes are checked
for remote copies and, if found, will check the local file. You may
also specify a list of hashes to check, for example ['md5', 'sha256']
in the specified order. The first verified hash to be found is used
(default = False)
Returns:
str: fpath - file path string
CommandLine:
python -m utool.util_grabdata --test-grab_file_url:0
python -m utool.util_grabdata --test-grab_file_url:1
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut # NOQA
>>> from os.path import basename
>>> ut.exec_funckw(ut.grab_file_url, locals())
>>> file_url = 'http://i.imgur.com/JGrqMnV.png'
>>> redownload = True
>>> fname = 'lena.png'
>>> lena_fpath = ut.grab_file_url(file_url, fname=fname,
>>> redownload=redownload)
>>> result = basename(lena_fpath)
>>> print(result)
lena.png
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut # NOQA
>>> ut.exec_funckw(ut.grab_file_url, locals())
>>> file_url = 'https://lev.cs.rpi.edu/public/models/detect.yolo.12.classes'
>>> fname = 'detect.yolo.12.classes'
>>> check_hash = True
>>> fpath = ut.grab_file_url(file_url, fname=fname, check_hash=check_hash)
"""
file_url = clean_dropbox_link(file_url)
if fname is None:
fname = basename(file_url)
# Download zipfile to
if download_dir is None:
download_dir = util_cplat.get_app_cache_dir(appname)
# Zipfile should unzip to:
fpath = join(download_dir, fname)
# If check hash, get remote hash and assert local copy is the same
if check_hash:
if isinstance(check_hash, (list, tuple)):
hash_list = check_hash
else:
hash_list = ['md5']
# hash_list = ['sha1.custom', 'md5', 'sha1', 'sha256']
# Get expected remote file
hash_remote, hash_tag_remote = grab_file_remote_hash(file_url, hash_list, verbose=verbose)
hash_list = [hash_tag_remote]
# We have a valid candidate hash from remote, check for same hash locally
hash_local, hash_tag_local = get_file_local_hash(fpath, hash_list, verbose=verbose)
if verbose:
print('[utool] Pre Local Hash: %r' % (hash_local, ))
print('[utool] Pre Remote Hash: %r' % (hash_remote, ))
# Check all 4 hash conditions
if hash_remote is None:
# No remote hash provided, turn off post-download hash check
check_hash = False
elif hash_local is None:
if verbose:
print('[utool] Remote hash provided but local hash missing, redownloading.')
redownload = True
elif hash_local == hash_remote:
assert hash_tag_local == hash_tag_remote, ('hash tag disagreement')
else:
if verbose:
print('[utool] Both hashes provided, but they disagree, redownloading.')
redownload = True
# Download
util_path.ensurepath(download_dir)
if redownload or not exists(fpath):
# Download testdata
if verbose:
print('[utool] Downloading file %s' % fpath)
if delay is not None:
print('[utool] delay download by %r seconds' % (delay,))
time.sleep(delay)
download_url(file_url, fpath, spoof=spoof)
else:
if verbose:
print('[utool] Already have file %s' % fpath)
util_path.assert_exists(fpath)
# Post-download local hash verification
if check_hash:
# File has been successfully downloaded, write remote hash to local hash file
hash_fpath = '%s.%s' % (fpath, hash_tag_remote, )
with open(hash_fpath, 'w') as hash_file:
hash_file.write(hash_remote)
# For sanity check (custom) and file verification (hashing), get local hash again
hash_local, hash_tag_local = get_file_local_hash(fpath, hash_list, verbose=verbose)
if verbose:
print('[utool] Post Local Hash: %r' % (hash_local, ))
assert hash_local == hash_remote, 'Post-download hash disagreement'
assert hash_tag_local == hash_tag_remote, 'Post-download hash tag disagreement'
return fpath | [
"def",
"grab_file_url",
"(",
"file_url",
",",
"appname",
"=",
"'utool'",
",",
"download_dir",
"=",
"None",
",",
"delay",
"=",
"None",
",",
"spoof",
"=",
"False",
",",
"fname",
"=",
"None",
",",
"verbose",
"=",
"True",
",",
"redownload",
"=",
"False",
",",
"check_hash",
"=",
"False",
")",
":",
"file_url",
"=",
"clean_dropbox_link",
"(",
"file_url",
")",
"if",
"fname",
"is",
"None",
":",
"fname",
"=",
"basename",
"(",
"file_url",
")",
"# Download zipfile to",
"if",
"download_dir",
"is",
"None",
":",
"download_dir",
"=",
"util_cplat",
".",
"get_app_cache_dir",
"(",
"appname",
")",
"# Zipfile should unzip to:",
"fpath",
"=",
"join",
"(",
"download_dir",
",",
"fname",
")",
"# If check hash, get remote hash and assert local copy is the same",
"if",
"check_hash",
":",
"if",
"isinstance",
"(",
"check_hash",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"hash_list",
"=",
"check_hash",
"else",
":",
"hash_list",
"=",
"[",
"'md5'",
"]",
"# hash_list = ['sha1.custom', 'md5', 'sha1', 'sha256']",
"# Get expected remote file",
"hash_remote",
",",
"hash_tag_remote",
"=",
"grab_file_remote_hash",
"(",
"file_url",
",",
"hash_list",
",",
"verbose",
"=",
"verbose",
")",
"hash_list",
"=",
"[",
"hash_tag_remote",
"]",
"# We have a valid candidate hash from remote, check for same hash locally",
"hash_local",
",",
"hash_tag_local",
"=",
"get_file_local_hash",
"(",
"fpath",
",",
"hash_list",
",",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Pre Local Hash: %r'",
"%",
"(",
"hash_local",
",",
")",
")",
"print",
"(",
"'[utool] Pre Remote Hash: %r'",
"%",
"(",
"hash_remote",
",",
")",
")",
"# Check all 4 hash conditions",
"if",
"hash_remote",
"is",
"None",
":",
"# No remote hash provided, turn off post-download hash check",
"check_hash",
"=",
"False",
"elif",
"hash_local",
"is",
"None",
":",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Remote hash provided but local hash missing, redownloading.'",
")",
"redownload",
"=",
"True",
"elif",
"hash_local",
"==",
"hash_remote",
":",
"assert",
"hash_tag_local",
"==",
"hash_tag_remote",
",",
"(",
"'hash tag disagreement'",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Both hashes provided, but they disagree, redownloading.'",
")",
"redownload",
"=",
"True",
"# Download",
"util_path",
".",
"ensurepath",
"(",
"download_dir",
")",
"if",
"redownload",
"or",
"not",
"exists",
"(",
"fpath",
")",
":",
"# Download testdata",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Downloading file %s'",
"%",
"fpath",
")",
"if",
"delay",
"is",
"not",
"None",
":",
"print",
"(",
"'[utool] delay download by %r seconds'",
"%",
"(",
"delay",
",",
")",
")",
"time",
".",
"sleep",
"(",
"delay",
")",
"download_url",
"(",
"file_url",
",",
"fpath",
",",
"spoof",
"=",
"spoof",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Already have file %s'",
"%",
"fpath",
")",
"util_path",
".",
"assert_exists",
"(",
"fpath",
")",
"# Post-download local hash verification",
"if",
"check_hash",
":",
"# File has been successfuly downloaded, write remote hash to local hash file",
"hash_fpath",
"=",
"'%s.%s'",
"%",
"(",
"fpath",
",",
"hash_tag_remote",
",",
")",
"with",
"open",
"(",
"hash_fpath",
",",
"'w'",
")",
"as",
"hash_file",
":",
"hash_file",
".",
"write",
"(",
"hash_remote",
")",
"# For sanity check (custom) and file verification (hashing), get local hash again",
"hash_local",
",",
"hash_tag_local",
"=",
"get_file_local_hash",
"(",
"fpath",
",",
"hash_list",
",",
"verbose",
"=",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[utool] Post Local Hash: %r'",
"%",
"(",
"hash_local",
",",
")",
")",
"assert",
"hash_local",
"==",
"hash_remote",
",",
"'Post-download hash disagreement'",
"assert",
"hash_tag_local",
"==",
"hash_tag_remote",
",",
"'Post-download hash tag disagreement'",
"return",
"fpath"
] | r"""
Downloads a file and returns the local path of the file.
The resulting file is cached, so multiple calls to this function do not
result in multiple downloads.
Args:
file_url (str): url to the file
appname (str): (default = 'utool')
download_dir (str): custom download directory (default = None)
delay (None): delay time before download (default = None)
spoof (bool): (default = False)
fname (str): custom file name (default = None)
verbose (bool): verbosity flag (default = True)
redownload (bool): if True forces redownload of the file
(default = False)
check_hash (bool or iterable): if True, defaults to checking 4 hashes
(in order): custom, md5, sha1, sha256. These hashes are checked
for remote copies and, if found, will check the local file. You may
also specify a list of hashes to check, for example ['md5', 'sha256']
in the specified order. The first verified hash to be found is used
(default = False)
Returns:
str: fpath - file path string
CommandLine:
python -m utool.util_grabdata --test-grab_file_url:0
python -m utool.util_grabdata --test-grab_file_url:1
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut # NOQA
>>> from os.path import basename
>>> ut.exec_funckw(ut.grab_file_url, locals())
>>> file_url = 'http://i.imgur.com/JGrqMnV.png'
>>> redownload = True
>>> fname = 'lena.png'
>>> lena_fpath = ut.grab_file_url(file_url, fname=fname,
>>> redownload=redownload)
>>> result = basename(lena_fpath)
>>> print(result)
lena.png
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut # NOQA
>>> ut.exec_funckw(ut.grab_file_url, locals())
>>> file_url = 'https://lev.cs.rpi.edu/public/models/detect.yolo.12.classes'
>>> fname = 'detect.yolo.12.classes'
>>> check_hash = True
>>> fpath = ut.grab_file_url(file_url, fname=fname, check_hash=check_hash) | [
"r",
"Downloads",
"a",
"file",
"and",
"returns",
"the",
"local",
"path",
"of",
"the",
"file",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L782-L905 | train |
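The hash bookkeeping above rests on a sidecar-file convention: after a verified download, the remote hash is written next to the file as '<fpath>.<hash_tag>'. get_file_local_hash itself is not shown in this record, so the following is only a minimal sketch of the md5 branch, with hypothetical helper names:

import hashlib
from os.path import exists

def md5_of_file(fpath, blocksize=2 ** 20):
    # hypothetical helper; hashes the file in chunks to bound memory use
    hasher = hashlib.md5()
    with open(fpath, 'rb') as file_:
        for chunk in iter(lambda: file_.read(blocksize), b''):
            hasher.update(chunk)
    return hasher.hexdigest()

def sidecar_matches(fpath):
    # '<fpath>.md5' holds the remote hash written after the last verified download
    sidecar_fpath = fpath + '.md5'
    if not exists(sidecar_fpath):
        return None
    with open(sidecar_fpath, 'r') as file_:
        expected = file_.read().strip()
    return md5_of_file(fpath) == expected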
Erotemic/utool | utool/util_grabdata.py | grab_zipped_url | def grab_zipped_url(zipped_url, ensure=True, appname='utool',
download_dir=None, force_commonprefix=True, cleanup=False,
redownload=False, spoof=False):
r"""
downloads and unzips the url
Args:
zipped_url (str): url which must be either a .zip or a .tar.gz file
ensure (bool): eager evaluation if True (default = True)
appname (str): (default = 'utool')
download_dir (str): directory to download into
force_commonprefix (bool): (default = True)
cleanup (bool): (default = False)
redownload (bool): (default = False)
spoof (bool): (default = False)
CommandLine:
python -m utool.util_grabdata --exec-grab_zipped_url --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> zipped_url = '?'
>>> ensure = True
>>> appname = 'utool'
>>> download_dir = None
>>> force_commonprefix = True
>>> cleanup = False
>>> redownload = False
>>> spoof = False
>>> result = grab_zipped_url(zipped_url, ensure, appname, download_dir,
>>> force_commonprefix, cleanup, redownload,
>>> spoof)
>>> print(result)
Examples:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> zipped_url = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
>>> zipped_url = 'http://www.spam.com/eggs/data.zip'
"""
zipped_url = clean_dropbox_link(zipped_url)
zip_fname = split(zipped_url)[1]
data_name = split_archive_ext(zip_fname)[0]
# Download zipfile to
if download_dir is None:
download_dir = util_cplat.get_app_cache_dir(appname)
# Zipfile should unzip to:
data_dir = join(download_dir, data_name)
if ensure or redownload:
if redownload:
util_path.remove_dirs(data_dir)
util_path.ensurepath(download_dir)
if not exists(data_dir) or redownload:
# Download and unzip testdata
zip_fpath = realpath(join(download_dir, zip_fname))
#print('[utool] Downloading archive %s' % zip_fpath)
if not exists(zip_fpath) or redownload:
download_url(zipped_url, zip_fpath, spoof=spoof)
unarchive_file(zip_fpath, force_commonprefix)
if cleanup:
util_path.delete(zip_fpath) # Cleanup
if cleanup:
util_path.assert_exists(data_dir)
return util_path.unixpath(data_dir) | python | def grab_zipped_url(zipped_url, ensure=True, appname='utool',
download_dir=None, force_commonprefix=True, cleanup=False,
redownload=False, spoof=False):
r"""
downloads and unzips the url
Args:
zipped_url (str): url which must be either a .zip or a .tar.gz file
ensure (bool): eager evaluation if True (default = True)
appname (str): (default = 'utool')
download_dir (str): directory to download into
force_commonprefix (bool): (default = True)
cleanup (bool): (default = False)
redownload (bool): (default = False)
spoof (bool): (default = False)
CommandLine:
python -m utool.util_grabdata --exec-grab_zipped_url --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> zipped_url = '?'
>>> ensure = True
>>> appname = 'utool'
>>> download_dir = None
>>> force_commonprefix = True
>>> cleanup = False
>>> redownload = False
>>> spoof = False
>>> result = grab_zipped_url(zipped_url, ensure, appname, download_dir,
>>> force_commonprefix, cleanup, redownload,
>>> spoof)
>>> print(result)
Examples:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> zipped_url = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
>>> zipped_url = 'http://www.spam.com/eggs/data.zip'
"""
zipped_url = clean_dropbox_link(zipped_url)
zip_fname = split(zipped_url)[1]
data_name = split_archive_ext(zip_fname)[0]
# Download zipfile to
if download_dir is None:
download_dir = util_cplat.get_app_cache_dir(appname)
# Zipfile should unzip to:
data_dir = join(download_dir, data_name)
if ensure or redownload:
if redownload:
util_path.remove_dirs(data_dir)
util_path.ensurepath(download_dir)
if not exists(data_dir) or redownload:
# Download and unzip testdata
zip_fpath = realpath(join(download_dir, zip_fname))
#print('[utool] Downloading archive %s' % zip_fpath)
if not exists(zip_fpath) or redownload:
download_url(zipped_url, zip_fpath, spoof=spoof)
unarchive_file(zip_fpath, force_commonprefix)
if cleanup:
util_path.delete(zip_fpath) # Cleanup
if cleanup:
util_path.assert_exists(data_dir)
return util_path.unixpath(data_dir) | [
"def",
"grab_zipped_url",
"(",
"zipped_url",
",",
"ensure",
"=",
"True",
",",
"appname",
"=",
"'utool'",
",",
"download_dir",
"=",
"None",
",",
"force_commonprefix",
"=",
"True",
",",
"cleanup",
"=",
"False",
",",
"redownload",
"=",
"False",
",",
"spoof",
"=",
"False",
")",
":",
"zipped_url",
"=",
"clean_dropbox_link",
"(",
"zipped_url",
")",
"zip_fname",
"=",
"split",
"(",
"zipped_url",
")",
"[",
"1",
"]",
"data_name",
"=",
"split_archive_ext",
"(",
"zip_fname",
")",
"[",
"0",
"]",
"# Download zipfile to",
"if",
"download_dir",
"is",
"None",
":",
"download_dir",
"=",
"util_cplat",
".",
"get_app_cache_dir",
"(",
"appname",
")",
"# Zipfile should unzip to:",
"data_dir",
"=",
"join",
"(",
"download_dir",
",",
"data_name",
")",
"if",
"ensure",
"or",
"redownload",
":",
"if",
"redownload",
":",
"util_path",
".",
"remove_dirs",
"(",
"data_dir",
")",
"util_path",
".",
"ensurepath",
"(",
"download_dir",
")",
"if",
"not",
"exists",
"(",
"data_dir",
")",
"or",
"redownload",
":",
"# Download and unzip testdata",
"zip_fpath",
"=",
"realpath",
"(",
"join",
"(",
"download_dir",
",",
"zip_fname",
")",
")",
"#print('[utool] Downloading archive %s' % zip_fpath)",
"if",
"not",
"exists",
"(",
"zip_fpath",
")",
"or",
"redownload",
":",
"download_url",
"(",
"zipped_url",
",",
"zip_fpath",
",",
"spoof",
"=",
"spoof",
")",
"unarchive_file",
"(",
"zip_fpath",
",",
"force_commonprefix",
")",
"if",
"cleanup",
":",
"util_path",
".",
"delete",
"(",
"zip_fpath",
")",
"# Cleanup",
"if",
"cleanup",
":",
"util_path",
".",
"assert_exists",
"(",
"data_dir",
")",
"return",
"util_path",
".",
"unixpath",
"(",
"data_dir",
")"
] | r"""
downloads and unzips the url
Args:
zipped_url (str): url which must be either a .zip or a .tar.gz file
ensure (bool): eager evaluation if True (default = True)
appname (str): (default = 'utool')
download_dir (str): directory to download into
force_commonprefix (bool): (default = True)
cleanup (bool): (default = False)
redownload (bool): (default = False)
spoof (bool): (default = False)
CommandLine:
python -m utool.util_grabdata --exec-grab_zipped_url --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> zipped_url = '?'
>>> ensure = True
>>> appname = 'utool'
>>> download_dir = None
>>> force_commonprefix = True
>>> cleanup = False
>>> redownload = False
>>> spoof = False
>>> result = grab_zipped_url(zipped_url, ensure, appname, download_dir,
>>> force_commonprefix, cleanup, redownload,
>>> spoof)
>>> print(result)
Examples:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> zipped_url = 'https://lev.cs.rpi.edu/public/data/testdata.zip'
>>> zipped_url = 'http://www.spam.com/eggs/data.zip' | [
"r",
"downloads",
"and",
"unzips",
"the",
"url"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L908-L974 | train |
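The cache hit/miss decision above reduces to a path derivation: the expected data_dir is the archive filename minus its archive extension, joined to download_dir. split_archive_ext is not shown in this record, so the extension list in this sketch is an assumption:

from os.path import join, split

def expected_data_dir(zipped_url, download_dir):
    # extension list is assumed; split_archive_ext may recognize more formats
    zip_fname = split(zipped_url)[1]                # e.g. 'testdata.zip'
    data_name = zip_fname
    for ext in ('.zip', '.tar.gz', '.tgz', '.tar.bz2'):
        if zip_fname.endswith(ext):
            data_name = zip_fname[:-len(ext)]       # e.g. 'testdata'
            break
    return join(download_dir, data_name)            # existing dir == cache hit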
Erotemic/utool | utool/util_grabdata.py | scp_pull | def scp_pull(remote_path, local_path='.', remote='localhost', user=None):
r""" wrapper for scp """
import utool as ut
if user is not None:
remote_uri = user + '@' + remote + ':' + remote_path
else:
remote_uri = remote + ':' + remote_path
scp_exe = 'scp'
scp_args = (scp_exe, '-r', remote_uri, local_path)
ut.cmd(scp_args) | python | def scp_pull(remote_path, local_path='.', remote='localhost', user=None):
r""" wrapper for scp """
import utool as ut
if user is not None:
remote_uri = user + '@' + remote + ':' + remote_path
else:
remote_uri = remote + ':' + remote_path
scp_exe = 'scp'
scp_args = (scp_exe, '-r', remote_uri, local_path)
ut.cmd(scp_args) | [
"def",
"scp_pull",
"(",
"remote_path",
",",
"local_path",
"=",
"'.'",
",",
"remote",
"=",
"'localhost'",
",",
"user",
"=",
"None",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"user",
"is",
"not",
"None",
":",
"remote_uri",
"=",
"user",
"+",
"'@'",
"+",
"remote",
"+",
"':'",
"+",
"remote_path",
"else",
":",
"remote_uri",
"=",
"remote",
"+",
"':'",
"+",
"remote_path",
"scp_exe",
"=",
"'scp'",
"scp_args",
"=",
"(",
"scp_exe",
",",
"'-r'",
",",
"remote_uri",
",",
"local_path",
")",
"ut",
".",
"cmd",
"(",
"scp_args",
")"
] | r""" wrapper for scp | [
"r",
"wrapper",
"for",
"scp"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L1123-L1132 | train |
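Given the tuple assembled above, a call like the following (hypothetical user; host reused from the grab_file_url example) shells out to the equivalent scp command line:

# hypothetical host and user, for illustration only
scp_pull('/data/models', local_path='.', remote='lev.cs.rpi.edu', user='jon')
# equivalent shell command: scp -r [email protected]:/data/models .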
Erotemic/utool | utool/util_grabdata.py | list_remote | def list_remote(remote_uri, verbose=False):
"""
remote_uri = '[email protected]'
"""
remote_uri1, remote_dpath = remote_uri.split(':')
if not remote_dpath:
remote_dpath = '.'
import utool as ut
out = ut.cmd('ssh', remote_uri1, 'ls -l %s' % (remote_dpath,), verbose=verbose)
import re
# Find lines that look like ls output
split_lines = [re.split(r'\s+', t) for t in out[0].split('\n')]
paths = [' '.join(t2[8:]) for t2 in split_lines if len(t2) > 8]
return paths | python | def list_remote(remote_uri, verbose=False):
"""
remote_uri = '[email protected]'
"""
remote_uri1, remote_dpath = remote_uri.split(':')
if not remote_dpath:
remote_dpath = '.'
import utool as ut
out = ut.cmd('ssh', remote_uri1, 'ls -l %s' % (remote_dpath,), verbose=verbose)
import re
# Find lines that look like ls output
split_lines = [re.split(r'\s+', t) for t in out[0].split('\n')]
paths = [' '.join(t2[8:]) for t2 in split_lines if len(t2) > 8]
return paths | [
"def",
"list_remote",
"(",
"remote_uri",
",",
"verbose",
"=",
"False",
")",
":",
"remote_uri1",
",",
"remote_dpath",
"=",
"remote_uri",
".",
"split",
"(",
"':'",
")",
"if",
"not",
"remote_dpath",
":",
"remote_dpath",
"=",
"'.'",
"import",
"utool",
"as",
"ut",
"out",
"=",
"ut",
".",
"cmd",
"(",
"'ssh'",
",",
"remote_uri1",
",",
"'ls -l %s'",
"%",
"(",
"remote_dpath",
",",
")",
",",
"verbose",
"=",
"verbose",
")",
"import",
"re",
"# Find lines that look like ls output",
"split_lines",
"=",
"[",
"re",
".",
"split",
"(",
"r'\\s+'",
",",
"t",
")",
"for",
"t",
"in",
"out",
"[",
"0",
"]",
".",
"split",
"(",
"'\\n'",
")",
"]",
"paths",
"=",
"[",
"' '",
".",
"join",
"(",
"t2",
"[",
"8",
":",
"]",
")",
"for",
"t2",
"in",
"split_lines",
"if",
"len",
"(",
"t2",
")",
">",
"8",
"]",
"return",
"paths"
] | remote_uri = '[email protected]' | [
"remote_uri",
"=",
"user"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L1135-L1148 | train |
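The slice t2[8:] above assumes the standard long-listing layout: 'ls -l' prints eight metadata fields (permissions, link count, owner, group, size, month, day, time-or-year) before the name, so joining fields 8 onward also recovers names that contain spaces. A worked sketch on one sample line:

import re

# sample 'ls -l' output line (owner/group names are illustrative)
sample = '-rw-r--r-- 1 owner group 1024 Apr  3 12:00 my file.txt'
fields = re.split(r'\s+', sample)
# fields[0:8] are the metadata columns; the (possibly spaced) name is the rest
assert ' '.join(fields[8:]) == 'my file.txt'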
Erotemic/utool | utool/util_grabdata.py | rsync | def rsync(src_uri, dst_uri, exclude_dirs=[], port=22, dryrun=False):
r"""
Wrapper for rsync
General function to push or pull a directory from a remote server to a
local path
Args:
src_uri (str):
dst_uri (str):
exclude_dirs (list): (default = [])
port (int): (default = 22)
dryrun (bool): (default = False)
References:
http://www.tecmint.com/rsync-local-remote-file-synchronization-commands/
CommandLine:
python -m utool.util_grabdata rsync
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> src_uri = 'local/path/file.txt'
>>> dst_uri = 'host.cs.college.edu:/remote/path/file.txt'
>>> exclude_dirs = []
>>> port = 22
>>> dryrun = False
>>> result = rsync(src_uri, dst_uri, exclude_dirs, port, dryrun)
>>> print(result)
Notes (rsync commandline options):
rsync [OPTION]... SRC [SRC]... DEST
-v : verbose
-r : copies data recursively (but don't preserve timestamps and
permissions while transferring data)
-a : archive mode, allows recursive copying and preserves symlinks,
permissions, user and group ownerships, and timestamps
-z : compress file data
-i, --itemize-changes output a change-summary for all updates
-s, --protect-args : no space-splitting; only wildcard special-chars
-h : human-readable, output numbers in a human-readable format
-P same as --partial --progress
"""
from utool import util_cplat
rsync_exe = 'rsync'
rsync_options = '-avhzP'
#rsync_options += ' --port=%d' % (port,)
rsync_options += ' -e "ssh -p %d"' % (port,)
if len(exclude_dirs) > 0:
exclude_tup = ['--exclude ' + dir_ for dir_ in exclude_dirs]
exclude_opts = ' '.join(exclude_tup)
rsync_options += ' ' + exclude_opts
cmdtuple = (rsync_exe, rsync_options, src_uri, dst_uri)
cmdstr = ' '.join(cmdtuple)
print('[rsync] src_uri = %r ' % (src_uri,))
print('[rsync] dst_uri = %r ' % (dst_uri,))
print('[rsync] cmdstr = %r' % cmdstr)
print(cmdstr)
#if not dryrun:
util_cplat.cmd(cmdstr, dryrun=dryrun) | python | def rsync(src_uri, dst_uri, exclude_dirs=[], port=22, dryrun=False):
r"""
Wrapper for rsync
General function to push or pull a directory from a remote server to a
local path
Args:
src_uri (str):
dst_uri (str):
exclude_dirs (list): (default = [])
port (int): (default = 22)
dryrun (bool): (default = False)
References:
http://www.tecmint.com/rsync-local-remote-file-synchronization-commands/
CommandLine:
python -m utool.util_grabdata rsync
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> src_uri = 'local/path/file.txt'
>>> dst_uri = 'host.cs.college.edu:/remote/path/file.txt'
>>> exclude_dirs = []
>>> port = 22
>>> dryrun = False
>>> result = rsync(src_uri, dst_uri, exclude_dirs, port, dryrun)
>>> print(result)
Notes (rsync commandline options):
rsync [OPTION]... SRC [SRC]... DEST
-v : verbose
-r : copies data recursively (but don't preserve timestamps and
permissions while transferring data)
-a : archive mode, allows recursive copying and preserves symlinks,
permissions, user and group ownerships, and timestamps
-z : compress file data
-i, --itemize-changes output a change-summary for all updates
-s, --protect-args : no space-splitting; only wildcard special-chars
-h : human-readable, output numbers in a human-readable format
-P same as --partial --progress
"""
from utool import util_cplat
rsync_exe = 'rsync'
rsync_options = '-avhzP'
#rsync_options += ' --port=%d' % (port,)
rsync_options += ' -e "ssh -p %d"' % (port,)
if len(exclude_dirs) > 0:
exclude_tup = ['--exclude ' + dir_ for dir_ in exclude_dirs]
exclude_opts = ' '.join(exclude_tup)
rsync_options += ' ' + exclude_opts
cmdtuple = (rsync_exe, rsync_options, src_uri, dst_uri)
cmdstr = ' '.join(cmdtuple)
print('[rsync] src_uri = %r ' % (src_uri,))
print('[rsync] dst_uri = %r ' % (dst_uri,))
print('[rsync] cmdstr = %r' % cmdstr)
print(cmdstr)
#if not dryrun:
util_cplat.cmd(cmdstr, dryrun=dryrun) | [
"def",
"rsync",
"(",
"src_uri",
",",
"dst_uri",
",",
"exclude_dirs",
"=",
"[",
"]",
",",
"port",
"=",
"22",
",",
"dryrun",
"=",
"False",
")",
":",
"from",
"utool",
"import",
"util_cplat",
"rsync_exe",
"=",
"'rsync'",
"rsync_options",
"=",
"'-avhzP'",
"#rsync_options += ' --port=%d' % (port,)",
"rsync_options",
"+=",
"' -e \"ssh -p %d\"'",
"%",
"(",
"port",
",",
")",
"if",
"len",
"(",
"exclude_dirs",
")",
">",
"0",
":",
"exclude_tup",
"=",
"[",
"'--exclude '",
"+",
"dir_",
"for",
"dir_",
"in",
"exclude_dirs",
"]",
"exclude_opts",
"=",
"' '",
".",
"join",
"(",
"exclude_tup",
")",
"rsync_options",
"+=",
"' '",
"+",
"exclude_opts",
"cmdtuple",
"=",
"(",
"rsync_exe",
",",
"rsync_options",
",",
"src_uri",
",",
"dst_uri",
")",
"cmdstr",
"=",
"' '",
".",
"join",
"(",
"cmdtuple",
")",
"print",
"(",
"'[rsync] src_uri = %r '",
"%",
"(",
"src_uri",
",",
")",
")",
"print",
"(",
"'[rsync] dst_uri = %r '",
"%",
"(",
"dst_uri",
",",
")",
")",
"print",
"(",
"'[rsync] cmdstr = %r'",
"%",
"cmdstr",
")",
"print",
"(",
"cmdstr",
")",
"#if not dryrun:",
"util_cplat",
".",
"cmd",
"(",
"cmdstr",
",",
"dryrun",
"=",
"dryrun",
")"
] | r"""
Wrapper for rsync
General function to push or pull a directory from a remote server to a
local path
Args:
src_uri (str):
dst_uri (str):
exclude_dirs (list): (default = [])
port (int): (default = 22)
dryrun (bool): (default = False)
References:
http://www.tecmint.com/rsync-local-remote-file-synchronization-commands/
CommandLine:
python -m utool.util_grabdata rsync
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> import utool as ut
>>> src_uri = 'local/path/file.txt'
>>> dst_uri = 'host.cs.college.edu:/remote/path/file.txt'
>>> exclude_dirs = []
>>> port = 22
>>> dryrun = False
>>> result = rsync(src_uri, dst_uri, exclude_dirs, port, dryrun)
>>> print(result)
Notes (rsync commandline options):
rsync [OPTION]... SRC [SRC]... DEST
-v : verbose
-r : copies data recursively (but don't preserve timestamps and
permissions while transferring data)
-a : archive mode, allows recursive copying and preserves symlinks,
permissions, user and group ownerships, and timestamps
-z : compress file data
-i, --itemize-changes output a change-summary for all updates
-s, --protect-args : no space-splitting; only wildcard special-chars
-h : human-readable, output numbers in a human-readable format
-P same as --partial --progress | [
"r",
"Wrapper",
"for",
"rsync"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_grabdata.py#L1151-L1214 | train |
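Tracing the string assembly above, a call with one excluded directory builds a command line like the following (dryrun is simply passed through to util_cplat.cmd); the paths are hypothetical:

# hypothetical src/dst paths, for illustration only
rsync('local/dir/', 'host:/remote/dir/', exclude_dirs=['.git'], port=22,
      dryrun=True)
# cmdstr: rsync -avhzP -e "ssh -p 22" --exclude .git local/dir/ host:/remote/dir/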
chriso/gauged | gauged/drivers/sqlite.py | SQLiteDriver.get_cache | def get_cache(self, namespace, query_hash, length, start, end):
"""Get a cached value for the specified date range and query"""
query = 'SELECT start, value FROM gauged_cache WHERE namespace = ? ' \
'AND hash = ? AND length = ? AND start BETWEEN ? AND ?'
cursor = self.cursor
cursor.execute(query, (namespace, query_hash, length, start, end))
return tuple(cursor.fetchall()) | python | def get_cache(self, namespace, query_hash, length, start, end):
"""Get a cached value for the specified date range and query"""
query = 'SELECT start, value FROM gauged_cache WHERE namespace = ? ' \
'AND hash = ? AND length = ? AND start BETWEEN ? AND ?'
cursor = self.cursor
cursor.execute(query, (namespace, query_hash, length, start, end))
return tuple(cursor.fetchall()) | [
"def",
"get_cache",
"(",
"self",
",",
"namespace",
",",
"query_hash",
",",
"length",
",",
"start",
",",
"end",
")",
":",
"query",
"=",
"'SELECT start, value FROM gauged_cache WHERE namespace = ? '",
"'AND hash = ? AND length = ? AND start BETWEEN ? AND ?'",
"cursor",
"=",
"self",
".",
"cursor",
"cursor",
".",
"execute",
"(",
"query",
",",
"(",
"namespace",
",",
"query_hash",
",",
"length",
",",
"start",
",",
"end",
")",
")",
"return",
"tuple",
"(",
"cursor",
".",
"fetchall",
"(",
")",
")"
] | Get a cached value for the specified date range and query | [
"Get",
"a",
"cached",
"value",
"for",
"the",
"specified",
"date",
"range",
"and",
"query"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/sqlite.py#L237-L243 | train |
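Usage sketch for the lookup above, assuming a connected driver instance and the gauged_cache schema implied by the query (namespace, hash, length, start, value); the argument values are hypothetical:

# hypothetical values: (start, value) rows for hour-length (3600s) buckets
# whose bucket start falls in [0, 86400]
rows = driver.get_cache(namespace=1, query_hash=b'\x00' * 20,
                        length=3600, start=0, end=86400)
for bucket_start, value in rows:
    print(bucket_start, value)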
ColinDuquesnoy/QCrash | qcrash/_dialogs/review.py | DlgReview.review | def review(cls, content, log, parent, window_icon): # pragma: no cover
"""
Reviews the final bug report.
:param content: content of the final report, before review
:param parent: parent widget
:returns: the reviewed report content or None if the review was
canceled.
"""
dlg = DlgReview(content, log, parent, window_icon)
if dlg.exec_():
return dlg.ui.edit_main.toPlainText(), \
dlg.ui.edit_log.toPlainText()
return None, None | python | def review(cls, content, log, parent, window_icon): # pragma: no cover
"""
Reviews the final bug report.
:param content: content of the final report, before review
:param parent: parent widget
:returns: the reviewed report content or None if the review was
canceled.
"""
dlg = DlgReview(content, log, parent, window_icon)
if dlg.exec_():
return dlg.ui.edit_main.toPlainText(), \
dlg.ui.edit_log.toPlainText()
return None, None | [
"def",
"review",
"(",
"cls",
",",
"content",
",",
"log",
",",
"parent",
",",
"window_icon",
")",
":",
"# pragma: no cover",
"dlg",
"=",
"DlgReview",
"(",
"content",
",",
"log",
",",
"parent",
",",
"window_icon",
")",
"if",
"dlg",
".",
"exec_",
"(",
")",
":",
"return",
"dlg",
".",
"ui",
".",
"edit_main",
".",
"toPlainText",
"(",
")",
",",
"dlg",
".",
"ui",
".",
"edit_log",
".",
"toPlainText",
"(",
")",
"return",
"None",
",",
"None"
] | Reviews the final bug report.
:param content: content of the final report, before review
:param parent: parent widget
:returns: the reviewed report content or None if the review was
canceled. | [
"Reviews",
"the",
"final",
"bug",
"report",
"."
] | 775e1b15764e2041a8f9a08bea938e4d6ce817c7 | https://github.com/ColinDuquesnoy/QCrash/blob/775e1b15764e2041a8f9a08bea938e4d6ce817c7/qcrash/_dialogs/review.py#L44-L58 | train |
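Call-site sketch: the classmethod blocks on the dialog and returns the edited (content, log) pair, or (None, None) when the user cancels; the variable names are hypothetical:

# report_text, log_text, main_window, app_icon are hypothetical placeholders
content, log = DlgReview.review(report_text, log_text, main_window, app_icon)
if content is None:
    pass  # review was cancelled; skip submitting the report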
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/version/version.py | Version.get_version | def get_version():
"""
Return version from setup.py
"""
version_desc = open(os.path.join(os.path.abspath(APISettings.VERSION_FILE)))
version_file = version_desc.read()
try:
version = re.search(r"version=['\"]([^'\"]+)['\"]", version_file).group(1)
return version
except FileNotFoundError:
Shell.fail('File not found!')
raise FileNotFoundError
except ValueError:
Shell.fail('Version not found in file ' + version_file + '!')
raise ValueError
finally:
version_desc.close() | python | def get_version():
"""
Return version from setup.py
"""
version_desc = open(os.path.join(os.path.abspath(APISettings.VERSION_FILE)))
version_file = version_desc.read()
try:
version = re.search(r"version=['\"]([^'\"]+)['\"]", version_file).group(1)
return version
except FileNotFoundError:
Shell.fail('File not found!')
raise FileNotFoundError
except ValueError:
Shell.fail('Version not found in file ' + version_file + '!')
raise ValueError
finally:
version_desc.close() | [
"def",
"get_version",
"(",
")",
":",
"version_desc",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"APISettings",
".",
"VERSION_FILE",
")",
")",
")",
"version_file",
"=",
"version_desc",
".",
"read",
"(",
")",
"try",
":",
"version",
"=",
"re",
".",
"search",
"(",
"r\"version=['\\\"]([^'\\\"]+)['\\\"]\"",
",",
"version_file",
")",
".",
"group",
"(",
"1",
")",
"return",
"version",
"except",
"FileNotFoundError",
":",
"Shell",
".",
"fail",
"(",
"'File not found!'",
")",
"raise",
"FileNotFoundError",
"except",
"ValueError",
":",
"Shell",
".",
"fail",
"(",
"'Version not found in file '",
"+",
"version_file",
"+",
"'!'",
")",
"raise",
"ValueError",
"finally",
":",
"version_desc",
".",
"close",
"(",
")"
] | Return version from setup.py | [
"Return",
"version",
"from",
"setup",
".",
"py"
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/version/version.py#L13-L30 | train |
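The regex above pulls the version keyword argument out of a setup.py; a worked example of the pattern in isolation:

import re

setup_py = "setup(name='pkg', version='1.4.7', packages=[])"  # sample content
match = re.search(r"version=['\"]([^'\"]+)['\"]", setup_py)
assert match.group(1) == '1.4.7'   # the pattern also matches version="1.4.7"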
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/version/version.py | Version.set_version | def set_version(old_version, new_version):
"""
Write new version into VERSION_FILE
"""
try:
if APISettings.DEBUG:
Shell.debug('* ' + old_version + ' --> ' + new_version)
return True
for line in fileinput.input(os.path.abspath(APISettings.VERSION_FILE), inplace=True):
print(line.replace(old_version, new_version), end='')
Shell.success('* ' + old_version + ' --> ' + new_version)
except FileNotFoundError:
Shell.warn('File not found!') | python | def set_version(old_version, new_version):
"""
Write new version into VERSION_FILE
"""
try:
if APISettings.DEBUG:
Shell.debug('* ' + old_version + ' --> ' + new_version)
return True
for line in fileinput.input(os.path.abspath(APISettings.VERSION_FILE), inplace=True):
print(line.replace(old_version, new_version), end='')
Shell.success('* ' + old_version + ' --> ' + new_version)
except FileNotFoundError:
Shell.warn('File not found!') | [
"def",
"set_version",
"(",
"old_version",
",",
"new_version",
")",
":",
"try",
":",
"if",
"APISettings",
".",
"DEBUG",
":",
"Shell",
".",
"debug",
"(",
"'* '",
"+",
"old_version",
"+",
"' --> '",
"+",
"new_version",
")",
"return",
"True",
"for",
"line",
"in",
"fileinput",
".",
"input",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"APISettings",
".",
"VERSION_FILE",
")",
",",
"inplace",
"=",
"True",
")",
":",
"print",
"(",
"line",
".",
"replace",
"(",
"old_version",
",",
"new_version",
")",
",",
"end",
"=",
"''",
")",
"Shell",
".",
"success",
"(",
"'* '",
"+",
"old_version",
"+",
"' --> '",
"+",
"new_version",
")",
"except",
"FileNotFoundError",
":",
"Shell",
".",
"warn",
"(",
"'File not found!'",
")"
] | Write new version into VERSION_FILE | [
"Write",
"new",
"version",
"into",
"VERSION_FILE"
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/version/version.py#L33-L46 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/version/version.py | Version.set_major | def set_major(self):
"""
Increment the major number of project
"""
old_version = self.get_version()
new_version = str(int(old_version.split('.', 5)[0])+1) + '.0.0'
self.set_version(old_version, new_version) | python | def set_major(self):
"""
Increment the major number of project
"""
old_version = self.get_version()
new_version = str(int(old_version.split('.', 5)[0])+1) + '.0.0'
self.set_version(old_version, new_version) | [
"def",
"set_major",
"(",
"self",
")",
":",
"old_version",
"=",
"self",
".",
"get_version",
"(",
")",
"new_version",
"=",
"str",
"(",
"int",
"(",
"old_version",
".",
"split",
"(",
"'.'",
",",
"5",
")",
"[",
"0",
"]",
")",
"+",
"1",
")",
"+",
"'.0.0'",
"self",
".",
"set_version",
"(",
"old_version",
",",
"new_version",
")"
] | Increment the major number of project | [
"Increment",
"the",
"major",
"number",
"of",
"project"
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/version/version.py#L71-L77 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/version/version.py | Version.set_minor | def set_minor(self):
"""
Increment the minor number of project
"""
old_version = self.get_version()
new_version = str(int(old_version.split('.', 5)[0])) + '.' + \
str(int(old_version.split('.', 5)[1])+1) + '.0'
self.set_version(old_version, new_version) | python | def set_minor(self):
"""
Increment the minor number of project
"""
old_version = self.get_version()
new_version = str(int(old_version.split('.', 5)[0])) + '.' + \
str(int(old_version.split('.', 5)[1])+1) + '.0'
self.set_version(old_version, new_version) | [
"def",
"set_minor",
"(",
"self",
")",
":",
"old_version",
"=",
"self",
".",
"get_version",
"(",
")",
"new_version",
"=",
"str",
"(",
"int",
"(",
"old_version",
".",
"split",
"(",
"'.'",
",",
"5",
")",
"[",
"0",
"]",
")",
")",
"+",
"'.'",
"+",
"str",
"(",
"int",
"(",
"old_version",
".",
"split",
"(",
"'.'",
",",
"5",
")",
"[",
"1",
"]",
")",
"+",
"1",
")",
"+",
"'.0'",
"self",
".",
"set_version",
"(",
"old_version",
",",
"new_version",
")"
] | Increment the minor number of project | [
"Increment",
"the",
"minor",
"number",
"of",
"project"
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/version/version.py#L79-L86 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/version/version.py | Version.set_patch | def set_patch(self, pre_release_tag=''):
"""
Increment the patch number of project
:var release_tag describes the tag ('a', 'b', 'rc', ...)
:var release_tag_version describes the number behind the 'a', 'b' or 'rc'
For e.g.: in a patch component ending in 'b2', 'b' is the release_tag and '2' is the release_tag_version.
"""
current_version = self.get_version()
current_patch = self.get_patch_version(current_version)
current_pre_release_tag = self.get_current_pre_release_tag(current_patch)
current_RELEASE_SEPARATOR = self.get_current_RELEASE_SEPARATOR(current_patch)
new_patch = ''
# The new patch should get a release tag
if pre_release_tag:
# Check, if the current patch already contains a pre_release_tag.
if current_pre_release_tag:
new_patch = str(current_patch.split(current_pre_release_tag, 2)[0]) + pre_release_tag
if pre_release_tag == current_pre_release_tag:
new_patch += str(int(current_patch.split(current_pre_release_tag, 2)[1])+1)
else:
new_patch += '0'
# The current patch does not contain a pre_release_tag.
else:
new_patch = str(int(current_patch)+1) + \
APISettings.RELEASE_SEPARATOR + \
pre_release_tag + \
'0'
# The new patch should not contain any tag. So just increase it.
else:
if current_RELEASE_SEPARATOR:
new_patch = str(int(current_patch.split(current_RELEASE_SEPARATOR, 2)[0])+1)
elif current_pre_release_tag:
new_patch = str(int(current_patch.split(current_pre_release_tag, 2)[0])+1)
else:
new_patch = str(int(current_patch)+1)
new_version = str(int(current_version.split('.', 5)[0])) + '.' + \
str(int(current_version.split('.', 5)[1])) + '.' + \
str(new_patch)
self.set_version(current_version, new_version) | python | def set_patch(self, pre_release_tag=''):
"""
Increment the patch number of project
:var release_tag describes the tag ('a', 'b', 'rc', ...)
:var release_tag_version describes the number behind the 'a', 'b' or 'rc'
For e.g.: in a patch component ending in 'b2', 'b' is the release_tag and '2' is the release_tag_version.
"""
current_version = self.get_version()
current_patch = self.get_patch_version(current_version)
current_pre_release_tag = self.get_current_pre_release_tag(current_patch)
current_RELEASE_SEPARATOR = self.get_current_RELEASE_SEPARATOR(current_patch)
new_patch = ''
# The new patch should get a release tag
if pre_release_tag:
# Check, if the current patch already contains a pre_release_tag.
if current_pre_release_tag:
new_patch = str(current_patch.split(current_pre_release_tag, 2)[0]) + pre_release_tag
if pre_release_tag == current_pre_release_tag:
new_patch += str(int(current_patch.split(current_pre_release_tag, 2)[1])+1)
else:
new_patch += '0'
# The current patch does not contain a pre_release_tag.
else:
new_patch = str(int(current_patch)+1) + \
APISettings.RELEASE_SEPARATOR + \
pre_release_tag + \
'0'
# The new patch should not contain any tag. So just increase it.
else:
if current_RELEASE_SEPARATOR:
new_patch = str(int(current_patch.split(current_RELEASE_SEPARATOR, 2)[0])+1)
elif current_pre_release_tag:
new_patch = str(int(current_patch.split(current_pre_release_tag, 2)[0])+1)
else:
new_patch = str(int(current_patch)+1)
new_version = str(int(current_version.split('.', 5)[0])) + '.' + \
str(int(current_version.split('.', 5)[1])) + '.' + \
str(new_patch)
self.set_version(current_version, new_version) | [
"def",
"set_patch",
"(",
"self",
",",
"pre_release_tag",
"=",
"''",
")",
":",
"current_version",
"=",
"self",
".",
"get_version",
"(",
")",
"current_patch",
"=",
"self",
".",
"get_patch_version",
"(",
"current_version",
")",
"current_pre_release_tag",
"=",
"self",
".",
"get_current_pre_release_tag",
"(",
"current_patch",
")",
"current_RELEASE_SEPARATOR",
"=",
"self",
".",
"get_current_RELEASE_SEPARATOR",
"(",
"current_patch",
")",
"new_patch",
"=",
"''",
"# The new patch should get a release tag",
"if",
"pre_release_tag",
":",
"# Check, if the current patch already contains a pre_release_tag.",
"if",
"current_pre_release_tag",
":",
"new_patch",
"=",
"str",
"(",
"current_patch",
".",
"split",
"(",
"current_pre_release_tag",
",",
"2",
")",
"[",
"0",
"]",
")",
"+",
"pre_release_tag",
"if",
"pre_release_tag",
"==",
"current_pre_release_tag",
":",
"new_patch",
"+=",
"str",
"(",
"int",
"(",
"current_patch",
".",
"split",
"(",
"current_pre_release_tag",
",",
"2",
")",
"[",
"1",
"]",
")",
"+",
"1",
")",
"else",
":",
"new_patch",
"+=",
"'0'",
"# The current patch does not contains a pre_release_tag.",
"else",
":",
"new_patch",
"=",
"str",
"(",
"int",
"(",
"current_patch",
")",
"+",
"1",
")",
"+",
"APISettings",
".",
"RELEASE_SEPARATOR",
"+",
"pre_release_tag",
"+",
"'0'",
"# The new patch should not contain any tag. So just increase it.",
"else",
":",
"if",
"current_RELEASE_SEPARATOR",
":",
"new_patch",
"=",
"str",
"(",
"int",
"(",
"current_patch",
".",
"split",
"(",
"current_RELEASE_SEPARATOR",
",",
"2",
")",
"[",
"0",
"]",
")",
"+",
"1",
")",
"elif",
"current_pre_release_tag",
":",
"new_patch",
"=",
"str",
"(",
"int",
"(",
"current_patch",
".",
"split",
"(",
"current_pre_release_tag",
",",
"2",
")",
"[",
"0",
"]",
")",
"+",
"1",
")",
"else",
":",
"new_patch",
"=",
"str",
"(",
"int",
"(",
"current_patch",
")",
"+",
"1",
")",
"new_version",
"=",
"str",
"(",
"int",
"(",
"current_version",
".",
"split",
"(",
"'.'",
",",
"5",
")",
"[",
"0",
"]",
")",
")",
"+",
"'.'",
"+",
"str",
"(",
"int",
"(",
"current_version",
".",
"split",
"(",
"'.'",
",",
"5",
")",
"[",
"1",
"]",
")",
")",
"+",
"'.'",
"+",
"str",
"(",
"new_patch",
")",
"self",
".",
"set_version",
"(",
"current_version",
",",
"new_version",
")"
] | Increment the patch number of project
:var release_tag describes the tag ('a', 'b', 'rc', ...)
:var release_tag_version describes the number behind the 'a', 'b' or 'rc'
For e.g.: in a patch component ending in 'b2', 'b' is the release_tag and '2' is the release_tag_version. | [
"Increment",
"the",
"patch",
"number",
"of",
"project"
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/version/version.py#L88-L134 | train |
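A worked trace of the branches above, assuming APISettings.RELEASE_SEPARATOR is '-' (the separator's actual value is not shown in this record):

# assumed separator: '-'
# current patch  pre_release_tag  ->  new patch component
# '2'            'b'              ->  '3-b0'   (no tag yet: bump patch, append tag + '0')
# '3-b0'         'b'              ->  '3-b1'   (same tag: bump only the tag number)
# '3-b1'         'rc'             ->  '3-rc0'  (different tag: keep patch, reset to 0)
# '3-rc0'        ''               ->  '4'      (no tag requested: drop tag, bump patch)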
dsoprea/NsqSpinner | nsq/connection.py | _Buffer.flush | def flush(self):
"""Return all buffered data, and clear the stack."""
(slice_, self.__buffer) = (self.__buffer, '')
self.__size = 0
return slice_ | python | def flush(self):
"""Return all buffered data, and clear the stack."""
(slice_, self.__buffer) = (self.__buffer, '')
self.__size = 0
return slice_ | [
"def",
"flush",
"(",
"self",
")",
":",
"(",
"slice_",
",",
"self",
".",
"__buffer",
")",
"=",
"(",
"self",
".",
"__buffer",
",",
"''",
")",
"self",
".",
"__size",
"=",
"0",
"return",
"slice_"
] | Return all buffered data, and clear the stack. | [
"Return",
"all",
"buffered",
"data",
"and",
"clear",
"the",
"stack",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection.py#L78-L84 | train |
dsoprea/NsqSpinner | nsq/connection.py | _ManagedConnection.__send_hello | def __send_hello(self):
"""Initiate the handshake."""
_logger.debug("Saying hello: [%s]", self)
self.__c.send(nsq.config.protocol.MAGIC_IDENTIFIER) | python | def __send_hello(self):
"""Initiate the handshake."""
_logger.debug("Saying hello: [%s]", self)
self.__c.send(nsq.config.protocol.MAGIC_IDENTIFIER) | [
"def",
"__send_hello",
"(",
"self",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Saying hello: [%s]\"",
",",
"self",
")",
"self",
".",
"__c",
".",
"send",
"(",
"nsq",
".",
"config",
".",
"protocol",
".",
"MAGIC_IDENTIFIER",
")"
] | Initiate the handshake. | [
"Initiate",
"the",
"handshake",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection.py#L137-L142 | train |
dsoprea/NsqSpinner | nsq/connection.py | _ManagedConnection.__sender | def __sender(self):
"""Send-loop."""
# If we're ignoring the quit, the connections will have to be closed
# by the server.
while (self.__ignore_quit is True or \
self.__nice_quit_ev.is_set() is False) and \
self.__force_quit_ev.is_set() is False:
# TODO(dustin): The quit-signals aren't being properly set after a producer
# stop.
# TODO(dustin): Consider breaking the loop if we haven't yet retried to
# reconnect a couple of times. A connection will automatically be
# reattempted.
try:
(command, parts) = self.__outgoing_q.get(block=False)
except gevent.queue.Empty:
gevent.sleep(nsq.config.client.WRITE_THROTTLE_S)
else:
_logger.debug("Dequeued outgoing command ((%d) remaining): "
"[%s]", self.__outgoing_q.qsize(),
self.__distill_command_name(command))
self.__send_command_primitive(command, parts)
self.__send_thread_ev.set() | python | def __sender(self):
"""Send-loop."""
# If we're ignoring the quit, the connections will have to be closed
# by the server.
while (self.__ignore_quit is True or \
self.__nice_quit_ev.is_set() is False) and \
self.__force_quit_ev.is_set() is False:
# TODO(dustin): The quit-signals aren't being properly set after a producer
# stop.
# TODO(dustin): Consider breaking the loop if we haven't yet retried to
# reconnect a couple of times. A connection will automatically be
# reattempted.
try:
(command, parts) = self.__outgoing_q.get(block=False)
except gevent.queue.Empty:
gevent.sleep(nsq.config.client.WRITE_THROTTLE_S)
else:
_logger.debug("Dequeued outgoing command ((%d) remaining): "
"[%s]", self.__outgoing_q.qsize(),
self.__distill_command_name(command))
self.__send_command_primitive(command, parts)
self.__send_thread_ev.set() | [
"def",
"__sender",
"(",
"self",
")",
":",
"# If we're ignoring the quit, the connections will have to be closed ",
"# by the server.",
"while",
"(",
"self",
".",
"__ignore_quit",
"is",
"True",
"or",
"self",
".",
"__nice_quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
")",
"and",
"self",
".",
"__force_quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
":",
"# TODO(dustin): The quit-signals aren't being properly set after a producer ",
"# stop.",
"# TODO(dustin): Consider breaking the loop if we haven't yet retried to ",
"# reconnect a couple of times. A connection will automatically be ",
"# reattempted.",
"try",
":",
"(",
"command",
",",
"parts",
")",
"=",
"self",
".",
"__outgoing_q",
".",
"get",
"(",
"block",
"=",
"False",
")",
"except",
"gevent",
".",
"queue",
".",
"Empty",
":",
"gevent",
".",
"sleep",
"(",
"nsq",
".",
"config",
".",
"client",
".",
"WRITE_THROTTLE_S",
")",
"else",
":",
"_logger",
".",
"debug",
"(",
"\"Dequeued outgoing command ((%d) remaining): \"",
"\"[%s]\"",
",",
"self",
".",
"__outgoing_q",
".",
"qsize",
"(",
")",
",",
"self",
".",
"__distill_command_name",
"(",
"command",
")",
")",
"self",
".",
"__send_command_primitive",
"(",
"command",
",",
"parts",
")",
"self",
".",
"__send_thread_ev",
".",
"set",
"(",
")"
] | Send-loop. | [
"Send",
"-",
"loop",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection.py#L479-L506 | train |
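The dequeue pattern above, a non-blocking get with a short sleep on Empty so the greenlet yields instead of busy-waiting, in isolation; a minimal sketch that omits the quit-event checks of the real loop:

import gevent
import gevent.queue

def drain(outgoing_q, send_one, throttle_s=0.25):
    # sketch only: the real loop also watches nice/force quit events
    while True:
        try:
            item = outgoing_q.get(block=False)
        except gevent.queue.Empty:
            gevent.sleep(throttle_s)  # yield to other greenlets
        else:
            send_one(item)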
dsoprea/NsqSpinner | nsq/connection.py | _ManagedConnection.__receiver | def __receiver(self):
"""Receive-loop."""
# If we're ignoring the quit, the connections will have to be closed
# by the server.
while (self.__ignore_quit is True or \
self.__nice_quit_ev.is_set() is False) and \
self.__force_quit_ev.is_set() is False:
# TODO(dustin): The quit-signals aren't being properly set after a producer
# stop.
# TODO(dustin): Consider breaking the loop if we haven't yet retried to
# reconnect a couple of times. A connection will automatically be
# reattempted.
try:
self.__read_frame()
except errno.EAGAIN:
gevent.sleep(nsq.config.client.READ_THROTTLE_S)
self.__receive_thread_ev.set() | python | def __receiver(self):
"""Receive-loop."""
# If we're ignoring the quit, the connections will have to be closed
# by the server.
while (self.__ignore_quit is True or \
self.__nice_quit_ev.is_set() is False) and \
self.__force_quit_ev.is_set() is False:
# TODO(dustin): The quit-signals aren't being properly set after a producer
# stop.
# TODO(dustin): Consider breaking the loop if we haven't yet retried to
# reconnect a couple of times. A connection will automatically be
# reattempted.
try:
self.__read_frame()
except errno.EAGAIN:
gevent.sleep(nsq.config.client.READ_THROTTLE_S)
self.__receive_thread_ev.set() | [
"def",
"__receiver",
"(",
"self",
")",
":",
"# If we're ignoring the quit, the connections will have to be closed ",
"# by the server.",
"while",
"(",
"self",
".",
"__ignore_quit",
"is",
"True",
"or",
"self",
".",
"__nice_quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
")",
"and",
"self",
".",
"__force_quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
":",
"# TODO(dustin): The quit-signals aren't being properly set after a producer ",
"# stop.",
"# TODO(dustin): Consider breaking the loop if we haven't yet retried to ",
"# reconnect a couple of times. A connection will automatically be ",
"# reattempted.",
"try",
":",
"self",
".",
"__read_frame",
"(",
")",
"except",
"errno",
".",
"EAGAIN",
":",
"gevent",
".",
"sleep",
"(",
"nsq",
".",
"config",
".",
"client",
".",
"READ_THROTTLE_S",
")",
"self",
".",
"__receive_thread_ev",
".",
"set",
"(",
")"
] | Receive-loop. | [
"Receive",
"-",
"loop",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection.py#L508-L529 | train |
dsoprea/NsqSpinner | nsq/connection.py | Connection.run | def run(self):
"""Connect the server, and maintain the connection. This shall not
return until a connection has been determined to absolutely not be
available.
"""
while self.__nice_quit_ev.is_set() is False:
self.__connect()
_logger.info("Connection re-connect loop has terminated: %s", self.__mc) | python | def run(self):
"""Connect the server, and maintain the connection. This shall not
return until a connection has been determined to absolutely not be
available.
"""
while self.__nice_quit_ev.is_set() is False:
self.__connect()
_logger.info("Connection re-connect loop has terminated: %s", self.__mc) | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"self",
".",
"__nice_quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
":",
"self",
".",
"__connect",
"(",
")",
"_logger",
".",
"info",
"(",
"\"Connection re-connect loop has terminated: %s\"",
",",
"self",
".",
"__mc",
")"
] | Connect to the server, and maintain the connection. This shall not
return until a connection has been determined to absolutely not be
available. | [
"Connect",
"the",
"server",
"and",
"maintain",
"the",
"connection",
".",
"This",
"shall",
"not",
"return",
"until",
"a",
"connection",
"has",
"been",
"determined",
"to",
"absolutely",
"not",
"be",
"available",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection.py#L598-L607 | train |
YuriyGuts/pygoose | pygoose/kg/io.py | save | def save(obj, filename, protocol=4):
"""
Serialize an object to disk using pickle protocol.
Args:
obj: The object to serialize.
filename: Path to the output file.
protocol: Version of the pickle protocol.
"""
with open(filename, 'wb') as f:
pickle.dump(obj, f, protocol=protocol) | python | def save(obj, filename, protocol=4):
"""
Serialize an object to disk using pickle protocol.
Args:
obj: The object to serialize.
filename: Path to the output file.
protocol: Version of the pickle protocol.
"""
with open(filename, 'wb') as f:
pickle.dump(obj, f, protocol=protocol) | [
"def",
"save",
"(",
"obj",
",",
"filename",
",",
"protocol",
"=",
"4",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"obj",
",",
"f",
",",
"protocol",
"=",
"protocol",
")"
] | Serialize an object to disk using pickle protocol.
Args:
obj: The object to serialize.
filename: Path to the output file.
protocol: Version of the pickle protocol. | [
"Serialize",
"an",
"object",
"to",
"disk",
"using",
"pickle",
"protocol",
"."
] | 4d9b8827c6d6c4b79949d1cd653393498c0bb3c2 | https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/io.py#L20-L31 | train |
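A quick round trip with this helper; the file name is arbitrary, and reading back uses plain pickle since no load counterpart is shown in this excerpt:

import pickle

save({'alpha': 1, 'beta': [2, 3]}, 'payload.pkl')
with open('payload.pkl', 'rb') as f:
    restored = pickle.load(f)  # {'alpha': 1, 'beta': [2, 3]}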
YuriyGuts/pygoose | pygoose/kg/io.py | load_json | def load_json(filename, **kwargs):
"""
Load a JSON object from the specified file.
Args:
filename: Path to the input JSON file.
**kwargs: Additional arguments to `json.load`.
Returns:
The object deserialized from JSON.
"""
with open(filename, 'r', encoding='utf-8') as f:
return json.load(f, **kwargs) | python | def load_json(filename, **kwargs):
"""
Load a JSON object from the specified file.
Args:
filename: Path to the input JSON file.
**kwargs: Additional arguments to `json.load`.
Returns:
The object deserialized from JSON.
"""
with open(filename, 'r', encoding='utf-8') as f:
return json.load(f, **kwargs) | [
"def",
"load_json",
"(",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"return",
"json",
".",
"load",
"(",
"f",
",",
"*",
"*",
"kwargs",
")"
] | Load a JSON object from the specified file.
Args:
filename: Path to the input JSON file.
**kwargs: Additional arguments to `json.load`.
Returns:
The object deserialized from JSON. | [
"Load",
"a",
"JSON",
"object",
"from",
"the",
"specified",
"file",
"."
] | 4d9b8827c6d6c4b79949d1cd653393498c0bb3c2 | https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/io.py#L34-L47 | train |
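Typical use, assuming a UTF-8 JSON file on disk; the extra keyword arguments pass straight through to json.load:

from decimal import Decimal

config = load_json('config.json')
# e.g. parse floats without binary rounding error:
precise = load_json('config.json', parse_float=Decimal)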
YuriyGuts/pygoose | pygoose/kg/io.py | save_json | def save_json(obj, filename, **kwargs):
"""
Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`.
"""
with open(filename, 'w', encoding='utf-8') as f:
json.dump(obj, f, **kwargs) | python | def save_json(obj, filename, **kwargs):
"""
Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`.
"""
with open(filename, 'w', encoding='utf-8') as f:
json.dump(obj, f, **kwargs) | [
"def",
"save_json",
"(",
"obj",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"obj",
",",
"f",
",",
"*",
"*",
"kwargs",
")"
] | Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`. | [
"Save",
"an",
"object",
"as",
"a",
"JSON",
"file",
"."
] | 4d9b8827c6d6c4b79949d1cd653393498c0bb3c2 | https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/io.py#L50-L61 | train |
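The counterpart example; kwargs go straight to json.dump, so pretty-printing works as usual (file name illustrative):

save_json({'name': 'pygoose', 'tags': ['kaggle', 'ml']},
          'meta.json', indent=2, sort_keys=True)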
YuriyGuts/pygoose | pygoose/kg/io.py | load_lines | def load_lines(filename):
"""
Load a text file as an array of lines.
Args:
filename: Path to the input file.
Returns:
An array of strings, each representing an individual line.
"""
with open(filename, 'r', encoding='utf-8') as f:
return [line.rstrip('\n') for line in f.readlines()] | python | def load_lines(filename):
"""
Load a text file as an array of lines.
Args:
filename: Path to the input file.
Returns:
An array of strings, each representing an individual line.
"""
with open(filename, 'r', encoding='utf-8') as f:
return [line.rstrip('\n') for line in f.readlines()] | [
"def",
"load_lines",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"return",
"[",
"line",
".",
"rstrip",
"(",
"'\\n'",
")",
"for",
"line",
"in",
"f",
".",
"readlines",
"(",
")",
"]"
] | Load a text file as an array of lines.
Args:
filename: Path to the input file.
Returns:
An array of strings, each representing an individual line. | [
"Load",
"a",
"text",
"file",
"as",
"an",
"array",
"of",
"lines",
"."
] | 4d9b8827c6d6c4b79949d1cd653393498c0bb3c2 | https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/io.py#L64-L76 | train |
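Example with an assumed small text file; note that only the trailing newline is stripped, so other trailing whitespace survives:

lines = load_lines('names.txt')  # e.g. ['alice', 'bob', 'carol  ']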
YuriyGuts/pygoose | pygoose/kg/io.py | save_lines | def save_lines(lines, filename):
"""
Save an array of lines to a file.
Args:
lines: An array of strings that will be saved as individual lines.
filename: Path to the output file.
"""
with open(filename, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines)) | python | def save_lines(lines, filename):
"""
Save an array of lines to a file.
Args:
lines: An array of strings that will be saved as individual lines.
filename: Path to the output file.
"""
with open(filename, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines)) | [
"def",
"save_lines",
"(",
"lines",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"lines",
")",
")"
] | Save an array of lines to a file.
Args:
lines: An array of strings that will be saved as individual lines.
filename: Path to the output file. | [
"Save",
"an",
"array",
"of",
"lines",
"to",
"a",
"file",
"."
] | 4d9b8827c6d6c4b79949d1cd653393498c0bb3c2 | https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/io.py#L79-L89 | train |
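Round trip with the reader above (file name illustrative). Because the writer joins with '\n' and the reader strips only newlines, the list survives intact:

save_lines(['alpha', 'beta', 'gamma'], 'words.txt')
assert load_lines('words.txt') == ['alpha', 'beta', 'gamma']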
LEMS/pylems | lems/model/component.py | Component.add | def add(self, child):
"""
Adds a typed child object to the component.
@param child: Child object to be added.
"""
if isinstance(child, Component):
self.add_child(child)
else:
raise ModelError('Unsupported child element') | python | def add(self, child):
"""
Adds a typed child object to the component.
@param child: Child object to be added.
"""
if isinstance(child, Component):
self.add_child(child)
else:
raise ModelError('Unsupported child element') | [
"def",
"add",
"(",
"self",
",",
"child",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"Component",
")",
":",
"self",
".",
"add_child",
"(",
"child",
")",
"else",
":",
"raise",
"ModelError",
"(",
"'Unsupported child element'",
")"
] | Adds a typed child object to the component.
@param child: Child object to be added. | [
"Adds",
"a",
"typed",
"child",
"object",
"to",
"the",
"component",
"."
] | 4eeb719d2f23650fe16c38626663b69b5c83818b | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/component.py#L1122-L1132 | train |
glormph/msstitch | src/app/lookups/sqlite/searchspace.py | SearchSpaceDB.write_peps | def write_peps(self, peps, reverse_seqs):
"""Writes peps to db. We can reverse to be able to look up
peptides that have some amino acids missing at the N-terminal.
This way we can still use the index.
"""
if reverse_seqs:
peps = [(x[0][::-1],) for x in peps]
cursor = self.get_cursor()
cursor.executemany(
'INSERT INTO known_searchspace(seqs) VALUES (?)', peps)
self.conn.commit() | python | def write_peps(self, peps, reverse_seqs):
"""Writes peps to db. We can reverse to be able to look up
peptides that have some amino acids missing at the N-terminal.
This way we can still use the index.
"""
if reverse_seqs:
peps = [(x[0][::-1],) for x in peps]
cursor = self.get_cursor()
cursor.executemany(
'INSERT INTO known_searchspace(seqs) VALUES (?)', peps)
self.conn.commit() | [
"def",
"write_peps",
"(",
"self",
",",
"peps",
",",
"reverse_seqs",
")",
":",
"if",
"reverse_seqs",
":",
"peps",
"=",
"[",
"(",
"x",
"[",
"0",
"]",
"[",
":",
":",
"-",
"1",
"]",
",",
")",
"for",
"x",
"in",
"peps",
"]",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
")",
"cursor",
".",
"executemany",
"(",
"'INSERT INTO known_searchspace(seqs) VALUES (?)'",
",",
"peps",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")"
] | Writes peps to db. We can reverse to be able to look up
peptides that have some amino acids missing at the N-terminal.
This way we can still use the index. | [
"Writes",
"peps",
"to",
"db",
".",
"We",
"can",
"reverse",
"to",
"be",
"able",
"to",
"look",
"up",
"peptides",
"that",
"have",
"some",
"amino",
"acids",
"missing",
"at",
"the",
"N",
"-",
"terminal",
".",
"This",
"way",
"we",
"can",
"still",
"use",
"the",
"index",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/searchspace.py#L9-L19 | train |
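The reversal matters because an index on seqs only speeds up prefix matches; storing reversed sequences turns a "missing N-terminal residues" lookup into a prefix query. A toy sqlite3 illustration of that idea (schema and query shape assumed, only the trick itself comes from the code above):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE known_searchspace(seqs TEXT)')
conn.execute('CREATE INDEX ix_seqs ON known_searchspace(seqs)')
conn.execute('INSERT INTO known_searchspace VALUES (?)', ('MKLVDE'[::-1],))
# 'KLVDE' lost its first residue; reversed, it becomes a prefix query.
hit = conn.execute('SELECT 1 FROM known_searchspace WHERE seqs LIKE ?',
                   ('KLVDE'[::-1] + '%',)).fetchone()
print(bool(hit))  # True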
quikmile/trellio | trellio/bus.py | HTTPBus.send_http_request | def send_http_request(self, app: str, service: str, version: str, method: str, entity: str, params: dict):
"""
A convenience method that allows you to send a well formatted http request to another service
"""
host, port, node_id, service_type = self._registry_client.resolve(service, version, entity, HTTP)
url = 'http://{}:{}{}'.format(host, port, params.pop('path'))
http_keys = ['data', 'headers', 'cookies', 'auth', 'allow_redirects', 'compress', 'chunked']
kwargs = {k: params[k] for k in http_keys if k in params}
query_params = params.pop('params', {})
if app is not None:
query_params['app'] = app
query_params['version'] = version
query_params['service'] = service
response = yield from aiohttp.request(method, url, params=query_params, **kwargs)
return response | python | def send_http_request(self, app: str, service: str, version: str, method: str, entity: str, params: dict):
"""
A convenience method that allows you to send a well formatted http request to another service
"""
host, port, node_id, service_type = self._registry_client.resolve(service, version, entity, HTTP)
url = 'http://{}:{}{}'.format(host, port, params.pop('path'))
http_keys = ['data', 'headers', 'cookies', 'auth', 'allow_redirects', 'compress', 'chunked']
kwargs = {k: params[k] for k in http_keys if k in params}
query_params = params.pop('params', {})
if app is not None:
query_params['app'] = app
query_params['version'] = version
query_params['service'] = service
response = yield from aiohttp.request(method, url, params=query_params, **kwargs)
return response | [
"def",
"send_http_request",
"(",
"self",
",",
"app",
":",
"str",
",",
"service",
":",
"str",
",",
"version",
":",
"str",
",",
"method",
":",
"str",
",",
"entity",
":",
"str",
",",
"params",
":",
"dict",
")",
":",
"host",
",",
"port",
",",
"node_id",
",",
"service_type",
"=",
"self",
".",
"_registry_client",
".",
"resolve",
"(",
"service",
",",
"version",
",",
"entity",
",",
"HTTP",
")",
"url",
"=",
"'http://{}:{}{}'",
".",
"format",
"(",
"host",
",",
"port",
",",
"params",
".",
"pop",
"(",
"'path'",
")",
")",
"http_keys",
"=",
"[",
"'data'",
",",
"'headers'",
",",
"'cookies'",
",",
"'auth'",
",",
"'allow_redirects'",
",",
"'compress'",
",",
"'chunked'",
"]",
"kwargs",
"=",
"{",
"k",
":",
"params",
"[",
"k",
"]",
"for",
"k",
"in",
"http_keys",
"if",
"k",
"in",
"params",
"}",
"query_params",
"=",
"params",
".",
"pop",
"(",
"'params'",
",",
"{",
"}",
")",
"if",
"app",
"is",
"not",
"None",
":",
"query_params",
"[",
"'app'",
"]",
"=",
"app",
"query_params",
"[",
"'version'",
"]",
"=",
"version",
"query_params",
"[",
"'service'",
"]",
"=",
"service",
"response",
"=",
"yield",
"from",
"aiohttp",
".",
"request",
"(",
"method",
",",
"url",
",",
"params",
"=",
"query_params",
",",
"*",
"*",
"kwargs",
")",
"return",
"response"
] | A convenience method that allows you to send a well formatted http request to another service | [
"A",
"convenience",
"method",
"that",
"allows",
"you",
"to",
"send",
"a",
"well",
"formatted",
"http",
"request",
"to",
"another",
"service"
] | e8b050077562acf32805fcbb9c0c162248a23c62 | https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/bus.py#L31-L51 | train |
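A possible call site, assuming a configured HTTPBus instance named bus and the legacy generator-based coroutine style this code targets; params must carry the request 'path', the other HTTP keys are optional:

import asyncio

@asyncio.coroutine
def fetch_user():
    params = {'path': '/users/42',
              'headers': {'Accept': 'application/json'}}
    response = yield from bus.send_http_request(
        app=None, service='users', version='1',
        method='GET', entity='user', params=params)
    body = yield from response.text()
    return body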
ColinDuquesnoy/QCrash | qcrash/api.py | install_except_hook | def install_except_hook(except_hook=_hooks.except_hook):
"""
Install an except hook that will show the crash report dialog when an
    unhandled exception has occurred.
:param except_hook: except_hook function that will be called on the main
        thread whenever an unhandled exception occurred. The function takes
two parameters: the exception object and the traceback string.
"""
if not _backends:
raise ValueError('no backends found, you must at least install one '
'backend before calling this function')
global _except_hook
_except_hook = _hooks.QtExceptHook(except_hook) | python | def install_except_hook(except_hook=_hooks.except_hook):
"""
Install an except hook that will show the crash report dialog when an
    unhandled exception has occurred.
:param except_hook: except_hook function that will be called on the main
        thread whenever an unhandled exception occurred. The function takes
two parameters: the exception object and the traceback string.
"""
if not _backends:
raise ValueError('no backends found, you must at least install one '
'backend before calling this function')
global _except_hook
_except_hook = _hooks.QtExceptHook(except_hook) | [
"def",
"install_except_hook",
"(",
"except_hook",
"=",
"_hooks",
".",
"except_hook",
")",
":",
"if",
"not",
"_backends",
":",
"raise",
"ValueError",
"(",
"'no backends found, you must at least install one '",
"'backend before calling this function'",
")",
"global",
"_except_hook",
"_except_hook",
"=",
"_hooks",
".",
"QtExceptHook",
"(",
"except_hook",
")"
] | Install an except hook that will show the crash report dialog when an
unhandled exception has occurred.
:param except_hook: except_hook function that will be called on the main
thread whenever an unhandled exception occurred. The function takes
two parameters: the exception object and the traceback string. | [
"Install",
"an",
"except",
"hook",
"that",
"will",
"show",
"the",
"crash",
"report",
"dialog",
"when",
"an",
"unhandled",
"exception",
"has",
"occured",
"."
] | 775e1b15764e2041a8f9a08bea938e4d6ce817c7 | https://github.com/ColinDuquesnoy/QCrash/blob/775e1b15764e2041a8f9a08bea938e4d6ce817c7/qcrash/api.py#L36-L49 | train |
ColinDuquesnoy/QCrash | qcrash/api.py | show_report_dialog | def show_report_dialog(window_title='Report an issue...',
window_icon=None, traceback=None, issue_title='',
issue_description='', parent=None,
modal=None, include_log=True, include_sys_info=True):
"""
Show the issue report dialog manually.
:param window_title: Title of dialog window
:param window_icon: the icon to use for the dialog window
:param traceback: optional traceback string to include in the report.
:param issue_title: optional issue title
:param issue_description: optional issue description
:param parent: parent widget
:param include_log: Initial state of the include log check box
:param include_sys_info: Initial state of the include system info check box
"""
if not _backends:
raise ValueError('no backends found, you must at least install one '
'backend before calling this function')
from ._dialogs.report import DlgReport
dlg = DlgReport(_backends, window_title=window_title,
window_icon=window_icon, traceback=traceback,
issue_title=issue_title,
issue_description=issue_description, parent=parent,
include_log=include_log, include_sys_info=include_sys_info)
if modal:
dlg.show()
return dlg
else:
dlg.exec_() | python | def show_report_dialog(window_title='Report an issue...',
window_icon=None, traceback=None, issue_title='',
issue_description='', parent=None,
modal=None, include_log=True, include_sys_info=True):
"""
Show the issue report dialog manually.
:param window_title: Title of dialog window
:param window_icon: the icon to use for the dialog window
:param traceback: optional traceback string to include in the report.
:param issue_title: optional issue title
:param issue_description: optional issue description
:param parent: parent widget
:param include_log: Initial state of the include log check box
:param include_sys_info: Initial state of the include system info check box
"""
if not _backends:
raise ValueError('no backends found, you must at least install one '
'backend before calling this function')
from ._dialogs.report import DlgReport
dlg = DlgReport(_backends, window_title=window_title,
window_icon=window_icon, traceback=traceback,
issue_title=issue_title,
issue_description=issue_description, parent=parent,
include_log=include_log, include_sys_info=include_sys_info)
if modal:
dlg.show()
return dlg
else:
dlg.exec_() | [
"def",
"show_report_dialog",
"(",
"window_title",
"=",
"'Report an issue...'",
",",
"window_icon",
"=",
"None",
",",
"traceback",
"=",
"None",
",",
"issue_title",
"=",
"''",
",",
"issue_description",
"=",
"''",
",",
"parent",
"=",
"None",
",",
"modal",
"=",
"None",
",",
"include_log",
"=",
"True",
",",
"include_sys_info",
"=",
"True",
")",
":",
"if",
"not",
"_backends",
":",
"raise",
"ValueError",
"(",
"'no backends found, you must at least install one '",
"'backend before calling this function'",
")",
"from",
".",
"_dialogs",
".",
"report",
"import",
"DlgReport",
"dlg",
"=",
"DlgReport",
"(",
"_backends",
",",
"window_title",
"=",
"window_title",
",",
"window_icon",
"=",
"window_icon",
",",
"traceback",
"=",
"traceback",
",",
"issue_title",
"=",
"issue_title",
",",
"issue_description",
"=",
"issue_description",
",",
"parent",
"=",
"parent",
",",
"include_log",
"=",
"include_log",
",",
"include_sys_info",
"=",
"include_sys_info",
")",
"if",
"modal",
":",
"dlg",
".",
"show",
"(",
")",
"return",
"dlg",
"else",
":",
"dlg",
".",
"exec_",
"(",
")"
] | Show the issue report dialog manually.
:param window_title: Title of dialog window
:param window_icon: the icon to use for the dialog window
:param traceback: optional traceback string to include in the report.
:param issue_title: optional issue title
:param issue_description: optional issue description
:param parent: parent widget
:param include_log: Initial state of the include log check box
:param include_sys_info: Initial state of the include system info check box | [
"Show",
"the",
"issue",
"report",
"dialog",
"manually",
"."
] | 775e1b15764e2041a8f9a08bea938e4d6ce817c7 | https://github.com/ColinDuquesnoy/QCrash/blob/775e1b15764e2041a8f9a08bea938e4d6ce817c7/qcrash/api.py#L64-L93 | train |
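A sketch of invoking the dialog manually, assuming at least one backend has already been registered (otherwise the function raises ValueError). Note that per the code above a truthy modal shows the dialog non-blocking and returns it, while the default blocks in exec_():

dlg = show_report_dialog(
    window_title='Report a crash',
    issue_title='Crash on startup',
    traceback='Traceback (most recent call last): ...',
    modal=True)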
moluwole/Bast | bast/route.py | Route.middleware | def middleware(self, args):
"""
Appends a Middleware to the route which is to be executed before the route runs
"""
if self.url[(len(self.url) - 1)] == (self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=None)):
self.url.pop()
self.url.append((self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=args)))
return self | python | def middleware(self, args):
"""
Appends a Middleware to the route which is to be executed before the route runs
"""
if self.url[(len(self.url) - 1)] == (self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=None)):
self.url.pop()
self.url.append((self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=args)))
return self | [
"def",
"middleware",
"(",
"self",
",",
"args",
")",
":",
"if",
"self",
".",
"url",
"[",
"(",
"len",
"(",
"self",
".",
"url",
")",
"-",
"1",
")",
"]",
"==",
"(",
"self",
".",
"url_",
",",
"self",
".",
"controller",
",",
"dict",
"(",
"method",
"=",
"self",
".",
"method",
",",
"request_type",
"=",
"self",
".",
"request_type",
",",
"middleware",
"=",
"None",
")",
")",
":",
"self",
".",
"url",
".",
"pop",
"(",
")",
"self",
".",
"url",
".",
"append",
"(",
"(",
"self",
".",
"url_",
",",
"self",
".",
"controller",
",",
"dict",
"(",
"method",
"=",
"self",
".",
"method",
",",
"request_type",
"=",
"self",
".",
"request_type",
",",
"middleware",
"=",
"args",
")",
")",
")",
"return",
"self"
] | Appends a Middleware to the route which is to be executed before the route runs | [
"Appends",
"a",
"Middleware",
"to",
"the",
"route",
"which",
"is",
"to",
"be",
"executed",
"before",
"the",
"route",
"runs"
] | eecf55ae72e6f24af7c101549be0422cd2c1c95a | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/route.py#L27-L34 | train |
moluwole/Bast | bast/route.py | Route.get | def get(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for GET request
"""
self.request_type = 'GET'
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self | python | def get(self, url, controller):
"""
Gets the Controller and adds the route, controller and method to the url list for GET request
"""
self.request_type = 'GET'
controller_class, controller_method = self.__return_controller__(controller)
self.controller = controller_class
self.method = controller_method
self.url_ = url
self.url.append((url, controller_class, dict(method=controller_method, request_type=self.request_type, middleware=None)))
return self | [
"def",
"get",
"(",
"self",
",",
"url",
",",
"controller",
")",
":",
"self",
".",
"request_type",
"=",
"'GET'",
"controller_class",
",",
"controller_method",
"=",
"self",
".",
"__return_controller__",
"(",
"controller",
")",
"self",
".",
"controller",
"=",
"controller_class",
"self",
".",
"method",
"=",
"controller_method",
"self",
".",
"url_",
"=",
"url",
"self",
".",
"url",
".",
"append",
"(",
"(",
"url",
",",
"controller_class",
",",
"dict",
"(",
"method",
"=",
"controller_method",
",",
"request_type",
"=",
"self",
".",
"request_type",
",",
"middleware",
"=",
"None",
")",
")",
")",
"return",
"self"
] | Gets the Controller and adds the route, controller and method to the url list for GET request | [
"Gets",
"the",
"Controller",
"and",
"adds",
"the",
"route",
"controller",
"and",
"method",
"to",
"the",
"url",
"list",
"for",
"GET",
"request"
] | eecf55ae72e6f24af7c101549be0422cd2c1c95a | https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/route.py#L62-L75 | train |
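Since both get() and middleware() return self, routes can be declared fluently. A sketch assuming a no-argument Route() constructor and a 'Class.method' controller string; the actual string format depends on __return_controller__, which is not shown here:

route = Route()
route.get('/users', 'UserController.index') \
     .middleware(['AuthMiddleware'])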
chriso/gauged | gauged/utilities.py | to_bytes | def to_bytes(value):
"""Get a byte array representing the value"""
if isinstance(value, unicode):
return value.encode('utf8')
elif not isinstance(value, str):
return str(value)
return value | python | def to_bytes(value):
"""Get a byte array representing the value"""
if isinstance(value, unicode):
return value.encode('utf8')
elif not isinstance(value, str):
return str(value)
return value | [
"def",
"to_bytes",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"unicode",
")",
":",
"return",
"value",
".",
"encode",
"(",
"'utf8'",
")",
"elif",
"not",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"str",
"(",
"value",
")",
"return",
"value"
] | Get a byte array representing the value | [
"Get",
"a",
"byte",
"array",
"representing",
"the",
"value"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/utilities.py#L14-L20 | train |
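Behavior at a glance; the unicode/str checks mark this as Python 2 code:

to_bytes(u'café')  # UTF-8 encoded byte string
to_bytes(42)       # '42'
to_bytes('raw')    # returned unchanged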
chriso/gauged | gauged/utilities.py | table_repr | def table_repr(columns, rows, data, padding=2):
"""Generate a table for cli output"""
padding = ' ' * padding
column_lengths = [len(column) for column in columns]
for row in rows:
for i, column in enumerate(columns):
item = str(data[row][column])
column_lengths[i] = max(len(item), column_lengths[i])
max_row_length = max(len(row) for row in rows) if len(rows) else 0
table_row = ' ' * max_row_length
for i, column in enumerate(columns):
table_row += padding + column.rjust(column_lengths[i])
table_rows = [table_row]
for row in rows:
table_row = row.rjust(max_row_length)
for i, column in enumerate(columns):
item = str(data[row][column])
table_row += padding + item.rjust(column_lengths[i])
table_rows.append(table_row)
return '\n'.join(table_rows) | python | def table_repr(columns, rows, data, padding=2):
"""Generate a table for cli output"""
padding = ' ' * padding
column_lengths = [len(column) for column in columns]
for row in rows:
for i, column in enumerate(columns):
item = str(data[row][column])
column_lengths[i] = max(len(item), column_lengths[i])
max_row_length = max(len(row) for row in rows) if len(rows) else 0
table_row = ' ' * max_row_length
for i, column in enumerate(columns):
table_row += padding + column.rjust(column_lengths[i])
table_rows = [table_row]
for row in rows:
table_row = row.rjust(max_row_length)
for i, column in enumerate(columns):
item = str(data[row][column])
table_row += padding + item.rjust(column_lengths[i])
table_rows.append(table_row)
return '\n'.join(table_rows) | [
"def",
"table_repr",
"(",
"columns",
",",
"rows",
",",
"data",
",",
"padding",
"=",
"2",
")",
":",
"padding",
"=",
"' '",
"*",
"padding",
"column_lengths",
"=",
"[",
"len",
"(",
"column",
")",
"for",
"column",
"in",
"columns",
"]",
"for",
"row",
"in",
"rows",
":",
"for",
"i",
",",
"column",
"in",
"enumerate",
"(",
"columns",
")",
":",
"item",
"=",
"str",
"(",
"data",
"[",
"row",
"]",
"[",
"column",
"]",
")",
"column_lengths",
"[",
"i",
"]",
"=",
"max",
"(",
"len",
"(",
"item",
")",
",",
"column_lengths",
"[",
"i",
"]",
")",
"max_row_length",
"=",
"max",
"(",
"len",
"(",
"row",
")",
"for",
"row",
"in",
"rows",
")",
"if",
"len",
"(",
"rows",
")",
"else",
"0",
"table_row",
"=",
"' '",
"*",
"max_row_length",
"for",
"i",
",",
"column",
"in",
"enumerate",
"(",
"columns",
")",
":",
"table_row",
"+=",
"padding",
"+",
"column",
".",
"rjust",
"(",
"column_lengths",
"[",
"i",
"]",
")",
"table_rows",
"=",
"[",
"table_row",
"]",
"for",
"row",
"in",
"rows",
":",
"table_row",
"=",
"row",
".",
"rjust",
"(",
"max_row_length",
")",
"for",
"i",
",",
"column",
"in",
"enumerate",
"(",
"columns",
")",
":",
"item",
"=",
"str",
"(",
"data",
"[",
"row",
"]",
"[",
"column",
"]",
")",
"table_row",
"+=",
"padding",
"+",
"item",
".",
"rjust",
"(",
"column_lengths",
"[",
"i",
"]",
")",
"table_rows",
".",
"append",
"(",
"table_row",
")",
"return",
"'\\n'",
".",
"join",
"(",
"table_rows",
")"
] | Generate a table for cli output | [
"Generate",
"a",
"table",
"for",
"cli",
"output"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/utilities.py#L36-L55 | train |
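Example of feeding it column names, row names and a dict of dicts:

data = {'mon': {'reads': 10, 'writes': 2},
        'tue': {'reads': 7, 'writes': 31}}
print(table_repr(['reads', 'writes'], ['mon', 'tue'], data))
#      reads  writes
# mon     10       2
# tue      7      31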
glormph/msstitch | src/app/readers/fasta.py | get_proteins_for_db | def get_proteins_for_db(fastafn):
"""Runs through fasta file and returns proteins accession nrs, sequences
and evidence levels for storage in lookup DB. Duplicate accessions in
fasta are accepted and removed by keeping only the last one.
"""
objects = {}
for record in parse_fasta(fastafn):
objects[parse_protein_identifier(record)] = record
return (((acc,) for acc in list(objects)),
((acc, str(record.seq)) for acc, record in objects.items()),
((acc, get_uniprot_evidence_level(record.description))
for acc, record in objects.items())) | python | def get_proteins_for_db(fastafn):
"""Runs through fasta file and returns proteins accession nrs, sequences
and evidence levels for storage in lookup DB. Duplicate accessions in
fasta are accepted and removed by keeping only the last one.
"""
objects = {}
for record in parse_fasta(fastafn):
objects[parse_protein_identifier(record)] = record
return (((acc,) for acc in list(objects)),
((acc, str(record.seq)) for acc, record in objects.items()),
((acc, get_uniprot_evidence_level(record.description))
for acc, record in objects.items())) | [
"def",
"get_proteins_for_db",
"(",
"fastafn",
")",
":",
"objects",
"=",
"{",
"}",
"for",
"record",
"in",
"parse_fasta",
"(",
"fastafn",
")",
":",
"objects",
"[",
"parse_protein_identifier",
"(",
"record",
")",
"]",
"=",
"record",
"return",
"(",
"(",
"(",
"acc",
",",
")",
"for",
"acc",
"in",
"list",
"(",
"objects",
")",
")",
",",
"(",
"(",
"acc",
",",
"str",
"(",
"record",
".",
"seq",
")",
")",
"for",
"acc",
",",
"record",
"in",
"objects",
".",
"items",
"(",
")",
")",
",",
"(",
"(",
"acc",
",",
"get_uniprot_evidence_level",
"(",
"record",
".",
"description",
")",
")",
"for",
"acc",
",",
"record",
"in",
"objects",
".",
"items",
"(",
")",
")",
")"
] | Runs through fasta file and returns protein accession nrs, sequences
and evidence levels for storage in lookup DB. Duplicate accessions in
fasta are accepted and removed by keeping only the last one. | [
"Runs",
"through",
"fasta",
"file",
"and",
"returns",
"proteins",
"accession",
"nrs",
"sequences",
"and",
"evidence",
"levels",
"for",
"storage",
"in",
"lookup",
"DB",
".",
"Duplicate",
"accessions",
"in",
"fasta",
"are",
"accepted",
"and",
"removed",
"by",
"keeping",
"only",
"the",
"last",
"one",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/fasta.py#L4-L15 | train |
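The three return values are generators shaped for bulk inserts; a sketch of consuming them (fasta file name illustrative):

accs, seqs, evidences = get_proteins_for_db('proteome.fasta')
acc_rows = list(accs)       # [('P12345',), ...]
seq_rows = list(seqs)       # [('P12345', 'MKLVDE...'), ...]
pe_rows = list(evidences)   # [('P12345', 4), ...]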
glormph/msstitch | src/app/readers/fasta.py | get_uniprot_evidence_level | def get_uniprot_evidence_level(header):
"""Returns uniprot protein existence evidence level for a fasta header.
Evidence levels are 1-5, but we return 5 - x since sorting still demands
that higher is better."""
header = header.split()
for item in header:
item = item.split('=')
try:
if item[0] == 'PE':
return 5 - int(item[1])
except IndexError:
continue
return -1 | python | def get_uniprot_evidence_level(header):
"""Returns uniprot protein existence evidence level for a fasta header.
Evidence levels are 1-5, but we return 5 - x since sorting still demands
that higher is better."""
header = header.split()
for item in header:
item = item.split('=')
try:
if item[0] == 'PE':
return 5 - int(item[1])
except IndexError:
continue
return -1 | [
"def",
"get_uniprot_evidence_level",
"(",
"header",
")",
":",
"header",
"=",
"header",
".",
"split",
"(",
")",
"for",
"item",
"in",
"header",
":",
"item",
"=",
"item",
".",
"split",
"(",
"'='",
")",
"try",
":",
"if",
"item",
"[",
"0",
"]",
"==",
"'PE'",
":",
"return",
"5",
"-",
"int",
"(",
"item",
"[",
"1",
"]",
")",
"except",
"IndexError",
":",
"continue",
"return",
"-",
"1"
] | Returns uniprot protein existence evidence level for a fasta header.
Evidence levels are 1-5, but we return 5 - x since sorting still demands
that higher is better. | [
"Returns",
"uniprot",
"protein",
"existence",
"evidence",
"level",
"for",
"a",
"fasta",
"header",
".",
"Evidence",
"levels",
"are",
"1",
"-",
"5",
"but",
"we",
"return",
"5",
"-",
"x",
"since",
"sorting",
"still",
"demands",
"that",
"higher",
"is",
"better",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/fasta.py#L119-L131 | train |
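For example, a UniProt header with PE=1 maps to 4 (higher is better after the 5 - x flip), and a header without a PE field falls through to -1:

hdr = 'sp|P12345|TEST_HUMAN Test protein OS=Homo sapiens PE=1 SV=2'
get_uniprot_evidence_level(hdr)       # 4
get_uniprot_evidence_level('no pe')   # -1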
jdodds/feather | feather/plugin.py | Plugin.run | def run(self):
"""Run our loop, and any defined hooks...
"""
self.pre_run()
first = True
while self.runnable:
self.pre_call_message()
if first:
self.pre_first_call_message()
message, payload = self.listener.get()
getattr(self, message)(payload)
if first:
first = False
self.post_first_call_message()
self.post_call_message()
self.post_run() | python | def run(self):
"""Run our loop, and any defined hooks...
"""
self.pre_run()
first = True
while self.runnable:
self.pre_call_message()
if first:
self.pre_first_call_message()
message, payload = self.listener.get()
getattr(self, message)(payload)
if first:
first = False
self.post_first_call_message()
self.post_call_message()
self.post_run() | [
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"pre_run",
"(",
")",
"first",
"=",
"True",
"while",
"self",
".",
"runnable",
":",
"self",
".",
"pre_call_message",
"(",
")",
"if",
"first",
":",
"self",
".",
"pre_first_call_message",
"(",
")",
"message",
",",
"payload",
"=",
"self",
".",
"listener",
".",
"get",
"(",
")",
"getattr",
"(",
"self",
",",
"message",
")",
"(",
"payload",
")",
"if",
"first",
":",
"first",
"=",
"False",
"self",
".",
"post_first_call_message",
"(",
")",
"self",
".",
"post_call_message",
"(",
")",
"self",
".",
"post_run",
"(",
")"
] | Run our loop, and any defined hooks... | [
"Run",
"our",
"loop",
"and",
"any",
"defined",
"hooks",
"..."
] | 92a9426e692b33c7fddf758df8dbc99a9a1ba8ef | https://github.com/jdodds/feather/blob/92a9426e692b33c7fddf758df8dbc99a9a1ba8ef/feather/plugin.py#L70-L90 | train |
tamasgal/km3pipe | km3modules/hits.py | count_multiplicities | def count_multiplicities(times, tmax=20):
"""Calculate an array of multiplicities and corresponding coincidence IDs
    Note that this algorithm does not take DOM IDs into account, so it has to
be fed with DOM hits.
Parameters
----------
times: array[float], shape=(n,)
Hit times for n hits
    tmax: int [default: 20]
Time window of a coincidence
Returns
-------
    (array[int], array[int]), shape=(n,)
"""
n = times.shape[0]
mtp = np.ones(n, dtype='<i4') # multiplicities
cid = np.zeros(n, '<i4') # coincidence id
idx0 = 0
_mtp = 1
_cid = 0
t0 = times[idx0]
for i in range(1, n):
dt = times[i] - t0
if dt > tmax:
mtp[idx0:i] = _mtp
cid[idx0:i] = _cid
_mtp = 0
_cid += 1
idx0 = i
t0 = times[i]
_mtp += 1
if i == n - 1:
mtp[idx0:] = _mtp
cid[idx0:] = _cid
break
return mtp, cid | python | def count_multiplicities(times, tmax=20):
"""Calculate an array of multiplicities and corresponding coincidence IDs
    Note that this algorithm does not take DOM IDs into account, so it has to
be fed with DOM hits.
Parameters
----------
times: array[float], shape=(n,)
Hit times for n hits
    tmax: int [default: 20]
Time window of a coincidence
Returns
-------
    (array[int], array[int]), shape=(n,)
"""
n = times.shape[0]
mtp = np.ones(n, dtype='<i4') # multiplicities
cid = np.zeros(n, '<i4') # coincidence id
idx0 = 0
_mtp = 1
_cid = 0
t0 = times[idx0]
for i in range(1, n):
dt = times[i] - t0
if dt > tmax:
mtp[idx0:i] = _mtp
cid[idx0:i] = _cid
_mtp = 0
_cid += 1
idx0 = i
t0 = times[i]
_mtp += 1
if i == n - 1:
mtp[idx0:] = _mtp
cid[idx0:] = _cid
break
return mtp, cid | [
"def",
"count_multiplicities",
"(",
"times",
",",
"tmax",
"=",
"20",
")",
":",
"n",
"=",
"times",
".",
"shape",
"[",
"0",
"]",
"mtp",
"=",
"np",
".",
"ones",
"(",
"n",
",",
"dtype",
"=",
"'<i4'",
")",
"# multiplicities",
"cid",
"=",
"np",
".",
"zeros",
"(",
"n",
",",
"'<i4'",
")",
"# coincidence id",
"idx0",
"=",
"0",
"_mtp",
"=",
"1",
"_cid",
"=",
"0",
"t0",
"=",
"times",
"[",
"idx0",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"n",
")",
":",
"dt",
"=",
"times",
"[",
"i",
"]",
"-",
"t0",
"if",
"dt",
">",
"tmax",
":",
"mtp",
"[",
"idx0",
":",
"i",
"]",
"=",
"_mtp",
"cid",
"[",
"idx0",
":",
"i",
"]",
"=",
"_cid",
"_mtp",
"=",
"0",
"_cid",
"+=",
"1",
"idx0",
"=",
"i",
"t0",
"=",
"times",
"[",
"i",
"]",
"_mtp",
"+=",
"1",
"if",
"i",
"==",
"n",
"-",
"1",
":",
"mtp",
"[",
"idx0",
":",
"]",
"=",
"_mtp",
"cid",
"[",
"idx0",
":",
"]",
"=",
"_cid",
"break",
"return",
"mtp",
",",
"cid"
] | Calculate an array of multiplicities and corresponding coincidence IDs
Note that this algorithm does not take DOM IDs into account, so it has to
be fed with DOM hits.
Parameters
----------
times: array[float], shape=(n,)
Hit times for n hits
tmax: int [default: 20]
Time window of a coincidence
Returns
-------
(array[int], array[int]), shape=(n,) | [
"Calculate",
"an",
"array",
"of",
"multiplicities",
"and",
"corresponding",
"coincidence",
"IDs"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/hits.py#L28-L68 | train |
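A worked example, assuming the hit times are sorted: with a 20 ns window the first three hits form one coincidence, the next two another, and the last stands alone:

import numpy as np

times = np.array([0., 5., 10., 100., 104., 300.])
mtp, cid = count_multiplicities(times, tmax=20)
print(mtp)  # [3 3 3 2 2 1]
print(cid)  # [0 0 0 1 1 2]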
vslutov/turingmarkov | turingmarkov/turing.py | build_machine | def build_machine(lines):
"""Build machine from list of lines."""
if lines == []:
raise SyntaxError('Empty file')
else:
machine = Machine(lines[0].split())
for line in lines[1:]:
if line.strip() != '':
machine.add_state(line)
machine.check()
return machine | python | def build_machine(lines):
"""Build machine from list of lines."""
if lines == []:
raise SyntaxError('Empty file')
else:
machine = Machine(lines[0].split())
for line in lines[1:]:
if line.strip() != '':
machine.add_state(line)
machine.check()
return machine | [
"def",
"build_machine",
"(",
"lines",
")",
":",
"if",
"lines",
"==",
"[",
"]",
":",
"raise",
"SyntaxError",
"(",
"'Empty file'",
")",
"else",
":",
"machine",
"=",
"Machine",
"(",
"lines",
"[",
"0",
"]",
".",
"split",
"(",
")",
")",
"for",
"line",
"in",
"lines",
"[",
"1",
":",
"]",
":",
"if",
"line",
".",
"strip",
"(",
")",
"!=",
"''",
":",
"machine",
".",
"add_state",
"(",
"line",
")",
"machine",
".",
"check",
"(",
")",
"return",
"machine"
] | Build machine from list of lines. | [
"Build",
"machine",
"from",
"list",
"of",
"lines",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L175-L185 | train |
vslutov/turingmarkov | turingmarkov/turing.py | Machine.add_state | def add_state(self, string):
"""Add state and rules to machine."""
parsed_string = string.split()
if len(parsed_string) > 0:
state, rules = parsed_string[0], parsed_string[1:]
if len(rules) != len(self.alphabet):
raise SyntaxError('Wrong count of rules ({cur}/{exp}): {string}'
.format(cur=len(rules), exp=len(self.alphabet),
string=string))
if state in self.states or state == self.TERM_STATE:
raise SyntaxError('Double definition of state: ' + state)
else:
self.states[state] = []
for rule in rules:
try:
self._add_rule(state, rule)
except SyntaxError as err:
self.states.pop(state)
raise err | python | def add_state(self, string):
"""Add state and rules to machine."""
parsed_string = string.split()
if len(parsed_string) > 0:
state, rules = parsed_string[0], parsed_string[1:]
if len(rules) != len(self.alphabet):
raise SyntaxError('Wrong count of rules ({cur}/{exp}): {string}'
.format(cur=len(rules), exp=len(self.alphabet),
string=string))
if state in self.states or state == self.TERM_STATE:
raise SyntaxError('Double definition of state: ' + state)
else:
self.states[state] = []
for rule in rules:
try:
self._add_rule(state, rule)
except SyntaxError as err:
self.states.pop(state)
raise err | [
"def",
"add_state",
"(",
"self",
",",
"string",
")",
":",
"parsed_string",
"=",
"string",
".",
"split",
"(",
")",
"if",
"len",
"(",
"parsed_string",
")",
">",
"0",
":",
"state",
",",
"rules",
"=",
"parsed_string",
"[",
"0",
"]",
",",
"parsed_string",
"[",
"1",
":",
"]",
"if",
"len",
"(",
"rules",
")",
"!=",
"len",
"(",
"self",
".",
"alphabet",
")",
":",
"raise",
"SyntaxError",
"(",
"'Wrong count of rules ({cur}/{exp}): {string}'",
".",
"format",
"(",
"cur",
"=",
"len",
"(",
"rules",
")",
",",
"exp",
"=",
"len",
"(",
"self",
".",
"alphabet",
")",
",",
"string",
"=",
"string",
")",
")",
"if",
"state",
"in",
"self",
".",
"states",
"or",
"state",
"==",
"self",
".",
"TERM_STATE",
":",
"raise",
"SyntaxError",
"(",
"'Double definition of state: '",
"+",
"state",
")",
"else",
":",
"self",
".",
"states",
"[",
"state",
"]",
"=",
"[",
"]",
"for",
"rule",
"in",
"rules",
":",
"try",
":",
"self",
".",
"_add_rule",
"(",
"state",
",",
"rule",
")",
"except",
"SyntaxError",
"as",
"err",
":",
"self",
".",
"states",
".",
"pop",
"(",
"state",
")",
"raise",
"err"
] | Add state and rules to machine. | [
"Add",
"state",
"and",
"rules",
"to",
"machine",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L60-L81 | train |
vslutov/turingmarkov | turingmarkov/turing.py | Machine.check | def check(self):
"""Check semantic rules."""
has_term = False
if self.START_STATE not in self.states:
raise SyntaxError('Undefined start rule')
for state in self.states:
for rule in self.states[state]:
if rule is not None:
if rule[2] == self.TERM_STATE:
has_term = True
elif rule[2] not in self.states:
raise SyntaxError('Unexpected state: ' + rule[2])
if not has_term:
raise SyntaxError('Missed terminate state') | python | def check(self):
"""Check semantic rules."""
has_term = False
if self.START_STATE not in self.states:
raise SyntaxError('Undefined start rule')
for state in self.states:
for rule in self.states[state]:
if rule is not None:
if rule[2] == self.TERM_STATE:
has_term = True
elif rule[2] not in self.states:
raise SyntaxError('Unexpected state: ' + rule[2])
if not has_term:
raise SyntaxError('Missed terminate state') | [
"def",
"check",
"(",
"self",
")",
":",
"has_term",
"=",
"False",
"if",
"self",
".",
"START_STATE",
"not",
"in",
"self",
".",
"states",
":",
"raise",
"SyntaxError",
"(",
"'Undefined start rule'",
")",
"for",
"state",
"in",
"self",
".",
"states",
":",
"for",
"rule",
"in",
"self",
".",
"states",
"[",
"state",
"]",
":",
"if",
"rule",
"is",
"not",
"None",
":",
"if",
"rule",
"[",
"2",
"]",
"==",
"self",
".",
"TERM_STATE",
":",
"has_term",
"=",
"True",
"elif",
"rule",
"[",
"2",
"]",
"not",
"in",
"self",
".",
"states",
":",
"raise",
"SyntaxError",
"(",
"'Unexpected state: '",
"+",
"rule",
"[",
"2",
"]",
")",
"if",
"not",
"has_term",
":",
"raise",
"SyntaxError",
"(",
"'Missed terminate state'",
")"
] | Check semantic rules. | [
"Check",
"semantic",
"rules",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L83-L99 | train |
vslutov/turingmarkov | turingmarkov/turing.py | Machine.init_tape | def init_tape(self, string):
"""Init system values."""
for char in string:
if char not in self.alphabet and not char.isspace() and char != self.EMPTY_SYMBOL:
raise RuntimeError('Invalid symbol: "' + char + '"')
self.check()
self.state = self.START_STATE
self.head = 0
self.tape = {}
for i in range(len(string)):
symbol = string[i] if not string[i].isspace() else self.EMPTY_SYMBOL
self.tape[i] = symbol | python | def init_tape(self, string):
"""Init system values."""
for char in string:
if char not in self.alphabet and not char.isspace() and char != self.EMPTY_SYMBOL:
raise RuntimeError('Invalid symbol: "' + char + '"')
self.check()
self.state = self.START_STATE
self.head = 0
self.tape = {}
for i in range(len(string)):
symbol = string[i] if not string[i].isspace() else self.EMPTY_SYMBOL
self.tape[i] = symbol | [
"def",
"init_tape",
"(",
"self",
",",
"string",
")",
":",
"for",
"char",
"in",
"string",
":",
"if",
"char",
"not",
"in",
"self",
".",
"alphabet",
"and",
"not",
"char",
".",
"isspace",
"(",
")",
"and",
"char",
"!=",
"self",
".",
"EMPTY_SYMBOL",
":",
"raise",
"RuntimeError",
"(",
"'Invalid symbol: \"'",
"+",
"char",
"+",
"'\"'",
")",
"self",
".",
"check",
"(",
")",
"self",
".",
"state",
"=",
"self",
".",
"START_STATE",
"self",
".",
"head",
"=",
"0",
"self",
".",
"tape",
"=",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"string",
")",
")",
":",
"symbol",
"=",
"string",
"[",
"i",
"]",
"if",
"not",
"string",
"[",
"i",
"]",
".",
"isspace",
"(",
")",
"else",
"self",
".",
"EMPTY_SYMBOL",
"self",
".",
"tape",
"[",
"i",
"]",
"=",
"symbol"
] | Initialize the tape, head position and machine state. | [
"Init",
"system",
"values",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L101-L114 | train |
vslutov/turingmarkov | turingmarkov/turing.py | Machine.get_tape | def get_tape(self):
"""Get content of tape."""
result = ''
for i in range(min(self.tape), max(self.tape) + 1):
symbol = self.tape[i] if self.tape[i] != self.EMPTY_SYMBOL else ' '
result += symbol
# Remove unnecessary empty symbols on tape
return result.strip() | python | def get_tape(self):
"""Get content of tape."""
result = ''
for i in range(min(self.tape), max(self.tape) + 1):
symbol = self.tape[i] if self.tape[i] != self.EMPTY_SYMBOL else ' '
result += symbol
# Remove unnecessary empty symbols on tape
return result.strip() | [
"def",
"get_tape",
"(",
"self",
")",
":",
"result",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"min",
"(",
"self",
".",
"tape",
")",
",",
"max",
"(",
"self",
".",
"tape",
")",
"+",
"1",
")",
":",
"symbol",
"=",
"self",
".",
"tape",
"[",
"i",
"]",
"if",
"self",
".",
"tape",
"[",
"i",
"]",
"!=",
"self",
".",
"EMPTY_SYMBOL",
"else",
"' '",
"result",
"+=",
"symbol",
"# Remove unnecessary empty symbols on tape",
"return",
"result",
".",
"strip",
"(",
")"
] | Get content of tape. | [
"Get",
"content",
"of",
"tape",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L116-L123 | train |
vslutov/turingmarkov | turingmarkov/turing.py | Machine.execute_once | def execute_once(self):
"""One step of execution."""
symbol = self.tape.get(self.head, self.EMPTY_SYMBOL)
index = self.alphabet.index(symbol)
rule = self.states[self.state][index]
if rule is None:
raise RuntimeError('Unexpected symbol: ' + symbol)
self.tape[self.head] = rule[0]
if rule[1] == 'L':
self.head -= 1
elif rule[1] == 'R':
self.head += 1
self.state = rule[2] | python | def execute_once(self):
"""One step of execution."""
symbol = self.tape.get(self.head, self.EMPTY_SYMBOL)
index = self.alphabet.index(symbol)
rule = self.states[self.state][index]
if rule is None:
raise RuntimeError('Unexpected symbol: ' + symbol)
self.tape[self.head] = rule[0]
if rule[1] == 'L':
self.head -= 1
elif rule[1] == 'R':
self.head += 1
self.state = rule[2] | [
"def",
"execute_once",
"(",
"self",
")",
":",
"symbol",
"=",
"self",
".",
"tape",
".",
"get",
"(",
"self",
".",
"head",
",",
"self",
".",
"EMPTY_SYMBOL",
")",
"index",
"=",
"self",
".",
"alphabet",
".",
"index",
"(",
"symbol",
")",
"rule",
"=",
"self",
".",
"states",
"[",
"self",
".",
"state",
"]",
"[",
"index",
"]",
"if",
"rule",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"'Unexpected symbol: '",
"+",
"symbol",
")",
"self",
".",
"tape",
"[",
"self",
".",
"head",
"]",
"=",
"rule",
"[",
"0",
"]",
"if",
"rule",
"[",
"1",
"]",
"==",
"'L'",
":",
"self",
".",
"head",
"-=",
"1",
"elif",
"rule",
"[",
"1",
"]",
"==",
"'R'",
":",
"self",
".",
"head",
"+=",
"1",
"self",
".",
"state",
"=",
"rule",
"[",
"2",
"]"
] | One step of execution. | [
"One",
"step",
"of",
"execution",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L125-L142 | train |
vslutov/turingmarkov | turingmarkov/turing.py | Machine.compile | def compile(self):
"""Return python code for create and execute machine."""
result = TEMPLATE
result += 'machine = Machine(' + repr(self.alphabet) + ')\n'
for state in self.states:
repr_state = state[0]
for rule in self.states[state]:
repr_state += ' ' + (','.join(rule) if rule is not None else '-')
result += ("machine.add_state({repr_state})\n".format(repr_state=repr(repr_state)))
result += "for line in stdin:\n"
result += " print(machine.execute(line))"
return result | python | def compile(self):
"""Return python code for create and execute machine."""
result = TEMPLATE
result += 'machine = Machine(' + repr(self.alphabet) + ')\n'
for state in self.states:
repr_state = state[0]
for rule in self.states[state]:
repr_state += ' ' + (','.join(rule) if rule is not None else '-')
result += ("machine.add_state({repr_state})\n".format(repr_state=repr(repr_state)))
result += "for line in stdin:\n"
result += " print(machine.execute(line))"
return result | [
"def",
"compile",
"(",
"self",
")",
":",
"result",
"=",
"TEMPLATE",
"result",
"+=",
"'machine = Machine('",
"+",
"repr",
"(",
"self",
".",
"alphabet",
")",
"+",
"')\\n'",
"for",
"state",
"in",
"self",
".",
"states",
":",
"repr_state",
"=",
"state",
"[",
"0",
"]",
"for",
"rule",
"in",
"self",
".",
"states",
"[",
"state",
"]",
":",
"repr_state",
"+=",
"' '",
"+",
"(",
"','",
".",
"join",
"(",
"rule",
")",
"if",
"rule",
"is",
"not",
"None",
"else",
"'-'",
")",
"result",
"+=",
"(",
"\"machine.add_state({repr_state})\\n\"",
".",
"format",
"(",
"repr_state",
"=",
"repr",
"(",
"repr_state",
")",
")",
")",
"result",
"+=",
"\"for line in stdin:\\n\"",
"result",
"+=",
"\" print(machine.execute(line))\"",
"return",
"result"
] | Return python code to create and execute the machine. | [
"Return",
"python",
"code",
"for",
"create",
"and",
"execute",
"machine",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/turing.py#L159-L173 | train |
tamasgal/km3pipe | km3pipe/core.py | ServiceManager.get_missing_services | def get_missing_services(self, services):
"""
Check if all required services are provided
Args:
services: List with the service names which are required
Returns:
List with missing services
"""
required_services = set(services)
provided_services = set(self._services.keys())
missing_services = required_services.difference(provided_services)
return sorted(missing_services) | python | def get_missing_services(self, services):
"""
Check if all required services are provided
Args:
services: List with the service names which are required
Returns:
List with missing services
"""
required_services = set(services)
provided_services = set(self._services.keys())
missing_services = required_services.difference(provided_services)
return sorted(missing_services) | [
"def",
"get_missing_services",
"(",
"self",
",",
"services",
")",
":",
"required_services",
"=",
"set",
"(",
"services",
")",
"provided_services",
"=",
"set",
"(",
"self",
".",
"_services",
".",
"keys",
"(",
")",
")",
"missing_services",
"=",
"required_services",
".",
"difference",
"(",
"provided_services",
")",
"return",
"sorted",
"(",
"missing_services",
")"
] | Check if all required services are provided
Args:
services: List with the service names which are required
Returns:
List with missing services | [
"Check",
"if",
"all",
"required",
"services",
"are",
"provided"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L72-L85 | train |
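Stripped of the class, the method is a sorted set difference over the registered service names; in isolation the same logic reads:

required = {'calibration', 'database', 'logger'}
provided = {'logger': object(), 'database': object()}
print(sorted(required.difference(provided)))  # ['calibration']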
tamasgal/km3pipe | km3pipe/core.py | Pipeline._drain | def _drain(self, cycles=None):
"""Activate the pump and let the flow go.
This will call the process() method on each attached module until
a StopIteration is raised, usually by a pump when it reached the EOF.
A StopIteration is also raised when self.cycles was set and the
number of cycles has reached that limit.
"""
log.info("Now draining...")
if not cycles:
log.info("No cycle count, the pipeline may be drained forever.")
if self.calibration:
log.info("Setting up the detector calibration.")
for module in self.modules:
module.detector = self.calibration.get_detector()
try:
while not self._stop:
cycle_start = timer()
cycle_start_cpu = process_time()
log.debug("Pumping blob #{0}".format(self._cycle_count))
self.blob = Blob()
for module in self.modules:
if self.blob is None:
log.debug(
"Skipping {0}, due to empty blob.".format(
module.name
)
)
continue
if module.only_if and not module.only_if.issubset(set(
self.blob.keys())):
log.debug(
"Skipping {0}, due to missing required key"
"'{1}'.".format(module.name, module.only_if)
)
continue
if (self._cycle_count + 1) % module.every != 0:
log.debug(
"Skipping {0} (every {1} iterations).".format(
module.name, module.every
)
)
continue
if module.blob_keys is not None:
blob_to_send = Blob({
k: self.blob[k]
for k in module.blob_keys
if k in self.blob
})
else:
blob_to_send = self.blob
log.debug("Processing {0} ".format(module.name))
start = timer()
start_cpu = process_time()
new_blob = module(blob_to_send)
if self.timeit or module.timeit:
self._timeit[module]['process'] \
.append(timer() - start)
self._timeit[module]['process_cpu'] \
.append(process_time() - start_cpu)
if module.blob_keys is not None:
if new_blob is not None:
for key in new_blob.keys():
self.blob[key] = new_blob[key]
else:
self.blob = new_blob
self._timeit['cycles'].append(timer() - cycle_start)
self._timeit['cycles_cpu'].append(
process_time() - cycle_start_cpu
)
self._cycle_count += 1
if cycles and self._cycle_count >= cycles:
raise StopIteration
except StopIteration:
log.info("Nothing left to pump through.")
return self.finish() | python | def _drain(self, cycles=None):
"""Activate the pump and let the flow go.
This will call the process() method on each attached module until
a StopIteration is raised, usually by a pump when it reached the EOF.
A StopIteration is also raised when self.cycles was set and the
number of cycles has reached that limit.
"""
log.info("Now draining...")
if not cycles:
log.info("No cycle count, the pipeline may be drained forever.")
if self.calibration:
log.info("Setting up the detector calibration.")
for module in self.modules:
module.detector = self.calibration.get_detector()
try:
while not self._stop:
cycle_start = timer()
cycle_start_cpu = process_time()
log.debug("Pumping blob #{0}".format(self._cycle_count))
self.blob = Blob()
for module in self.modules:
if self.blob is None:
log.debug(
"Skipping {0}, due to empty blob.".format(
module.name
)
)
continue
if module.only_if and not module.only_if.issubset(set(
self.blob.keys())):
log.debug(
"Skipping {0}, due to missing required key"
"'{1}'.".format(module.name, module.only_if)
)
continue
if (self._cycle_count + 1) % module.every != 0:
log.debug(
"Skipping {0} (every {1} iterations).".format(
module.name, module.every
)
)
continue
if module.blob_keys is not None:
blob_to_send = Blob({
k: self.blob[k]
for k in module.blob_keys
if k in self.blob
})
else:
blob_to_send = self.blob
log.debug("Processing {0} ".format(module.name))
start = timer()
start_cpu = process_time()
new_blob = module(blob_to_send)
if self.timeit or module.timeit:
self._timeit[module]['process'] \
.append(timer() - start)
self._timeit[module]['process_cpu'] \
.append(process_time() - start_cpu)
if module.blob_keys is not None:
if new_blob is not None:
for key in new_blob.keys():
self.blob[key] = new_blob[key]
else:
self.blob = new_blob
self._timeit['cycles'].append(timer() - cycle_start)
self._timeit['cycles_cpu'].append(
process_time() - cycle_start_cpu
)
self._cycle_count += 1
if cycles and self._cycle_count >= cycles:
raise StopIteration
except StopIteration:
log.info("Nothing left to pump through.")
return self.finish() | [
"def",
"_drain",
"(",
"self",
",",
"cycles",
"=",
"None",
")",
":",
"log",
".",
"info",
"(",
"\"Now draining...\"",
")",
"if",
"not",
"cycles",
":",
"log",
".",
"info",
"(",
"\"No cycle count, the pipeline may be drained forever.\"",
")",
"if",
"self",
".",
"calibration",
":",
"log",
".",
"info",
"(",
"\"Setting up the detector calibration.\"",
")",
"for",
"module",
"in",
"self",
".",
"modules",
":",
"module",
".",
"detector",
"=",
"self",
".",
"calibration",
".",
"get_detector",
"(",
")",
"try",
":",
"while",
"not",
"self",
".",
"_stop",
":",
"cycle_start",
"=",
"timer",
"(",
")",
"cycle_start_cpu",
"=",
"process_time",
"(",
")",
"log",
".",
"debug",
"(",
"\"Pumping blob #{0}\"",
".",
"format",
"(",
"self",
".",
"_cycle_count",
")",
")",
"self",
".",
"blob",
"=",
"Blob",
"(",
")",
"for",
"module",
"in",
"self",
".",
"modules",
":",
"if",
"self",
".",
"blob",
"is",
"None",
":",
"log",
".",
"debug",
"(",
"\"Skipping {0}, due to empty blob.\"",
".",
"format",
"(",
"module",
".",
"name",
")",
")",
"continue",
"if",
"module",
".",
"only_if",
"and",
"not",
"module",
".",
"only_if",
".",
"issubset",
"(",
"set",
"(",
"self",
".",
"blob",
".",
"keys",
"(",
")",
")",
")",
":",
"log",
".",
"debug",
"(",
"\"Skipping {0}, due to missing required key\"",
"\"'{1}'.\"",
".",
"format",
"(",
"module",
".",
"name",
",",
"module",
".",
"only_if",
")",
")",
"continue",
"if",
"(",
"self",
".",
"_cycle_count",
"+",
"1",
")",
"%",
"module",
".",
"every",
"!=",
"0",
":",
"log",
".",
"debug",
"(",
"\"Skipping {0} (every {1} iterations).\"",
".",
"format",
"(",
"module",
".",
"name",
",",
"module",
".",
"every",
")",
")",
"continue",
"if",
"module",
".",
"blob_keys",
"is",
"not",
"None",
":",
"blob_to_send",
"=",
"Blob",
"(",
"{",
"k",
":",
"self",
".",
"blob",
"[",
"k",
"]",
"for",
"k",
"in",
"module",
".",
"blob_keys",
"if",
"k",
"in",
"self",
".",
"blob",
"}",
")",
"else",
":",
"blob_to_send",
"=",
"self",
".",
"blob",
"log",
".",
"debug",
"(",
"\"Processing {0} \"",
".",
"format",
"(",
"module",
".",
"name",
")",
")",
"start",
"=",
"timer",
"(",
")",
"start_cpu",
"=",
"process_time",
"(",
")",
"new_blob",
"=",
"module",
"(",
"blob_to_send",
")",
"if",
"self",
".",
"timeit",
"or",
"module",
".",
"timeit",
":",
"self",
".",
"_timeit",
"[",
"module",
"]",
"[",
"'process'",
"]",
".",
"append",
"(",
"timer",
"(",
")",
"-",
"start",
")",
"self",
".",
"_timeit",
"[",
"module",
"]",
"[",
"'process_cpu'",
"]",
".",
"append",
"(",
"process_time",
"(",
")",
"-",
"start_cpu",
")",
"if",
"module",
".",
"blob_keys",
"is",
"not",
"None",
":",
"if",
"new_blob",
"is",
"not",
"None",
":",
"for",
"key",
"in",
"new_blob",
".",
"keys",
"(",
")",
":",
"self",
".",
"blob",
"[",
"key",
"]",
"=",
"new_blob",
"[",
"key",
"]",
"else",
":",
"self",
".",
"blob",
"=",
"new_blob",
"self",
".",
"_timeit",
"[",
"'cycles'",
"]",
".",
"append",
"(",
"timer",
"(",
")",
"-",
"cycle_start",
")",
"self",
".",
"_timeit",
"[",
"'cycles_cpu'",
"]",
".",
"append",
"(",
"process_time",
"(",
")",
"-",
"cycle_start_cpu",
")",
"self",
".",
"_cycle_count",
"+=",
"1",
"if",
"cycles",
"and",
"self",
".",
"_cycle_count",
">=",
"cycles",
":",
"raise",
"StopIteration",
"except",
"StopIteration",
":",
"log",
".",
"info",
"(",
"\"Nothing left to pump through.\"",
")",
"return",
"self",
".",
"finish",
"(",
")"
] | Activate the pump and let the flow go.
This will call the process() method on each attached module until
a StopIteration is raised, usually by a pump when it reached the EOF.
A StopIteration is also raised when self.cycles was set and the
number of cycles has reached that limit. | [
"Activate",
"the",
"pump",
"and",
"let",
"the",
"flow",
"go",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L258-L344 | train |
tamasgal/km3pipe | km3pipe/core.py | Pipeline._check_service_requirements | def _check_service_requirements(self):
"""Final comparison of provided and required modules"""
missing = self.services.get_missing_services(
self.required_services.keys()
)
if missing:
self.log.critical(
"Following services are required and missing: {}".format(
', '.join(missing)
)
)
return False
return True | python | def _check_service_requirements(self):
"""Final comparison of provided and required modules"""
missing = self.services.get_missing_services(
self.required_services.keys()
)
if missing:
self.log.critical(
"Following services are required and missing: {}".format(
', '.join(missing)
)
)
return False
return True | [
"def",
"_check_service_requirements",
"(",
"self",
")",
":",
"missing",
"=",
"self",
".",
"services",
".",
"get_missing_services",
"(",
"self",
".",
"required_services",
".",
"keys",
"(",
")",
")",
"if",
"missing",
":",
"self",
".",
"log",
".",
"critical",
"(",
"\"Following services are required and missing: {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"missing",
")",
")",
")",
"return",
"False",
"return",
"True"
] | Final comparison of provided and required services | [
"Final",
"comparison",
"of",
"provided",
"and",
"required",
"modules"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L346-L358 | train |
tamasgal/km3pipe | km3pipe/core.py | Pipeline.drain | def drain(self, cycles=None):
"""Execute _drain while trapping KeyboardInterrupt"""
if not self._check_service_requirements():
self.init_timer.stop()
return self.finish()
if self.anybar: self.anybar.change("orange")
self.init_timer.stop()
log.info("Trapping CTRL+C and starting to drain.")
signal.signal(signal.SIGINT, self._handle_ctrl_c)
with ignored(KeyboardInterrupt):
return self._drain(cycles) | python | def drain(self, cycles=None):
"""Execute _drain while trapping KeyboardInterrupt"""
if not self._check_service_requirements():
self.init_timer.stop()
return self.finish()
if self.anybar: self.anybar.change("orange")
self.init_timer.stop()
log.info("Trapping CTRL+C and starting to drain.")
signal.signal(signal.SIGINT, self._handle_ctrl_c)
with ignored(KeyboardInterrupt):
return self._drain(cycles) | [
"def",
"drain",
"(",
"self",
",",
"cycles",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_check_service_requirements",
"(",
")",
":",
"self",
".",
"init_timer",
".",
"stop",
"(",
")",
"return",
"self",
".",
"finish",
"(",
")",
"if",
"self",
".",
"anybar",
":",
"self",
".",
"anybar",
".",
"change",
"(",
"\"orange\"",
")",
"self",
".",
"init_timer",
".",
"stop",
"(",
")",
"log",
".",
"info",
"(",
"\"Trapping CTRL+C and starting to drain.\"",
")",
"signal",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"self",
".",
"_handle_ctrl_c",
")",
"with",
"ignored",
"(",
"KeyboardInterrupt",
")",
":",
"return",
"self",
".",
"_drain",
"(",
"cycles",
")"
] | Execute _drain while trapping KeyboardInterrupt | [
"Execute",
"_drain",
"while",
"trapping",
"KeyboardInterrupt"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L360-L371 | train |
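A minimal usage sketch for drain(), assuming km3pipe's documented Module/Pipeline API; the Counter module and the cycle limit are illustrative:

```python
import km3pipe as kp

class Counter(kp.Module):
    def configure(self):
        self.n = 0  # per-instance cycle counter

    def process(self, blob):
        self.n += 1
        return blob  # returning None would make downstream modules skip

pipe = kp.Pipeline()
pipe.attach(Counter)
pipe.drain(cycles=10)  # raises StopIteration internally after 10 cycles
```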
tamasgal/km3pipe | km3pipe/core.py | Pipeline._handle_ctrl_c | def _handle_ctrl_c(self, *args):
"""Handle the keyboard interrupts."""
if self.anybar: self.anybar.change("exclamation")
if self._stop:
print("\nForced shutdown...")
raise SystemExit
if not self._stop:
hline = 42 * '='
print(
'\n' + hline + "\nGot CTRL+C, waiting for current cycle...\n"
"Press CTRL+C again if you're in hurry!\n" + hline
)
self._stop = True | python | def _handle_ctrl_c(self, *args):
"""Handle the keyboard interrupts."""
if self.anybar: self.anybar.change("exclamation")
if self._stop:
print("\nForced shutdown...")
raise SystemExit
if not self._stop:
hline = 42 * '='
print(
'\n' + hline + "\nGot CTRL+C, waiting for current cycle...\n"
"Press CTRL+C again if you're in hurry!\n" + hline
)
self._stop = True | [
"def",
"_handle_ctrl_c",
"(",
"self",
",",
"*",
"args",
")",
":",
"if",
"self",
".",
"anybar",
":",
"self",
".",
"anybar",
".",
"change",
"(",
"\"exclamation\"",
")",
"if",
"self",
".",
"_stop",
":",
"print",
"(",
"\"\\nForced shutdown...\"",
")",
"raise",
"SystemExit",
"if",
"not",
"self",
".",
"_stop",
":",
"hline",
"=",
"42",
"*",
"'='",
"print",
"(",
"'\\n'",
"+",
"hline",
"+",
"\"\\nGot CTRL+C, waiting for current cycle...\\n\"",
"\"Press CTRL+C again if you're in hurry!\\n\"",
"+",
"hline",
")",
"self",
".",
"_stop",
"=",
"True"
] | Handle the keyboard interrupts. | [
"Handle",
"the",
"keyboard",
"interrupts",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L397-L410 | train |
tamasgal/km3pipe | km3pipe/core.py | Module.get | def get(self, name, default=None):
"""Return the value of the requested parameter or `default` if None."""
value = self.parameters.get(name)
self._processed_parameters.append(name)
if value is None:
return default
return value | python | def get(self, name, default=None):
"""Return the value of the requested parameter or `default` if None."""
value = self.parameters.get(name)
self._processed_parameters.append(name)
if value is None:
return default
return value | [
"def",
"get",
"(",
"self",
",",
"name",
",",
"default",
"=",
"None",
")",
":",
"value",
"=",
"self",
".",
"parameters",
".",
"get",
"(",
"name",
")",
"self",
".",
"_processed_parameters",
".",
"append",
"(",
"name",
")",
"if",
"value",
"is",
"None",
":",
"return",
"default",
"return",
"value"
] | Return the value of the requested parameter or `default` if None. | [
"Return",
"the",
"value",
"of",
"the",
"requested",
"parameter",
"or",
"default",
"if",
"None",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L531-L537 | train |
tamasgal/km3pipe | km3pipe/core.py | Module.require | def require(self, name):
"""Return the value of the requested parameter or raise an error."""
value = self.get(name)
if value is None:
raise TypeError(
"{0} requires the parameter '{1}'.".format(
self.__class__, name
)
)
return value | python | def require(self, name):
"""Return the value of the requested parameter or raise an error."""
value = self.get(name)
if value is None:
raise TypeError(
"{0} requires the parameter '{1}'.".format(
self.__class__, name
)
)
return value | [
"def",
"require",
"(",
"self",
",",
"name",
")",
":",
"value",
"=",
"self",
".",
"get",
"(",
"name",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"{0} requires the parameter '{1}'.\"",
".",
"format",
"(",
"self",
".",
"__class__",
",",
"name",
")",
")",
"return",
"value"
] | Return the value of the requested parameter or raise an error. | [
"Return",
"the",
"value",
"of",
"the",
"requested",
"parameter",
"or",
"raise",
"an",
"error",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L539-L548 | train |
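How get() and require() are typically paired inside a module's configure(); the parameter names here are hypothetical:

```python
import km3pipe as kp

class Writer(kp.Module):
    def configure(self):
        # 'filename' must be passed to attach(), otherwise TypeError is raised
        self.filename = self.require('filename')
        # 'verbose' is optional and falls back to the given default
        self.verbose = self.get('verbose', default=False)
```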
tamasgal/km3pipe | km3pipe/core.py | Module._check_unused_parameters | def _check_unused_parameters(self):
"""Check if any of the parameters passed in are ignored"""
all_params = set(self.parameters.keys())
processed_params = set(self._processed_parameters)
unused_params = all_params - processed_params - RESERVED_ARGS
if unused_params:
self.log.warning(
"The following parameters were ignored: {}".format(
', '.join(sorted(unused_params))
)
) | python | def _check_unused_parameters(self):
"""Check if any of the parameters passed in are ignored"""
all_params = set(self.parameters.keys())
processed_params = set(self._processed_parameters)
unused_params = all_params - processed_params - RESERVED_ARGS
if unused_params:
self.log.warning(
"The following parameters were ignored: {}".format(
', '.join(sorted(unused_params))
)
) | [
"def",
"_check_unused_parameters",
"(",
"self",
")",
":",
"all_params",
"=",
"set",
"(",
"self",
".",
"parameters",
".",
"keys",
"(",
")",
")",
"processed_params",
"=",
"set",
"(",
"self",
".",
"_processed_parameters",
")",
"unused_params",
"=",
"all_params",
"-",
"processed_params",
"-",
"RESERVED_ARGS",
"if",
"unused_params",
":",
"self",
".",
"log",
".",
"warning",
"(",
"\"The following parameters were ignored: {}\"",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"sorted",
"(",
"unused_params",
")",
")",
")",
")"
] | Check if any of the parameters passed in are ignored | [
"Check",
"if",
"any",
"of",
"the",
"parameters",
"passed",
"in",
"are",
"ignored"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L565-L576 | train |
tamasgal/km3pipe | km3pipe/core.py | Pump.open_file | def open_file(self, filename):
"""Open the file with filename"""
try:
if filename.endswith('.gz'):
self.blob_file = gzip.open(filename, 'rb')
else:
self.blob_file = open(filename, 'rb')
except TypeError:
log.error("Please specify a valid filename.")
raise SystemExit
except IOError as error_message:
log.error(error_message)
raise SystemExit | python | def open_file(self, filename):
"""Open the file with filename"""
try:
if filename.endswith('.gz'):
self.blob_file = gzip.open(filename, 'rb')
else:
self.blob_file = open(filename, 'rb')
except TypeError:
log.error("Please specify a valid filename.")
raise SystemExit
except IOError as error_message:
log.error(error_message)
raise SystemExit | [
"def",
"open_file",
"(",
"self",
",",
"filename",
")",
":",
"try",
":",
"if",
"filename",
".",
"endswith",
"(",
"'.gz'",
")",
":",
"self",
".",
"blob_file",
"=",
"gzip",
".",
"open",
"(",
"filename",
",",
"'rb'",
")",
"else",
":",
"self",
".",
"blob_file",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"except",
"TypeError",
":",
"log",
".",
"error",
"(",
"\"Please specify a valid filename.\"",
")",
"raise",
"SystemExit",
"except",
"IOError",
"as",
"error_message",
":",
"log",
".",
"error",
"(",
"error_message",
")",
"raise",
"SystemExit"
] | Open the file with filename | [
"Open",
"the",
"file",
"with",
"filename"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/core.py#L599-L611 | train |
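The transparent gzip-or-plain open pattern above, as a standalone sketch:

```python
import gzip

def open_maybe_gzipped(filename):
    # gzip.open and open share the binary read interface, so callers
    # can treat compressed and uncompressed blob files identically
    if filename.endswith('.gz'):
        return gzip.open(filename, 'rb')
    return open(filename, 'rb')
```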
ioos/pyoos | pyoos/utils/asatime.py | AsaTime.parse | def parse(cls, date_string):
"""
Parse any time string. Use a custom timezone matching if
the original matching does not pull one out.
"""
try:
date = dateparser.parse(date_string)
if date.tzinfo is None:
date = dateparser.parse(date_string, tzinfos=cls.tzd)
return date
except Exception:
raise ValueError("Could not parse date string!") | python | def parse(cls, date_string):
"""
Parse any time string. Use a custom timezone matching if
the original matching does not pull one out.
"""
try:
date = dateparser.parse(date_string)
if date.tzinfo is None:
date = dateparser.parse(date_string, tzinfos=cls.tzd)
return date
except Exception:
raise ValueError("Could not parse date string!") | [
"def",
"parse",
"(",
"cls",
",",
"date_string",
")",
":",
"try",
":",
"date",
"=",
"dateparser",
".",
"parse",
"(",
"date_string",
")",
"if",
"date",
".",
"tzinfo",
"is",
"None",
":",
"date",
"=",
"dateparser",
".",
"parse",
"(",
"date_string",
",",
"tzinfos",
"=",
"cls",
".",
"tzd",
")",
"return",
"date",
"except",
"Exception",
":",
"raise",
"ValueError",
"(",
"\"Could not parse date string!\"",
")"
] | Parse any time string. Use a custom timezone matching if
the original matching does not pull one out. | [
"Parse",
"any",
"time",
"string",
".",
"Use",
"a",
"custom",
"timezone",
"matching",
"if",
"the",
"original",
"matching",
"does",
"not",
"pull",
"one",
"out",
"."
] | 908660385029ecd8eccda8ab3a6b20b47b915c77 | https://github.com/ioos/pyoos/blob/908660385029ecd8eccda8ab3a6b20b47b915c77/pyoos/utils/asatime.py#L54-L65 | train |
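A sketch of the two-pass parse, assuming dateparser is dateutil.parser and cls.tzd maps timezone abbreviations to UTC offsets in seconds (the table below is an illustrative subset):

```python
from dateutil import parser as dateparser

tzd = {"EST": -5 * 3600, "CST": -6 * 3600}  # hypothetical subset of cls.tzd

date = dateparser.parse("2016-01-02 03:04 EST")
if date.tzinfo is None:  # 'EST' is unknown to the default parser
    date = dateparser.parse("2016-01-02 03:04 EST", tzinfos=tzd)
```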
developmentseed/sentinel-s3 | sentinel_s3/converter.py | epsg_code | def epsg_code(geojson):
""" get the espg code from the crs system """
if isinstance(geojson, dict):
if 'crs' in geojson:
urn = geojson['crs']['properties']['name'].split(':')
if 'EPSG' in urn:
try:
return int(urn[-1])
except (TypeError, ValueError):
return None
return None | python | def epsg_code(geojson):
""" get the espg code from the crs system """
if isinstance(geojson, dict):
if 'crs' in geojson:
urn = geojson['crs']['properties']['name'].split(':')
if 'EPSG' in urn:
try:
return int(urn[-1])
except (TypeError, ValueError):
return None
return None | [
"def",
"epsg_code",
"(",
"geojson",
")",
":",
"if",
"isinstance",
"(",
"geojson",
",",
"dict",
")",
":",
"if",
"'crs'",
"in",
"geojson",
":",
"urn",
"=",
"geojson",
"[",
"'crs'",
"]",
"[",
"'properties'",
"]",
"[",
"'name'",
"]",
".",
"split",
"(",
"':'",
")",
"if",
"'EPSG'",
"in",
"urn",
":",
"try",
":",
"return",
"int",
"(",
"urn",
"[",
"-",
"1",
"]",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"return",
"None",
"return",
"None"
] | get the EPSG code from the crs system | [
"get",
"the",
"espg",
"code",
"from",
"the",
"crs",
"system"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L24-L36 | train |
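For example, a geojson dict carrying an EPSG urn in its crs member (the code value is illustrative):

```python
from sentinel_s3.converter import epsg_code  # assuming this module layout

geojson = {
    'type': 'Polygon',
    'coordinates': [],
    'crs': {'type': 'name',
            'properties': {'name': 'urn:ogc:def:crs:EPSG:8.8.1:32633'}},
}
print(epsg_code(geojson))  # 32633 -- the last ':'-separated field
```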
developmentseed/sentinel-s3 | sentinel_s3/converter.py | convert_coordinates | def convert_coordinates(coords, origin, wgs84, wrapped):
""" Convert coordinates from one crs to another """
if isinstance(coords, list) or isinstance(coords, tuple):
try:
if isinstance(coords[0], list) or isinstance(coords[0], tuple):
return [convert_coordinates(list(c), origin, wgs84, wrapped) for c in coords]
elif isinstance(coords[0], float):
c = list(transform(origin, wgs84, *coords))
if wrapped and c[0] < -170:
c[0] = c[0] + 360
return c
except IndexError:
pass
return None | python | def convert_coordinates(coords, origin, wgs84, wrapped):
""" Convert coordinates from one crs to another """
if isinstance(coords, list) or isinstance(coords, tuple):
try:
if isinstance(coords[0], list) or isinstance(coords[0], tuple):
return [convert_coordinates(list(c), origin, wgs84, wrapped) for c in coords]
elif isinstance(coords[0], float):
c = list(transform(origin, wgs84, *coords))
if wrapped and c[0] < -170:
c[0] = c[0] + 360
return c
except IndexError:
pass
return None | [
"def",
"convert_coordinates",
"(",
"coords",
",",
"origin",
",",
"wgs84",
",",
"wrapped",
")",
":",
"if",
"isinstance",
"(",
"coords",
",",
"list",
")",
"or",
"isinstance",
"(",
"coords",
",",
"tuple",
")",
":",
"try",
":",
"if",
"isinstance",
"(",
"coords",
"[",
"0",
"]",
",",
"list",
")",
"or",
"isinstance",
"(",
"coords",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"return",
"[",
"convert_coordinates",
"(",
"list",
"(",
"c",
")",
",",
"origin",
",",
"wgs84",
",",
"wrapped",
")",
"for",
"c",
"in",
"coords",
"]",
"elif",
"isinstance",
"(",
"coords",
"[",
"0",
"]",
",",
"float",
")",
":",
"c",
"=",
"list",
"(",
"transform",
"(",
"origin",
",",
"wgs84",
",",
"*",
"coords",
")",
")",
"if",
"wrapped",
"and",
"c",
"[",
"0",
"]",
"<",
"-",
"170",
":",
"c",
"[",
"0",
"]",
"=",
"c",
"[",
"0",
"]",
"+",
"360",
"return",
"c",
"except",
"IndexError",
":",
"pass",
"return",
"None"
] | Convert coordinates from one crs to another | [
"Convert",
"coordinates",
"from",
"one",
"crs",
"to",
"another"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L56-L71 | train |
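The antimeridian unwrap applied to each converted point, in isolation (values illustrative):

```python
lon, lat = -175.3, -16.5  # a point just west of the antimeridian
wrapped = True            # set when the geometry straddles the +/-180 line
if wrapped and lon < -170:
    lon = lon + 360       # -175.3 -> 184.7, keeping the polygon contiguous
```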
developmentseed/sentinel-s3 | sentinel_s3/converter.py | to_latlon | def to_latlon(geojson, origin_espg=None):
"""
Convert a given geojson to wgs84. The original epsg must be included inside the crs
tag of geojson
"""
if isinstance(geojson, dict):
# get epsg code:
if origin_espg:
code = origin_espg
else:
code = epsg_code(geojson)
if code:
origin = Proj(init='epsg:%s' % code)
wgs84 = Proj(init='epsg:4326')
wrapped = test_wrap_coordinates(geojson['coordinates'], origin, wgs84)
new_coords = convert_coordinates(geojson['coordinates'], origin, wgs84, wrapped)
if new_coords:
geojson['coordinates'] = new_coords
try:
del geojson['crs']
except KeyError:
pass
return geojson | python | def to_latlon(geojson, origin_espg=None):
"""
Convert a given geojson to wgs84. The original epsg must be included inside the crs
tag of geojson
"""
if isinstance(geojson, dict):
# get epsg code:
if origin_espg:
code = origin_espg
else:
code = epsg_code(geojson)
if code:
origin = Proj(init='epsg:%s' % code)
wgs84 = Proj(init='epsg:4326')
wrapped = test_wrap_coordinates(geojson['coordinates'], origin, wgs84)
new_coords = convert_coordinates(geojson['coordinates'], origin, wgs84, wrapped)
if new_coords:
geojson['coordinates'] = new_coords
try:
del geojson['crs']
except KeyError:
pass
return geojson | [
"def",
"to_latlon",
"(",
"geojson",
",",
"origin_espg",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"geojson",
",",
"dict",
")",
":",
"# get epsg code:",
"if",
"origin_espg",
":",
"code",
"=",
"origin_espg",
"else",
":",
"code",
"=",
"epsg_code",
"(",
"geojson",
")",
"if",
"code",
":",
"origin",
"=",
"Proj",
"(",
"init",
"=",
"'epsg:%s'",
"%",
"code",
")",
"wgs84",
"=",
"Proj",
"(",
"init",
"=",
"'epsg:4326'",
")",
"wrapped",
"=",
"test_wrap_coordinates",
"(",
"geojson",
"[",
"'coordinates'",
"]",
",",
"origin",
",",
"wgs84",
")",
"new_coords",
"=",
"convert_coordinates",
"(",
"geojson",
"[",
"'coordinates'",
"]",
",",
"origin",
",",
"wgs84",
",",
"wrapped",
")",
"if",
"new_coords",
":",
"geojson",
"[",
"'coordinates'",
"]",
"=",
"new_coords",
"try",
":",
"del",
"geojson",
"[",
"'crs'",
"]",
"except",
"KeyError",
":",
"pass",
"return",
"geojson"
] | Convert a given geojson to wgs84. The original epsg must be included inside the crs
tag of geojson | [
"Convert",
"a",
"given",
"geojson",
"to",
"wgs84",
".",
"The",
"original",
"epsg",
"must",
"be",
"included",
"insde",
"the",
"crs",
"tag",
"of",
"geojson"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L74-L99 | train |
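The underlying pyproj call in isolation, in the pyproj 1.x style this module uses (the zone and coordinates are illustrative):

```python
from pyproj import Proj, transform

utm33n = Proj(init='epsg:32633')  # origin crs built from the EPSG code
wgs84 = Proj(init='epsg:4326')

lon, lat = transform(utm33n, wgs84, 500000.0, 4649776.0)
print(lon, lat)  # roughly 15.0 E, 42.0 N
```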
developmentseed/sentinel-s3 | sentinel_s3/converter.py | camelcase_underscore | def camelcase_underscore(name):
""" Convert camelcase names to underscore """
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | python | def camelcase_underscore(name):
""" Convert camelcase names to underscore """
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() | [
"def",
"camelcase_underscore",
"(",
"name",
")",
":",
"s1",
"=",
"re",
".",
"sub",
"(",
"'(.)([A-Z][a-z]+)'",
",",
"r'\\1_\\2'",
",",
"name",
")",
"return",
"re",
".",
"sub",
"(",
"'([a-z0-9])([A-Z])'",
",",
"r'\\1_\\2'",
",",
"s1",
")",
".",
"lower",
"(",
")"
] | Convert camelcase names to underscore | [
"Convert",
"camelcase",
"names",
"to",
"underscore"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L102-L105 | train |
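For example (assuming the function is importable from this module):

```python
from sentinel_s3.converter import camelcase_underscore

print(camelcase_underscore('utmZone'))       # utm_zone
print(camelcase_underscore('gridSquare'))    # grid_square
print(camelcase_underscore('HTTPResponse'))  # http_response
```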
developmentseed/sentinel-s3 | sentinel_s3/converter.py | get_tiles_list | def get_tiles_list(element):
"""
Returns the list of all tile names from Product_Organisation element
in metadata.xml
"""
tiles = {}
for el in element:
g = (el.findall('.//Granules') or el.findall('.//Granule'))[0]
name = g.attrib['granuleIdentifier']
name_parts = name.split('_')
mgs = name_parts[-2]
tiles[mgs] = name
return tiles | python | def get_tiles_list(element):
"""
Returns the list of all tile names from Product_Organisation element
in metadata.xml
"""
tiles = {}
for el in element:
g = (el.findall('.//Granules') or el.findall('.//Granule'))[0]
name = g.attrib['granuleIdentifier']
name_parts = name.split('_')
mgs = name_parts[-2]
tiles[mgs] = name
return tiles | [
"def",
"get_tiles_list",
"(",
"element",
")",
":",
"tiles",
"=",
"{",
"}",
"for",
"el",
"in",
"element",
":",
"g",
"=",
"(",
"el",
".",
"findall",
"(",
"'.//Granules'",
")",
"or",
"el",
".",
"findall",
"(",
"'.//Granule'",
")",
")",
"[",
"0",
"]",
"name",
"=",
"g",
".",
"attrib",
"[",
"'granuleIdentifier'",
"]",
"name_parts",
"=",
"name",
".",
"split",
"(",
"'_'",
")",
"mgs",
"=",
"name_parts",
"[",
"-",
"2",
"]",
"tiles",
"[",
"mgs",
"]",
"=",
"name",
"return",
"tiles"
] | Returns the list of all tile names from Product_Organisation element
in metadata.xml | [
"Returns",
"the",
"list",
"of",
"all",
"tile",
"names",
"from",
"Product_Organisation",
"element",
"in",
"metadata",
".",
"xml"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L108-L124 | train |
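The MGRS grid id sits second-from-last in the underscore-separated granule identifier (the identifier below is abbreviated for illustration):

```python
name = 'S2A_OPER_MSI_L1C_TL_SGS__20160807T054245_A005889_T33UUP_N02.04'
mgs = name.split('_')[-2]
print(mgs)  # T33UUP
```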
developmentseed/sentinel-s3 | sentinel_s3/converter.py | metadata_to_dict | def metadata_to_dict(metadata):
""" Looks at metadata.xml file of sentinel product and extract useful keys
Returns a python dict """
tree = etree.parse(metadata)
root = tree.getroot()
meta = OrderedDict()
keys = [
'SPACECRAFT_NAME',
'PRODUCT_STOP_TIME',
'Cloud_Coverage_Assessment',
'PROCESSING_LEVEL',
'PRODUCT_TYPE',
'PROCESSING_BASELINE',
'SENSING_ORBIT_NUMBER',
'SENSING_ORBIT_DIRECTION',
'PRODUCT_FORMAT',
]
# grab important keys from the file
for key in keys:
try:
meta[key.lower()] = root.findall('.//' + key)[0].text
except IndexError:
meta[key.lower()] = None
meta['product_cloud_coverage_assessment'] = float(meta.pop('cloud_coverage_assessment'))
meta['sensing_orbit_number'] = int(meta['sensing_orbit_number'])
# get tile list
meta['tiles'] = get_tiles_list(root.findall('.//Product_Organisation')[0])
# get available bands
if root.findall('.//Band_List'):
bands = root.findall('.//Band_List')[0]
meta['band_list'] = []
for b in bands:
band = b.text.replace('B', '')
if len(band) == 1:
band = 'B' + pad(band, 2)
else:
band = b.text
meta['band_list'].append(band)
else:
bands = root.findall('.//Spectral_Information_List')[0]
meta['band_list'] = []
for b in bands:
band = b.attrib['physicalBand'].replace('B', '')
if len(band) == 1:
band = 'B' + pad(band, 2)
else:
band = b.attrib['physicalBand']
meta['band_list'].append(band)
return meta | python | def metadata_to_dict(metadata):
""" Looks at metadata.xml file of sentinel product and extract useful keys
Returns a python dict """
tree = etree.parse(metadata)
root = tree.getroot()
meta = OrderedDict()
keys = [
'SPACECRAFT_NAME',
'PRODUCT_STOP_TIME',
'Cloud_Coverage_Assessment',
'PROCESSING_LEVEL',
'PRODUCT_TYPE',
'PROCESSING_BASELINE',
'SENSING_ORBIT_NUMBER',
'SENSING_ORBIT_DIRECTION',
'PRODUCT_FORMAT',
]
# grab important keys from the file
for key in keys:
try:
meta[key.lower()] = root.findall('.//' + key)[0].text
except IndexError:
meta[key.lower()] = None
meta['product_cloud_coverage_assessment'] = float(meta.pop('cloud_coverage_assessment'))
meta['sensing_orbit_number'] = int(meta['sensing_orbit_number'])
# get tile list
meta['tiles'] = get_tiles_list(root.findall('.//Product_Organisation')[0])
# get available bands
if root.findall('.//Band_List'):
bands = root.findall('.//Band_List')[0]
meta['band_list'] = []
for b in bands:
band = b.text.replace('B', '')
if len(band) == 1:
band = 'B' + pad(band, 2)
else:
band = b.text
meta['band_list'].append(band)
else:
bands = root.findall('.//Spectral_Information_List')[0]
meta['band_list'] = []
for b in bands:
band = b.attrib['physicalBand'].replace('B', '')
if len(band) == 1:
band = 'B' + pad(band, 2)
else:
band = b.attrib['physicalBand']
meta['band_list'].append(band)
return meta | [
"def",
"metadata_to_dict",
"(",
"metadata",
")",
":",
"tree",
"=",
"etree",
".",
"parse",
"(",
"metadata",
")",
"root",
"=",
"tree",
".",
"getroot",
"(",
")",
"meta",
"=",
"OrderedDict",
"(",
")",
"keys",
"=",
"[",
"'SPACECRAFT_NAME'",
",",
"'PRODUCT_STOP_TIME'",
",",
"'Cloud_Coverage_Assessment'",
",",
"'PROCESSING_LEVEL'",
",",
"'PRODUCT_TYPE'",
",",
"'PROCESSING_BASELINE'",
",",
"'SENSING_ORBIT_NUMBER'",
",",
"'SENSING_ORBIT_DIRECTION'",
",",
"'PRODUCT_FORMAT'",
",",
"]",
"# grab important keys from the file",
"for",
"key",
"in",
"keys",
":",
"try",
":",
"meta",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"root",
".",
"findall",
"(",
"'.//'",
"+",
"key",
")",
"[",
"0",
"]",
".",
"text",
"except",
"IndexError",
":",
"meta",
"[",
"key",
".",
"lower",
"(",
")",
"]",
"=",
"None",
"meta",
"[",
"'product_cloud_coverage_assessment'",
"]",
"=",
"float",
"(",
"meta",
".",
"pop",
"(",
"'cloud_coverage_assessment'",
")",
")",
"meta",
"[",
"'sensing_orbit_number'",
"]",
"=",
"int",
"(",
"meta",
"[",
"'sensing_orbit_number'",
"]",
")",
"# get tile list",
"meta",
"[",
"'tiles'",
"]",
"=",
"get_tiles_list",
"(",
"root",
".",
"findall",
"(",
"'.//Product_Organisation'",
")",
"[",
"0",
"]",
")",
"# get available bands",
"if",
"root",
".",
"findall",
"(",
"'.//Band_List'",
")",
":",
"bands",
"=",
"root",
".",
"findall",
"(",
"'.//Band_List'",
")",
"[",
"0",
"]",
"meta",
"[",
"'band_list'",
"]",
"=",
"[",
"]",
"for",
"b",
"in",
"bands",
":",
"band",
"=",
"b",
".",
"text",
".",
"replace",
"(",
"'B'",
",",
"''",
")",
"if",
"len",
"(",
"band",
")",
"==",
"1",
":",
"band",
"=",
"'B'",
"+",
"pad",
"(",
"band",
",",
"2",
")",
"else",
":",
"band",
"=",
"b",
".",
"text",
"meta",
"[",
"'band_list'",
"]",
".",
"append",
"(",
"band",
")",
"else",
":",
"bands",
"=",
"root",
".",
"findall",
"(",
"'.//Spectral_Information_List'",
")",
"[",
"0",
"]",
"meta",
"[",
"'band_list'",
"]",
"=",
"[",
"]",
"for",
"b",
"in",
"bands",
":",
"band",
"=",
"b",
".",
"attrib",
"[",
"'physicalBand'",
"]",
".",
"replace",
"(",
"'B'",
",",
"''",
")",
"if",
"len",
"(",
"band",
")",
"==",
"1",
":",
"band",
"=",
"'B'",
"+",
"pad",
"(",
"band",
",",
"2",
")",
"else",
":",
"band",
"=",
"b",
".",
"attrib",
"[",
"'physicalBand'",
"]",
"meta",
"[",
"'band_list'",
"]",
".",
"append",
"(",
"band",
")",
"return",
"meta"
] | Looks at metadata.xml file of sentinel product and extracts useful keys
Returns a python dict | [
"Looks",
"at",
"metadata",
".",
"xml",
"file",
"of",
"sentinel",
"product",
"and",
"extract",
"useful",
"keys",
"Returns",
"a",
"python",
"dict"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L127-L184 | train |
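The band-name normalization in isolation; pad below is a stand-in for the zero-padding helper this module imports:

```python
def pad(value, width):
    # stand-in: zero-pad to a fixed width, e.g. pad('1', 2) -> '01'
    return str(value).zfill(width)

for raw in ['1', '8A', '11']:
    band = 'B' + pad(raw, 2) if len(raw) == 1 else 'B' + raw
    print(band)  # B01, then B8A, then B11
```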
developmentseed/sentinel-s3 | sentinel_s3/converter.py | get_tile_geometry | def get_tile_geometry(path, origin_espg, tolerance=500):
""" Calculate the data and tile geometry for sentinel-2 tiles """
with rasterio.open(path) as src:
# Get tile geometry
b = src.bounds
tile_shape = Polygon([(b[0], b[1]), (b[2], b[1]), (b[2], b[3]), (b[0], b[3]), (b[0], b[1])])
tile_geojson = mapping(tile_shape)
# read first band of the image
image = src.read(1)
# create a mask of zero values
mask = image == 0.
# generate shapes of the mask
novalue_shape = shapes(image, mask=mask, transform=src.affine)
# generate polygons using shapely
novalue_shape = [Polygon(s['coordinates'][0]) for (s, v) in novalue_shape]
if novalue_shape:
# Make sure polygons are united
# also simplify the resulting polygon
union = cascaded_union(novalue_shape)
# generates a geojson
data_shape = tile_shape.difference(union)
# If there are multipolygons, select the largest one
if data_shape.geom_type == 'MultiPolygon':
areas = {p.area: i for i, p in enumerate(data_shape)}
largest = max(areas.keys())
data_shape = data_shape[areas[largest]]
# if the polygon has interior rings, remove them
if list(data_shape.interiors):
data_shape = Polygon(data_shape.exterior.coords)
data_shape = data_shape.simplify(tolerance, preserve_topology=False)
data_geojson = mapping(data_shape)
else:
data_geojson = tile_geojson
# convert coordinates to degrees
return (to_latlon(tile_geojson, origin_espg), to_latlon(data_geojson, origin_espg)) | python | def get_tile_geometry(path, origin_espg, tolerance=500):
""" Calculate the data and tile geometry for sentinel-2 tiles """
with rasterio.open(path) as src:
# Get tile geometry
b = src.bounds
tile_shape = Polygon([(b[0], b[1]), (b[2], b[1]), (b[2], b[3]), (b[0], b[3]), (b[0], b[1])])
tile_geojson = mapping(tile_shape)
# read first band of the image
image = src.read(1)
# create a mask of zero values
mask = image == 0.
# generate shapes of the mask
novalue_shape = shapes(image, mask=mask, transform=src.affine)
# generate polygons using shapely
novalue_shape = [Polygon(s['coordinates'][0]) for (s, v) in novalue_shape]
if novalue_shape:
# Make sure polygons are united
# also simplify the resulting polygon
union = cascaded_union(novalue_shape)
# generates a geojson
data_shape = tile_shape.difference(union)
# If there are multipolygons, select the largest one
if data_shape.geom_type == 'MultiPolygon':
areas = {p.area: i for i, p in enumerate(data_shape)}
largest = max(areas.keys())
data_shape = data_shape[areas[largest]]
# if the polygon has interior rings, remove them
if list(data_shape.interiors):
data_shape = Polygon(data_shape.exterior.coords)
data_shape = data_shape.simplify(tolerance, preserve_topology=False)
data_geojson = mapping(data_shape)
else:
data_geojson = tile_geojson
# convert coordinates to degrees
return (to_latlon(tile_geojson, origin_espg), to_latlon(data_geojson, origin_espg)) | [
"def",
"get_tile_geometry",
"(",
"path",
",",
"origin_espg",
",",
"tolerance",
"=",
"500",
")",
":",
"with",
"rasterio",
".",
"open",
"(",
"path",
")",
"as",
"src",
":",
"# Get tile geometry",
"b",
"=",
"src",
".",
"bounds",
"tile_shape",
"=",
"Polygon",
"(",
"[",
"(",
"b",
"[",
"0",
"]",
",",
"b",
"[",
"1",
"]",
")",
",",
"(",
"b",
"[",
"2",
"]",
",",
"b",
"[",
"1",
"]",
")",
",",
"(",
"b",
"[",
"2",
"]",
",",
"b",
"[",
"3",
"]",
")",
",",
"(",
"b",
"[",
"0",
"]",
",",
"b",
"[",
"3",
"]",
")",
",",
"(",
"b",
"[",
"0",
"]",
",",
"b",
"[",
"1",
"]",
")",
"]",
")",
"tile_geojson",
"=",
"mapping",
"(",
"tile_shape",
")",
"# read first band of the image",
"image",
"=",
"src",
".",
"read",
"(",
"1",
")",
"# create a mask of zero values",
"mask",
"=",
"image",
"==",
"0.",
"# generate shapes of the mask",
"novalue_shape",
"=",
"shapes",
"(",
"image",
",",
"mask",
"=",
"mask",
",",
"transform",
"=",
"src",
".",
"affine",
")",
"# generate polygons using shapely",
"novalue_shape",
"=",
"[",
"Polygon",
"(",
"s",
"[",
"'coordinates'",
"]",
"[",
"0",
"]",
")",
"for",
"(",
"s",
",",
"v",
")",
"in",
"novalue_shape",
"]",
"if",
"novalue_shape",
":",
"# Make sure polygons are united",
"# also simplify the resulting polygon",
"union",
"=",
"cascaded_union",
"(",
"novalue_shape",
")",
"# generates a geojson",
"data_shape",
"=",
"tile_shape",
".",
"difference",
"(",
"union",
")",
"# If there are multipolygons, select the largest one",
"if",
"data_shape",
".",
"geom_type",
"==",
"'MultiPolygon'",
":",
"areas",
"=",
"{",
"p",
".",
"area",
":",
"i",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"data_shape",
")",
"}",
"largest",
"=",
"max",
"(",
"areas",
".",
"keys",
"(",
")",
")",
"data_shape",
"=",
"data_shape",
"[",
"areas",
"[",
"largest",
"]",
"]",
"# if the polygon has interior rings, remove them",
"if",
"list",
"(",
"data_shape",
".",
"interiors",
")",
":",
"data_shape",
"=",
"Polygon",
"(",
"data_shape",
".",
"exterior",
".",
"coords",
")",
"data_shape",
"=",
"data_shape",
".",
"simplify",
"(",
"tolerance",
",",
"preserve_topology",
"=",
"False",
")",
"data_geojson",
"=",
"mapping",
"(",
"data_shape",
")",
"else",
":",
"data_geojson",
"=",
"tile_geojson",
"# convert cooridnates to degrees",
"return",
"(",
"to_latlon",
"(",
"tile_geojson",
",",
"origin_espg",
")",
",",
"to_latlon",
"(",
"data_geojson",
",",
"origin_espg",
")",
")"
] | Calculate the data and tile geometry for sentinel-2 tiles | [
"Calculate",
"the",
"data",
"and",
"tile",
"geometry",
"for",
"sentinel",
"-",
"2",
"tiles"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L187-L235 | train |
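The geometry arithmetic at the heart of this function, with toy polygons standing in for the raster-derived nodata shapes:

```python
from shapely.geometry import Polygon, mapping
from shapely.ops import cascaded_union

tile_shape = Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
novalue_shape = [Polygon([(0, 0), (4, 0), (0, 4)]),
                 Polygon([(6, 10), (10, 10), (10, 6)])]

data_shape = tile_shape.difference(cascaded_union(novalue_shape))
data_shape = data_shape.simplify(0.5, preserve_topology=False)
data_geojson = mapping(data_shape)  # valid-data footprint as geojson
```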
developmentseed/sentinel-s3 | sentinel_s3/converter.py | tile_metadata | def tile_metadata(tile, product, geometry_check=None):
""" Generate metadata for a given tile
- geometry_check is a function that determines whether to calculate the geometry by downloading
B01 and override provided geometry in tilejson. The meta object is passed to this function.
The function returns a True or False response.
"""
grid = 'T{0}{1}{2}'.format(pad(tile['utmZone'], 2), tile['latitudeBand'], tile['gridSquare'])
meta = OrderedDict({
'tile_name': product['tiles'][grid]
})
logger.info('%s Processing tile %s' % (threading.current_thread().name, tile['path']))
meta['date'] = tile['timestamp'].split('T')[0]
meta['thumbnail'] = '{1}/{0}/preview.jp2'.format(tile['path'], s3_url)
# remove unnecessary keys
product.pop('tiles')
tile.pop('datastrip')
bands = product.pop('band_list')
for k, v in iteritems(tile):
meta[camelcase_underscore(k)] = v
meta.update(product)
# construct download links
links = ['{2}/{0}/{1}.jp2'.format(meta['path'], b, s3_url) for b in bands]
meta['download_links'] = {
'aws_s3': links
}
meta['original_tile_meta'] = '{0}/{1}/tileInfo.json'.format(s3_url, meta['path'])
def internal_latlon(meta):
keys = ['tile_origin', 'tile_geometry', 'tile_data_geometry']
for key in keys:
if key in meta:
meta[key] = to_latlon(meta[key])
return meta
# change coordinates to wgs84 degrees
if geometry_check:
if geometry_check(meta):
meta = get_tile_geometry_from_s3(meta)
else:
meta = internal_latlon(meta)
else:
meta = internal_latlon(meta)
# rename path key to aws_path
meta['aws_path'] = meta.pop('path')
return meta | python | def tile_metadata(tile, product, geometry_check=None):
""" Generate metadata for a given tile
- geometry_check is a function that determines whether to calculate the geometry by downloading
B01 and override provided geometry in tilejson. The meta object is passed to this function.
The function returns a True or False response.
"""
grid = 'T{0}{1}{2}'.format(pad(tile['utmZone'], 2), tile['latitudeBand'], tile['gridSquare'])
meta = OrderedDict({
'tile_name': product['tiles'][grid]
})
logger.info('%s Processing tile %s' % (threading.current_thread().name, tile['path']))
meta['date'] = tile['timestamp'].split('T')[0]
meta['thumbnail'] = '{1}/{0}/preview.jp2'.format(tile['path'], s3_url)
# remove unnecessary keys
product.pop('tiles')
tile.pop('datastrip')
bands = product.pop('band_list')
for k, v in iteritems(tile):
meta[camelcase_underscore(k)] = v
meta.update(product)
# construct download links
links = ['{2}/{0}/{1}.jp2'.format(meta['path'], b, s3_url) for b in bands]
meta['download_links'] = {
'aws_s3': links
}
meta['original_tile_meta'] = '{0}/{1}/tileInfo.json'.format(s3_url, meta['path'])
def internal_latlon(meta):
keys = ['tile_origin', 'tile_geometry', 'tile_data_geometry']
for key in keys:
if key in meta:
meta[key] = to_latlon(meta[key])
return meta
# change coordinates to wgs84 degrees
if geometry_check:
if geometry_check(meta):
meta = get_tile_geometry_from_s3(meta)
else:
meta = internal_latlon(meta)
else:
meta = internal_latlon(meta)
# rename path key to aws_path
meta['aws_path'] = meta.pop('path')
return meta | [
"def",
"tile_metadata",
"(",
"tile",
",",
"product",
",",
"geometry_check",
"=",
"None",
")",
":",
"grid",
"=",
"'T{0}{1}{2}'",
".",
"format",
"(",
"pad",
"(",
"tile",
"[",
"'utmZone'",
"]",
",",
"2",
")",
",",
"tile",
"[",
"'latitudeBand'",
"]",
",",
"tile",
"[",
"'gridSquare'",
"]",
")",
"meta",
"=",
"OrderedDict",
"(",
"{",
"'tile_name'",
":",
"product",
"[",
"'tiles'",
"]",
"[",
"grid",
"]",
"}",
")",
"logger",
".",
"info",
"(",
"'%s Processing tile %s'",
"%",
"(",
"threading",
".",
"current_thread",
"(",
")",
".",
"name",
",",
"tile",
"[",
"'path'",
"]",
")",
")",
"meta",
"[",
"'date'",
"]",
"=",
"tile",
"[",
"'timestamp'",
"]",
".",
"split",
"(",
"'T'",
")",
"[",
"0",
"]",
"meta",
"[",
"'thumbnail'",
"]",
"=",
"'{1}/{0}/preview.jp2'",
".",
"format",
"(",
"tile",
"[",
"'path'",
"]",
",",
"s3_url",
")",
"# remove unnecessary keys",
"product",
".",
"pop",
"(",
"'tiles'",
")",
"tile",
".",
"pop",
"(",
"'datastrip'",
")",
"bands",
"=",
"product",
".",
"pop",
"(",
"'band_list'",
")",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"tile",
")",
":",
"meta",
"[",
"camelcase_underscore",
"(",
"k",
")",
"]",
"=",
"v",
"meta",
".",
"update",
"(",
"product",
")",
"# construct download links",
"links",
"=",
"[",
"'{2}/{0}/{1}.jp2'",
".",
"format",
"(",
"meta",
"[",
"'path'",
"]",
",",
"b",
",",
"s3_url",
")",
"for",
"b",
"in",
"bands",
"]",
"meta",
"[",
"'download_links'",
"]",
"=",
"{",
"'aws_s3'",
":",
"links",
"}",
"meta",
"[",
"'original_tile_meta'",
"]",
"=",
"'{0}/{1}/tileInfo.json'",
".",
"format",
"(",
"s3_url",
",",
"meta",
"[",
"'path'",
"]",
")",
"def",
"internal_latlon",
"(",
"meta",
")",
":",
"keys",
"=",
"[",
"'tile_origin'",
",",
"'tile_geometry'",
",",
"'tile_data_geometry'",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"in",
"meta",
":",
"meta",
"[",
"key",
"]",
"=",
"to_latlon",
"(",
"meta",
"[",
"key",
"]",
")",
"return",
"meta",
"# change coordinates to wsg4 degrees",
"if",
"geometry_check",
":",
"if",
"geometry_check",
"(",
"meta",
")",
":",
"meta",
"=",
"get_tile_geometry_from_s3",
"(",
"meta",
")",
"else",
":",
"meta",
"=",
"internal_latlon",
"(",
"meta",
")",
"else",
":",
"meta",
"=",
"internal_latlon",
"(",
"meta",
")",
"# rename path key to aws_path",
"meta",
"[",
"'aws_path'",
"]",
"=",
"meta",
".",
"pop",
"(",
"'path'",
")",
"return",
"meta"
] | Generate metadata for a given tile
- geometry_check is a function that determines whether to calculate the geometry by downloading
B01 and override provided geometry in tilejson. The meta object is passed to this function.
The function returns a True or False response. | [
"Generate",
"metadata",
"for",
"a",
"given",
"tile"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/converter.py#L266-L324 | train |
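A geometry_check callback receives the assembled meta dict and returns True to force recomputing the footprint from B01; the key and threshold below are hypothetical:

```python
def geometry_check(meta):
    # recompute geometry only for tiles reporting very low data coverage
    return meta.get('data_coverage_percentage', 100) < 5
```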
vslutov/turingmarkov | turingmarkov/__main__.py | load_markov | def load_markov(argv, stdin):
"""Load and return markov algorithm."""
if len(argv) > 3:
with open(argv[3]) as input_file:
return Algorithm(input_file.readlines())
else:
return Algorithm(stdin.readlines()) | python | def load_markov(argv, stdin):
"""Load and return markov algorithm."""
if len(argv) > 3:
with open(argv[3]) as input_file:
return Algorithm(input_file.readlines())
else:
return Algorithm(stdin.readlines()) | [
"def",
"load_markov",
"(",
"argv",
",",
"stdin",
")",
":",
"if",
"len",
"(",
"argv",
")",
">",
"3",
":",
"with",
"open",
"(",
"argv",
"[",
"3",
"]",
")",
"as",
"input_file",
":",
"return",
"Algorithm",
"(",
"input_file",
".",
"readlines",
"(",
")",
")",
"else",
":",
"return",
"Algorithm",
"(",
"stdin",
".",
"readlines",
"(",
")",
")"
] | Load and return markov algorithm. | [
"Load",
"and",
"return",
"markov",
"algorithm",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/__main__.py#L21-L27 | train |
vslutov/turingmarkov | turingmarkov/__main__.py | load_turing | def load_turing(argv, stdin):
"""Load and return turing machine."""
if len(argv) > 3:
with open(argv[3]) as input_file:
return build_machine(input_file.readlines())
else:
return build_machine(stdin.readlines()) | python | def load_turing(argv, stdin):
"""Load and return turing machine."""
if len(argv) > 3:
with open(argv[3]) as input_file:
return build_machine(input_file.readlines())
else:
return build_machine(stdin.readlines()) | [
"def",
"load_turing",
"(",
"argv",
",",
"stdin",
")",
":",
"if",
"len",
"(",
"argv",
")",
">",
"3",
":",
"with",
"open",
"(",
"argv",
"[",
"3",
"]",
")",
"as",
"input_file",
":",
"return",
"build_machine",
"(",
"input_file",
".",
"readlines",
"(",
")",
")",
"else",
":",
"return",
"build_machine",
"(",
"stdin",
".",
"readlines",
"(",
")",
")"
] | Load and return turing machine. | [
"Load",
"and",
"return",
"turing",
"machine",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/__main__.py#L29-L35 | train |
vslutov/turingmarkov | turingmarkov/__main__.py | main | def main(argv, stdin, stdout):
"""Execute, when user call turingmarkov."""
if len(argv) > 1 and argv[1:3] == ["compile", "markov"]:
algo = load_markov(argv, stdin)
print(algo.compile(), file=stdout)
elif len(argv) == 4 and argv[1:3] == ["run", "markov"]:
algo = load_markov(argv, stdin)
for line in stdin:
print(algo.execute(''.join(line.split())), file=stdout)
elif len(argv) > 1 and argv[1:3] == ["compile", "turing"]:
machine = load_turing(argv, stdin)
print(machine.compile(), file=stdout)
elif len(argv) == 4 and argv[1:3] == ["run", "turing"]:
machine = load_turing(argv, stdin)
for line in stdin:
print(machine.execute(line), file=stdout)
elif len(argv) == 2 and argv[1] == "test":
path = os.path.abspath(os.path.dirname(__file__))
argv[1] = path
pytest.main()
elif len(argv) == 2 and argv[1] == "version":
print("TuringMarkov", VERSION, file=stdout)
else:
print(USAGE, file=stdout)
if not (len(argv) == 2 and argv[1] == "help"):
exit(1) | python | def main(argv, stdin, stdout):
"""Execute, when user call turingmarkov."""
if len(argv) > 1 and argv[1:3] == ["compile", "markov"]:
algo = load_markov(argv, stdin)
print(algo.compile(), file=stdout)
elif len(argv) == 4 and argv[1:3] == ["run", "markov"]:
algo = load_markov(argv, stdin)
for line in stdin:
print(algo.execute(''.join(line.split())), file=stdout)
elif len(argv) > 1 and argv[1:3] == ["compile", "turing"]:
machine = load_turing(argv, stdin)
print(machine.compile(), file=stdout)
elif len(argv) == 4 and argv[1:3] == ["run", "turing"]:
machine = load_turing(argv, stdin)
for line in stdin:
print(machine.execute(line), file=stdout)
elif len(argv) == 2 and argv[1] == "test":
path = os.path.abspath(os.path.dirname(__file__))
argv[1] = path
pytest.main()
elif len(argv) == 2 and argv[1] == "version":
print("TuringMarkov", VERSION, file=stdout)
else:
print(USAGE, file=stdout)
if not (len(argv) == 2 and argv[1] == "help"):
exit(1) | [
"def",
"main",
"(",
"argv",
",",
"stdin",
",",
"stdout",
")",
":",
"if",
"len",
"(",
"argv",
")",
">",
"1",
"and",
"argv",
"[",
"1",
":",
"3",
"]",
"==",
"[",
"\"compile\"",
",",
"\"markov\"",
"]",
":",
"algo",
"=",
"load_markov",
"(",
"argv",
",",
"stdin",
")",
"print",
"(",
"algo",
".",
"compile",
"(",
")",
",",
"file",
"=",
"stdout",
")",
"elif",
"len",
"(",
"argv",
")",
"==",
"4",
"and",
"argv",
"[",
"1",
":",
"3",
"]",
"==",
"[",
"\"run\"",
",",
"\"markov\"",
"]",
":",
"algo",
"=",
"load_markov",
"(",
"argv",
",",
"stdin",
")",
"for",
"line",
"in",
"stdin",
":",
"print",
"(",
"algo",
".",
"execute",
"(",
"''",
".",
"join",
"(",
"line",
".",
"split",
"(",
")",
")",
")",
",",
"file",
"=",
"stdout",
")",
"elif",
"len",
"(",
"argv",
")",
">",
"1",
"and",
"argv",
"[",
"1",
":",
"3",
"]",
"==",
"[",
"\"compile\"",
",",
"\"turing\"",
"]",
":",
"machine",
"=",
"load_turing",
"(",
"argv",
",",
"stdin",
")",
"print",
"(",
"machine",
".",
"compile",
"(",
")",
",",
"file",
"=",
"stdout",
")",
"elif",
"len",
"(",
"argv",
")",
"==",
"4",
"and",
"argv",
"[",
"1",
":",
"3",
"]",
"==",
"[",
"\"run\"",
",",
"\"turing\"",
"]",
":",
"machine",
"=",
"load_turing",
"(",
"argv",
",",
"stdin",
")",
"for",
"line",
"in",
"stdin",
":",
"print",
"(",
"machine",
".",
"execute",
"(",
"line",
")",
",",
"file",
"=",
"stdout",
")",
"elif",
"len",
"(",
"argv",
")",
"==",
"2",
"and",
"argv",
"[",
"1",
"]",
"==",
"\"test\"",
":",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"argv",
"[",
"1",
"]",
"=",
"path",
"pytest",
".",
"main",
"(",
")",
"elif",
"len",
"(",
"argv",
")",
"==",
"2",
"and",
"argv",
"[",
"1",
"]",
"==",
"\"version\"",
":",
"print",
"(",
"\"TuringMarkov\"",
",",
"VERSION",
",",
"file",
"=",
"stdout",
")",
"else",
":",
"print",
"(",
"USAGE",
",",
"file",
"=",
"stdout",
")",
"if",
"not",
"(",
"len",
"(",
"argv",
")",
"==",
"2",
"and",
"argv",
"[",
"1",
"]",
"==",
"\"help\"",
")",
":",
"exit",
"(",
"1",
")"
] | Execute when the user calls turingmarkov. | [
"Execute",
"when",
"user",
"call",
"turingmarkov",
"."
] | 63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce | https://github.com/vslutov/turingmarkov/blob/63e2ba255d7d0d868cbc4bf3e568b1c1bbcf38ce/turingmarkov/__main__.py#L37-L65 | train |
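Dispatch can be exercised directly by handing main() an argv list (assuming the module is importable as below):

```python
import sys
from turingmarkov.__main__ import main

main(['turingmarkov', 'version'], sys.stdin, sys.stdout)  # prints the version
main(['turingmarkov', 'help'], sys.stdin, sys.stdout)     # prints USAGE, no exit(1)
```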
tamasgal/km3pipe | km3pipe/cmd.py | detectors | def detectors(regex=None, sep='\t', temporary=False):
"""Print the detectors table"""
db = DBManager(temporary=temporary)
dt = db.detectors
if regex is not None:
try:
re.compile(regex)
except re.error:
log.error("Invalid regex!")
return
dt = dt[dt['OID'].str.contains(regex) | dt['CITY'].str.contains(regex)]
dt.to_csv(sys.stdout, sep=sep) | python | def detectors(regex=None, sep='\t', temporary=False):
"""Print the detectors table"""
db = DBManager(temporary=temporary)
dt = db.detectors
if regex is not None:
try:
re.compile(regex)
except re.error:
log.error("Invalid regex!")
return
dt = dt[dt['OID'].str.contains(regex) | dt['CITY'].str.contains(regex)]
dt.to_csv(sys.stdout, sep=sep) | [
"def",
"detectors",
"(",
"regex",
"=",
"None",
",",
"sep",
"=",
"'\\t'",
",",
"temporary",
"=",
"False",
")",
":",
"db",
"=",
"DBManager",
"(",
"temporary",
"=",
"temporary",
")",
"dt",
"=",
"db",
".",
"detectors",
"if",
"regex",
"is",
"not",
"None",
":",
"try",
":",
"re",
".",
"compile",
"(",
"regex",
")",
"except",
"re",
".",
"error",
":",
"log",
".",
"error",
"(",
"\"Invalid regex!\"",
")",
"return",
"dt",
"=",
"dt",
"[",
"dt",
"[",
"'OID'",
"]",
".",
"str",
".",
"contains",
"(",
"regex",
")",
"|",
"dt",
"[",
"'CITY'",
"]",
".",
"str",
".",
"contains",
"(",
"regex",
")",
"]",
"dt",
".",
"to_csv",
"(",
"sys",
".",
"stdout",
",",
"sep",
"=",
"sep",
")"
] | Print the detectors table | [
"Print",
"the",
"detectors",
"table"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/cmd.py#L112-L123 | train |
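The pandas filtering step in isolation (the column values are made up):

```python
import sys
import pandas as pd

dt = pd.DataFrame({'OID': ['D_ARCA003', 'D_ORCA001'],
                   'CITY': ['Italy', 'Marseille']})
regex = 'ORCA'
dt = dt[dt['OID'].str.contains(regex) | dt['CITY'].str.contains(regex)]
dt.to_csv(sys.stdout, sep='\t')  # keeps only the matching row
```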
developmentseed/sentinel-s3 | sentinel_s3/crawler.py | get_product_metadata_path | def get_product_metadata_path(product_name):
""" gets a single products metadata """
string_date = product_name.split('_')[-1]
date = datetime.datetime.strptime(string_date, '%Y%m%dT%H%M%S')
path = 'products/{0}/{1}/{2}/{3}'.format(date.year, date.month, date.day, product_name)
return {
product_name: {
'metadata': '{0}/{1}'.format(path, 'metadata.xml'),
'tiles': get_tile_metadata_path('{0}/{1}'.format(path, 'productInfo.json'))
}
} | python | def get_product_metadata_path(product_name):
""" gets a single products metadata """
string_date = product_name.split('_')[-1]
date = datetime.datetime.strptime(string_date, '%Y%m%dT%H%M%S')
path = 'products/{0}/{1}/{2}/{3}'.format(date.year, date.month, date.day, product_name)
return {
product_name: {
'metadata': '{0}/{1}'.format(path, 'metadata.xml'),
'tiles': get_tile_metadata_path('{0}/{1}'.format(path, 'productInfo.json'))
}
} | [
"def",
"get_product_metadata_path",
"(",
"product_name",
")",
":",
"string_date",
"=",
"product_name",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
"date",
"=",
"datetime",
".",
"datetime",
".",
"strptime",
"(",
"string_date",
",",
"'%Y%m%dT%H%M%S'",
")",
"path",
"=",
"'products/{0}/{1}/{2}/{3}'",
".",
"format",
"(",
"date",
".",
"year",
",",
"date",
".",
"month",
",",
"date",
".",
"day",
",",
"product_name",
")",
"return",
"{",
"product_name",
":",
"{",
"'metadata'",
":",
"'{0}/{1}'",
".",
"format",
"(",
"path",
",",
"'metadata.xml'",
")",
",",
"'tiles'",
":",
"get_tile_metadata_path",
"(",
"'{0}/{1}'",
".",
"format",
"(",
"path",
",",
"'productInfo.json'",
")",
")",
"}",
"}"
] | gets a single product's metadata | [
"gets",
"a",
"single",
"products",
"metadata"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/crawler.py#L24-L36 | train |
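The date is recovered from the trailing timestamp of the product name (the name below is illustrative):

```python
import datetime

product_name = 'S2A_MSIL1C_20160806T102612'  # hypothetical product name
string_date = product_name.split('_')[-1]
date = datetime.datetime.strptime(string_date, '%Y%m%dT%H%M%S')
path = 'products/{0}/{1}/{2}/{3}'.format(date.year, date.month, date.day,
                                         product_name)
print(path)  # products/2016/8/6/S2A_MSIL1C_20160806T102612
```

Note that the month and day are deliberately unpadded, matching the bucket layout this module assumes.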
developmentseed/sentinel-s3 | sentinel_s3/crawler.py | get_products_metadata_path | def get_products_metadata_path(year, month, day):
""" Get paths to multiple products metadata """
products = {}
path = 'products/{0}/{1}/{2}/'.format(year, month, day)
for key in bucket.objects.filter(Prefix=path):
product_path = key.key.replace(path, '').split('/')
name = product_path[0]
if name not in products:
products[name] = {}
if product_path[1] == 'metadata.xml':
products[name]['metadata'] = key.key
if product_path[1] == 'productInfo.json':
products[name]['tiles'] = get_tile_metadata_path(key.key)
return products | python | def get_products_metadata_path(year, month, day):
""" Get paths to multiple products metadata """
products = {}
path = 'products/{0}/{1}/{2}/'.format(year, month, day)
for key in bucket.objects.filter(Prefix=path):
product_path = key.key.replace(path, '').split('/')
name = product_path[0]
if name not in products:
products[name] = {}
if product_path[1] == 'metadata.xml':
products[name]['metadata'] = key.key
if product_path[1] == 'productInfo.json':
products[name]['tiles'] = get_tile_metadata_path(key.key)
return products | [
"def",
"get_products_metadata_path",
"(",
"year",
",",
"month",
",",
"day",
")",
":",
"products",
"=",
"{",
"}",
"path",
"=",
"'products/{0}/{1}/{2}/'",
".",
"format",
"(",
"year",
",",
"month",
",",
"day",
")",
"for",
"key",
"in",
"bucket",
".",
"objects",
".",
"filter",
"(",
"Prefix",
"=",
"path",
")",
":",
"product_path",
"=",
"key",
".",
"key",
".",
"replace",
"(",
"path",
",",
"''",
")",
".",
"split",
"(",
"'/'",
")",
"name",
"=",
"product_path",
"[",
"0",
"]",
"if",
"name",
"not",
"in",
"products",
":",
"products",
"[",
"name",
"]",
"=",
"{",
"}",
"if",
"product_path",
"[",
"1",
"]",
"==",
"'metadata.xml'",
":",
"products",
"[",
"name",
"]",
"[",
"'metadata'",
"]",
"=",
"key",
".",
"key",
"if",
"product_path",
"[",
"1",
"]",
"==",
"'productInfo.json'",
":",
"products",
"[",
"name",
"]",
"[",
"'tiles'",
"]",
"=",
"get_tile_metadata_path",
"(",
"key",
".",
"key",
")",
"return",
"products"
] | Get paths to multiple products' metadata | [
"Get",
"paths",
"to",
"multiple",
"products",
"metadata"
] | 02bf2f9cb6aff527e492b39518a54f0b4613ddda | https://github.com/developmentseed/sentinel-s3/blob/02bf2f9cb6aff527e492b39518a54f0b4613ddda/sentinel_s3/crawler.py#L39-L56 | train |
astooke/gtimer | gtimer/public/timer.py | start | def start(backdate=None):
"""
Mark the start of timing, overwriting the automatic start data written on
import, or the automatic start at the beginning of a subdivision.
Notes:
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp in the parent timer.
Args:
backdate (float, optional): time to use for start instead of current.
Returns:
float: The current time.
Raises:
BackdateError: If given backdate time is out of range or used in root timer.
StartError: If the timer is not in a pristine state (if any stamps or
subdivisions, must reset instead).
StoppedError: If the timer is already stopped (must reset instead).
TypeError: If given backdate value is not type float.
"""
if f.s.cum:
raise StartError("Already have stamps, can't start again (must reset).")
if f.t.subdvsn_awaiting or f.t.par_subdvsn_awaiting:
raise StartError("Already have subdivisions, can't start again (must reset).")
if f.t.stopped:
raise StoppedError("Timer already stopped (must open new or reset).")
t = timer()
if backdate is None:
t_start = t
else:
if f.t is f.root:
raise BackdateError("Cannot backdate start of root timer.")
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.tm1.last_t:
raise BackdateError("Cannot backdate start to time previous to latest stamp in parent timer.")
t_start = backdate
f.t.paused = False
f.t.tmp_total = 0. # (In case previously paused.)
f.t.start_t = t_start
f.t.last_t = t_start
return t | python | def start(backdate=None):
"""
Mark the start of timing, overwriting the automatic start data written on
import, or the automatic start at the beginning of a subdivision.
Notes:
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp in the parent timer.
Args:
backdate (float, optional): time to use for start instead of current.
Returns:
float: The current time.
Raises:
BackdateError: If given backdate time is out of range or used in root timer.
StartError: If the timer is not in a pristine state (if any stamps or
subdivisions, must reset instead).
StoppedError: If the timer is already stopped (must reset instead).
TypeError: If given backdate value is not type float.
"""
if f.s.cum:
raise StartError("Already have stamps, can't start again (must reset).")
if f.t.subdvsn_awaiting or f.t.par_subdvsn_awaiting:
raise StartError("Already have subdivisions, can't start again (must reset).")
if f.t.stopped:
raise StoppedError("Timer already stopped (must open new or reset).")
t = timer()
if backdate is None:
t_start = t
else:
if f.t is f.root:
raise BackdateError("Cannot backdate start of root timer.")
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.tm1.last_t:
raise BackdateError("Cannot backdate start to time previous to latest stamp in parent timer.")
t_start = backdate
f.t.paused = False
f.t.tmp_total = 0. # (In case previously paused.)
f.t.start_t = t_start
f.t.last_t = t_start
return t | [
"def",
"start",
"(",
"backdate",
"=",
"None",
")",
":",
"if",
"f",
".",
"s",
".",
"cum",
":",
"raise",
"StartError",
"(",
"\"Already have stamps, can't start again (must reset).\"",
")",
"if",
"f",
".",
"t",
".",
"subdvsn_awaiting",
"or",
"f",
".",
"t",
".",
"par_subdvsn_awaiting",
":",
"raise",
"StartError",
"(",
"\"Already have subdivisions, can't start again (must reset).\"",
")",
"if",
"f",
".",
"t",
".",
"stopped",
":",
"raise",
"StoppedError",
"(",
"\"Timer already stopped (must open new or reset).\"",
")",
"t",
"=",
"timer",
"(",
")",
"if",
"backdate",
"is",
"None",
":",
"t_start",
"=",
"t",
"else",
":",
"if",
"f",
".",
"t",
"is",
"f",
".",
"root",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate start of root timer.\"",
")",
"if",
"not",
"isinstance",
"(",
"backdate",
",",
"float",
")",
":",
"raise",
"TypeError",
"(",
"\"Backdate must be type float.\"",
")",
"if",
"backdate",
">",
"t",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate to future time.\"",
")",
"if",
"backdate",
"<",
"f",
".",
"tm1",
".",
"last_t",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate start to time previous to latest stamp in parent timer.\"",
")",
"t_start",
"=",
"backdate",
"f",
".",
"t",
".",
"paused",
"=",
"False",
"f",
".",
"t",
".",
"tmp_total",
"=",
"0.",
"# (In case previously paused.)",
"f",
".",
"t",
".",
"start_t",
"=",
"t_start",
"f",
".",
"t",
".",
"last_t",
"=",
"t_start",
"return",
"t"
] | Mark the start of timing, overwriting the automatic start data written on
import, or the automatic start at the beginning of a subdivision.
Notes:
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp in the parent timer.
Args:
backdate (float, optional): time to use for start instead of current.
Returns:
float: The current time.
Raises:
BackdateError: If given backdate time is out of range or used in root timer.
StartError: If the timer is not in a pristine state (if any stamps or
subdivisions, must reset instead).
StoppedError: If the timer is already stopped (must reset instead).
TypeError: If given backdate value is not type float. | [
"Mark",
"the",
"start",
"of",
"timing",
"overwriting",
"the",
"automatic",
"start",
"data",
"written",
"on",
"import",
"or",
"the",
"automatic",
"start",
"at",
"the",
"beginning",
"of",
"a",
"subdivision",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timer.py#L40-L85 | train |
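A minimal sketch of start(), assuming gtimer exposes the functions in gtimer/public/ at the package top level (the import alias is a convention, not confirmed by this entry):

import gtimer as gt

# gtimer records an automatic start on import; start() on a pristine
# timer overwrites that automatic start time and returns the current time.
t0 = gt.start()
# ... timed work ...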
astooke/gtimer | gtimer/public/timer.py | stamp | def stamp(name, backdate=None,
unique=None, keep_subdivisions=None, quick_print=None,
un=None, ks=None, qp=None):
"""
Mark the end of a timing interval.
Notes:
If keeping subdivisions, each subdivision currently awaiting
assignment to a stamp (i.e. ended since the last stamp in this level)
will be assigned to this one. Otherwise, all awaiting ones will be
discarded after aggregating their self times into the current timer.
If both long- and short-form are present, they are OR'ed together. If
neither are present, the current global default is used.
Backdating: record a stamp as if it happened at an earlier time.
Backdate time must be in the past but more recent than the latest stamp.
(This can be useful for parallel applications, wherein a sub-process
can return times of interest to the master process.)
Warning:
When backdating, awaiting subdivisions will be assigned as normal, with
no additional checks for validity.
Args:
name (any): The identifier for this interval, processed through str()
backdate (float, optional): time to use for stamp instead of current
unique (bool, optional): enforce uniqueness
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): print elapsed interval time
un (bool, optional): short-form for unique
ks (bool, optional): short-form for keep_subdivisions
qp (bool, optional): short-form for quick_print
Returns:
float: The current time.
Raises:
BackdateError: If the given backdate time is out of range.
PausedError: If the timer is paused.
StoppedError: If the timer is stopped.
TypeError: If the given backdate value is not type float.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot stamp stopped timer.")
if f.t.paused:
raise PausedError("Cannot stamp paused timer.")
if backdate is None:
t_stamp = t
else:
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.t.last_t:
raise BackdateError("Cannot backdate to time earlier than last stamp.")
t_stamp = backdate
elapsed = t_stamp - f.t.last_t
# Logic: default unless either arg used. if both args used, 'or' them.
unique = SET['UN'] if (unique is None and un is None) else bool(unique or un) # bool(None) becomes False
keep_subdivisions = SET['KS'] if (keep_subdivisions is None and ks is None) else bool(keep_subdivisions or ks)
quick_print = SET['QP'] if (quick_print is None and qp is None) else bool(quick_print or qp)
_stamp(name, elapsed, unique, keep_subdivisions, quick_print)
tmp_self = timer() - t
f.t.self_cut += tmp_self
f.t.last_t = t_stamp + tmp_self
return t | python | def stamp(name, backdate=None,
unique=None, keep_subdivisions=None, quick_print=None,
un=None, ks=None, qp=None):
"""
Mark the end of a timing interval.
Notes:
If keeping subdivisions, each subdivision currently awaiting
assignment to a stamp (i.e. ended since the last stamp in this level)
will be assigned to this one. Otherwise, all awaiting ones will be
discarded after aggregating their self times into the current timer.
If both long- and short-form are present, they are OR'ed together. If
neither are present, the current global default is used.
Backdating: record a stamp as if it happened at an earlier time.
Backdate time must be in the past but more recent than the latest stamp.
(This can be useful for parallel applications, wherein a sub-process
can return times of interest to the master process.)
Warning:
When backdating, awaiting subdivisions will be assigned as normal, with
no additional checks for validity.
Args:
name (any): The identifier for this interval, processed through str()
backdate (float, optional): time to use for stamp instead of current
unique (bool, optional): enforce uniqueness
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): print elapsed interval time
un (bool, optional): short-form for unique
ks (bool, optional): short-form for keep_subdivisions
qp (bool, optional): short-form for quick_print
Returns:
float: The current time.
Raises:
BackdateError: If the given backdate time is out of range.
PausedError: If the timer is paused.
StoppedError: If the timer is stopped.
TypeError: If the given backdate value is not type float.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot stamp stopped timer.")
if f.t.paused:
raise PausedError("Cannot stamp paused timer.")
if backdate is None:
t_stamp = t
else:
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.t.last_t:
raise BackdateError("Cannot backdate to time earlier than last stamp.")
t_stamp = backdate
elapsed = t_stamp - f.t.last_t
# Logic: default unless either arg used. if both args used, 'or' them.
unique = SET['UN'] if (unique is None and un is None) else bool(unique or un) # bool(None) becomes False
keep_subdivisions = SET['KS'] if (keep_subdivisions is None and ks is None) else bool(keep_subdivisions or ks)
quick_print = SET['QP'] if (quick_print is None and qp is None) else bool(quick_print or qp)
_stamp(name, elapsed, unique, keep_subdivisions, quick_print)
tmp_self = timer() - t
f.t.self_cut += tmp_self
f.t.last_t = t_stamp + tmp_self
return t | [
"def",
"stamp",
"(",
"name",
",",
"backdate",
"=",
"None",
",",
"unique",
"=",
"None",
",",
"keep_subdivisions",
"=",
"None",
",",
"quick_print",
"=",
"None",
",",
"un",
"=",
"None",
",",
"ks",
"=",
"None",
",",
"qp",
"=",
"None",
")",
":",
"t",
"=",
"timer",
"(",
")",
"if",
"f",
".",
"t",
".",
"stopped",
":",
"raise",
"StoppedError",
"(",
"\"Cannot stamp stopped timer.\"",
")",
"if",
"f",
".",
"t",
".",
"paused",
":",
"raise",
"PausedError",
"(",
"\"Cannot stamp paused timer.\"",
")",
"if",
"backdate",
"is",
"None",
":",
"t_stamp",
"=",
"t",
"else",
":",
"if",
"not",
"isinstance",
"(",
"backdate",
",",
"float",
")",
":",
"raise",
"TypeError",
"(",
"\"Backdate must be type float.\"",
")",
"if",
"backdate",
">",
"t",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate to future time.\"",
")",
"if",
"backdate",
"<",
"f",
".",
"t",
".",
"last_t",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate to time earlier than last stamp.\"",
")",
"t_stamp",
"=",
"backdate",
"elapsed",
"=",
"t_stamp",
"-",
"f",
".",
"t",
".",
"last_t",
"# Logic: default unless either arg used. if both args used, 'or' them.",
"unique",
"=",
"SET",
"[",
"'UN'",
"]",
"if",
"(",
"unique",
"is",
"None",
"and",
"un",
"is",
"None",
")",
"else",
"bool",
"(",
"unique",
"or",
"un",
")",
"# bool(None) becomes False",
"keep_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
"if",
"(",
"keep_subdivisions",
"is",
"None",
"and",
"ks",
"is",
"None",
")",
"else",
"bool",
"(",
"keep_subdivisions",
"or",
"ks",
")",
"quick_print",
"=",
"SET",
"[",
"'QP'",
"]",
"if",
"(",
"quick_print",
"is",
"None",
"and",
"qp",
"is",
"None",
")",
"else",
"bool",
"(",
"quick_print",
"or",
"qp",
")",
"_stamp",
"(",
"name",
",",
"elapsed",
",",
"unique",
",",
"keep_subdivisions",
",",
"quick_print",
")",
"tmp_self",
"=",
"timer",
"(",
")",
"-",
"t",
"f",
".",
"t",
".",
"self_cut",
"+=",
"tmp_self",
"f",
".",
"t",
".",
"last_t",
"=",
"t_stamp",
"+",
"tmp_self",
"return",
"t"
] | Mark the end of a timing interval.
Notes:
If keeping subdivisions, each subdivision currently awaiting
assignment to a stamp (i.e. ended since the last stamp in this level)
will be assigned to this one. Otherwise, all awaiting ones will be
discarded after aggregating their self times into the current timer.
If both long- and short-form are present, they are OR'ed together. If
neither are present, the current global default is used.
Backdating: record a stamp as if it happened at an earlier time.
Backdate time must be in the past but more recent than the latest stamp.
(This can be useful for parallel applications, wherein a sub-process
can return times of interest to the master process.)
Warning:
When backdating, awaiting subdivisions will be assigned as normal, with
no additional checks for validity.
Args:
name (any): The identifier for this interval, processed through str()
backdate (float, optional): time to use for stamp instead of current
unique (bool, optional): enforce uniqueness
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): print elapsed interval time
un (bool, optional): short-form for unique
ks (bool, optional): short-form for keep_subdivisions
qp (bool, optional): short-form for quick_print
Returns:
float: The current time.
Raises:
BackdateError: If the given backdate time is out of range.
PausedError: If the timer is paused.
StoppedError: If the timer is stopped.
TypeError: If the given backdate value is not type float. | [
"Mark",
"the",
"end",
"of",
"a",
"timing",
"interval",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timer.py#L88-L155 | train |
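A sketch of stamp() and its short-form flags, under the same top-level-import assumption:

import gtimer as gt

gt.start()
setup = sum(range(1000))             # stand-in workload
gt.stamp('setup')                    # interval measured since start()
train = sum(range(2000))
gt.stamp('train', un=True, qp=True)  # short forms of unique / quick_print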
astooke/gtimer | gtimer/public/timer.py | stop | def stop(name=None, backdate=None,
unique=None, keep_subdivisions=None, quick_print=None,
un=None, ks=None, qp=None):
"""
Mark the end of timing. Optionally performs a stamp, hence accepts the
same arguments.
Notes:
If keeping subdivisions and not calling a stamp, any awaiting subdivisions
will be assigned to a special 'UNASSIGNED' position to indicate that they
are not properly accounted for in the hierarchy (these can happen at
different places and may be combined inadvertently).
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp.
Args:
name (any, optional): If used, passed to a call to stamp()
backdate (float, optional): time to use for stop instead of current
unique (bool, optional): see stamp()
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): boolean, print total time
un (bool, optional): see stamp()
ks (bool, optional): see stamp()
qp (bool, optional): see stamp()
Returns:
float: The current time.
Raises:
BackdateError: If given backdate is out of range, or if used in root timer.
PausedError: If attempting stamp in paused timer.
StoppedError: If timer already stopped.
TypeError: If given backdate value is not type float.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Timer already stopped.")
if backdate is None:
t_stop = t
else:
if f.t is f.root:
raise BackdateError("Cannot backdate stop of root timer.")
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.t.last_t:
raise BackdateError("Cannot backdate to time earlier than last stamp.")
t_stop = backdate
unique = SET['UN'] if (unique is None and un is None) else bool(unique or un) # bool(None) becomes False
keep_subdivisions = SET['KS'] if (keep_subdivisions is None and ks is None) else bool(keep_subdivisions or ks)
quick_print = SET['QP'] if (quick_print is None and qp is None) else bool(quick_print or qp)
if name is not None:
if f.t.paused:
raise PausedError("Cannot stamp paused timer.")
elapsed = t_stop - f.t.last_t
_stamp(name, elapsed, unique, keep_subdivisions, quick_print)
else:
times_priv.assign_subdivisions(UNASGN, keep_subdivisions)
for s in f.t.rgstr_stamps:
if s not in f.s.cum:
f.s.cum[s] = 0.
f.s.order.append(s)
if not f.t.paused:
f.t.tmp_total += t_stop - f.t.start_t
f.t.tmp_total -= f.t.self_cut
f.t.self_cut += timer() - t # AFTER subtraction from tmp_total, before dump
times_priv.dump_times()
f.t.stopped = True
if quick_print:
print("({}) Total: {:.4f}".format(f.t.name, f.r.total))
return t | python | def stop(name=None, backdate=None,
unique=None, keep_subdivisions=None, quick_print=None,
un=None, ks=None, qp=None):
"""
Mark the end of timing. Optionally performs a stamp, hence accepts the
same arguments.
Notes:
If keeping subdivisions and not calling a stamp, any awaiting subdivisions
will be assigned to a special 'UNASSIGNED' position to indicate that they
are not properly accounted for in the hierarchy (these can happen at
different places and may be combined inadvertently).
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp.
Args:
name (any, optional): If used, passed to a call to stamp()
backdate (float, optional): time to use for stop instead of current
unique (bool, optional): see stamp()
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): boolean, print total time
un (bool, optional): see stamp()
ks (bool, optional): see stamp()
qp (bool, optional): see stamp()
Returns:
float: The current time.
Raises:
BackdateError: If given backdate is out of range, or if used in root timer.
PausedError: If attempting stamp in paused timer.
StoppedError: If timer already stopped.
TypeError: If given backdate value is not type float.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Timer already stopped.")
if backdate is None:
t_stop = t
else:
if f.t is f.root:
raise BackdateError("Cannot backdate stop of root timer.")
if not isinstance(backdate, float):
raise TypeError("Backdate must be type float.")
if backdate > t:
raise BackdateError("Cannot backdate to future time.")
if backdate < f.t.last_t:
raise BackdateError("Cannot backdate to time earlier than last stamp.")
t_stop = backdate
unique = SET['UN'] if (unique is None and un is None) else bool(unique or un) # bool(None) becomes False
keep_subdivisions = SET['KS'] if (keep_subdivisions is None and ks is None) else bool(keep_subdivisions or ks)
quick_print = SET['QP'] if (quick_print is None and qp is None) else bool(quick_print or qp)
if name is not None:
if f.t.paused:
raise PausedError("Cannot stamp paused timer.")
elapsed = t_stop - f.t.last_t
_stamp(name, elapsed, unique, keep_subdivisions, quick_print)
else:
times_priv.assign_subdivisions(UNASGN, keep_subdivisions)
for s in f.t.rgstr_stamps:
if s not in f.s.cum:
f.s.cum[s] = 0.
f.s.order.append(s)
if not f.t.paused:
f.t.tmp_total += t_stop - f.t.start_t
f.t.tmp_total -= f.t.self_cut
f.t.self_cut += timer() - t # AFTER subtraction from tmp_total, before dump
times_priv.dump_times()
f.t.stopped = True
if quick_print:
print("({}) Total: {:.4f}".format(f.t.name, f.r.total))
return t | [
"def",
"stop",
"(",
"name",
"=",
"None",
",",
"backdate",
"=",
"None",
",",
"unique",
"=",
"None",
",",
"keep_subdivisions",
"=",
"None",
",",
"quick_print",
"=",
"None",
",",
"un",
"=",
"None",
",",
"ks",
"=",
"None",
",",
"qp",
"=",
"None",
")",
":",
"t",
"=",
"timer",
"(",
")",
"if",
"f",
".",
"t",
".",
"stopped",
":",
"raise",
"StoppedError",
"(",
"\"Timer already stopped.\"",
")",
"if",
"backdate",
"is",
"None",
":",
"t_stop",
"=",
"t",
"else",
":",
"if",
"f",
".",
"t",
"is",
"f",
".",
"root",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate stop of root timer.\"",
")",
"if",
"not",
"isinstance",
"(",
"backdate",
",",
"float",
")",
":",
"raise",
"TypeError",
"(",
"\"Backdate must be type float.\"",
")",
"if",
"backdate",
">",
"t",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate to future time.\"",
")",
"if",
"backdate",
"<",
"f",
".",
"t",
".",
"last_t",
":",
"raise",
"BackdateError",
"(",
"\"Cannot backdate to time earlier than last stamp.\"",
")",
"t_stop",
"=",
"backdate",
"unique",
"=",
"SET",
"[",
"'UN'",
"]",
"if",
"(",
"unique",
"is",
"None",
"and",
"un",
"is",
"None",
")",
"else",
"bool",
"(",
"unique",
"or",
"un",
")",
"# bool(None) becomes False",
"keep_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
"if",
"(",
"keep_subdivisions",
"is",
"None",
"and",
"ks",
"is",
"None",
")",
"else",
"bool",
"(",
"keep_subdivisions",
"or",
"ks",
")",
"quick_print",
"=",
"SET",
"[",
"'QP'",
"]",
"if",
"(",
"quick_print",
"is",
"None",
"and",
"qp",
"is",
"None",
")",
"else",
"bool",
"(",
"quick_print",
"or",
"qp",
")",
"if",
"name",
"is",
"not",
"None",
":",
"if",
"f",
".",
"t",
".",
"paused",
":",
"raise",
"PausedError",
"(",
"\"Cannot stamp paused timer.\"",
")",
"elapsed",
"=",
"t_stop",
"-",
"f",
".",
"t",
".",
"last_t",
"_stamp",
"(",
"name",
",",
"elapsed",
",",
"unique",
",",
"keep_subdivisions",
",",
"quick_print",
")",
"else",
":",
"times_priv",
".",
"assign_subdivisions",
"(",
"UNASGN",
",",
"keep_subdivisions",
")",
"for",
"s",
"in",
"f",
".",
"t",
".",
"rgstr_stamps",
":",
"if",
"s",
"not",
"in",
"f",
".",
"s",
".",
"cum",
":",
"f",
".",
"s",
".",
"cum",
"[",
"s",
"]",
"=",
"0.",
"f",
".",
"s",
".",
"order",
".",
"append",
"(",
"s",
")",
"if",
"not",
"f",
".",
"t",
".",
"paused",
":",
"f",
".",
"t",
".",
"tmp_total",
"+=",
"t_stop",
"-",
"f",
".",
"t",
".",
"start_t",
"f",
".",
"t",
".",
"tmp_total",
"-=",
"f",
".",
"t",
".",
"self_cut",
"f",
".",
"t",
".",
"self_cut",
"+=",
"timer",
"(",
")",
"-",
"t",
"# AFTER subtraction from tmp_total, before dump",
"times_priv",
".",
"dump_times",
"(",
")",
"f",
".",
"t",
".",
"stopped",
"=",
"True",
"if",
"quick_print",
":",
"print",
"(",
"\"({}) Total: {:.4f}\"",
".",
"format",
"(",
"f",
".",
"t",
".",
"name",
",",
"f",
".",
"r",
".",
"total",
")",
")",
"return",
"t"
] | Mark the end of timing. Optionally performs a stamp, hence accepts the
same arguments.
Notes:
If keeping subdivisions and not calling a stamp, any awaiting subdivisions
will be assigned to a special 'UNASSIGNED' position to indicate that they
are not properly accounted for in the hierarchy (these can happen at
different places and may be combined inadvertently).
Backdating: For subdivisions only. Backdate time must be in the past
but more recent than the latest stamp.
Args:
name (any, optional): If used, passed to a call to stamp()
backdate (float, optional): time to use for stop instead of current
unique (bool, optional): see stamp()
keep_subdivisions (bool, optional): keep awaiting subdivisions
quick_print (bool, optional): boolean, print total time
un (bool, optional): see stamp()
ks (bool, optional): see stamp()
qp (bool, optional): see stamp()
Returns:
float: The current time.
Raises:
BackdateError: If given backdate is out of range, or if used in root timer.
PausedError: If attempting stamp in paused timer.
StoppedError: If timer already stopped.
TypeError: If given backdate value is not type float. | [
"Mark",
"the",
"end",
"of",
"timing",
".",
"Optionally",
"performs",
"a",
"stamp",
"hence",
"accepts",
"the",
"same",
"arguments",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timer.py#L158-L231 | train |
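A sketch of stop() doubling as a final stamp; per the docstring above, passing a name stamps the last interval, and qp=True prints the total (same import assumption):

import gtimer as gt

gt.start()
build = [i * i for i in range(10000)]
gt.stamp('build')
gt.stop('teardown', qp=True)  # final stamp, then the timer's total is printed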
astooke/gtimer | gtimer/public/timer.py | pause | def pause():
"""
Pause the timer, preventing subsequent time from accumulating in the
total. Renders the timer inactive, disabling other timing commands.
Returns:
float: The current time.
Raises:
PausedError: If timer already paused.
StoppedError: If timer already stopped.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot pause stopped timer.")
if f.t.paused:
raise PausedError("Timer already paused.")
f.t.paused = True
f.t.tmp_total += t - f.t.start_t
f.t.start_t = None
f.t.last_t = None
return t | python | def pause():
"""
Pause the timer, preventing subsequent time from accumulating in the
total. Renders the timer inactive, disabling other timing commands.
Returns:
float: The current time.
Raises:
PausedError: If timer already paused.
StoppedError: If timer already stopped.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot pause stopped timer.")
if f.t.paused:
raise PausedError("Timer already paused.")
f.t.paused = True
f.t.tmp_total += t - f.t.start_t
f.t.start_t = None
f.t.last_t = None
return t | [
"def",
"pause",
"(",
")",
":",
"t",
"=",
"timer",
"(",
")",
"if",
"f",
".",
"t",
".",
"stopped",
":",
"raise",
"StoppedError",
"(",
"\"Cannot pause stopped timer.\"",
")",
"if",
"f",
".",
"t",
".",
"paused",
":",
"raise",
"PausedError",
"(",
"\"Timer already paused.\"",
")",
"f",
".",
"t",
".",
"paused",
"=",
"True",
"f",
".",
"t",
".",
"tmp_total",
"+=",
"t",
"-",
"f",
".",
"t",
".",
"start_t",
"f",
".",
"t",
".",
"start_t",
"=",
"None",
"f",
".",
"t",
".",
"last_t",
"=",
"None",
"return",
"t"
] | Pause the timer, preventing subsequent time from accumulating in the
total. Renders the timer inactive, disabling other timing commands.
Returns:
float: The current time.
Raises:
PausedError: If timer already paused.
StoppedError: If timer already stopped. | [
"Pause",
"the",
"timer",
"preventing",
"subsequent",
"time",
"from",
"accumulating",
"in",
"the",
"total",
".",
"Renders",
"the",
"timer",
"inactive",
"disabling",
"other",
"timing",
"commands",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timer.py#L234-L255 | train |
astooke/gtimer | gtimer/public/timer.py | resume | def resume():
"""
Resume a paused timer, re-activating it. Subsequent time accumulates in
the total.
Returns:
float: The current time.
Raises:
PausedError: If timer was not in paused state.
StoppedError: If timer was already stopped.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot resume stopped timer.")
if not f.t.paused:
raise PausedError("Cannot resume timer that is not paused.")
f.t.paused = False
f.t.start_t = t
f.t.last_t = t
return t | python | def resume():
"""
Resume a paused timer, re-activating it. Subsequent time accumulates in
the total.
Returns:
float: The current time.
Raises:
PausedError: If timer was not in paused state.
StoppedError: If timer was already stopped.
"""
t = timer()
if f.t.stopped:
raise StoppedError("Cannot resume stopped timer.")
if not f.t.paused:
raise PausedError("Cannot resume timer that is not paused.")
f.t.paused = False
f.t.start_t = t
f.t.last_t = t
return t | [
"def",
"resume",
"(",
")",
":",
"t",
"=",
"timer",
"(",
")",
"if",
"f",
".",
"t",
".",
"stopped",
":",
"raise",
"StoppedError",
"(",
"\"Cannot resume stopped timer.\"",
")",
"if",
"not",
"f",
".",
"t",
".",
"paused",
":",
"raise",
"PausedError",
"(",
"\"Cannot resume timer that is not paused.\"",
")",
"f",
".",
"t",
".",
"paused",
"=",
"False",
"f",
".",
"t",
".",
"start_t",
"=",
"t",
"f",
".",
"t",
".",
"last_t",
"=",
"t",
"return",
"t"
] | Resume a paused timer, re-activating it. Subsequent time accumulates in
the total.
Returns:
float: The current time.
Raises:
PausedError: If timer was not in paused state.
StoppedError: If timer was already stopped. | [
"Resume",
"a",
"paused",
"timer",
"re",
"-",
"activating",
"it",
".",
"Subsequent",
"time",
"accumulates",
"in",
"the",
"total",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timer.py#L258-L278 | train |
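pause() and resume() pair naturally for excluding untimed activity from the total; a sketch under the same top-level-import assumption:

import gtimer as gt

gt.start()
gt.stamp('phase_1')
gt.pause()    # exclude the next section (e.g. blocking I/O) from the total
# ... untimed activity here ...
gt.resume()
gt.stamp('phase_2')
gt.stop()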
astooke/gtimer | gtimer/private/collapse.py | collapse_times | def collapse_times():
"""Make copies of everything, assign to global shortcuts so functions work
on them, extract the times, then restore the running stacks.
"""
orig_ts = f.timer_stack
orig_ls = f.loop_stack
copy_ts = _copy_timer_stack()
copy_ls = copy.deepcopy(f.loop_stack)
f.timer_stack = copy_ts
f.loop_stack = copy_ls
f.refresh_shortcuts()
while (len(f.timer_stack) > 1) or f.t.in_loop:
_collapse_subdivision()
timer_pub.stop()
collapsed_times = f.r
f.timer_stack = orig_ts # (loops throw error if not same object!)
f.loop_stack = orig_ls
f.refresh_shortcuts()
return collapsed_times | python | def collapse_times():
"""Make copies of everything, assign to global shortcuts so functions work
on them, extract the times, then restore the running stacks.
"""
orig_ts = f.timer_stack
orig_ls = f.loop_stack
copy_ts = _copy_timer_stack()
copy_ls = copy.deepcopy(f.loop_stack)
f.timer_stack = copy_ts
f.loop_stack = copy_ls
f.refresh_shortcuts()
while (len(f.timer_stack) > 1) or f.t.in_loop:
_collapse_subdivision()
timer_pub.stop()
collapsed_times = f.r
f.timer_stack = orig_ts # (loops throw error if not same object!)
f.loop_stack = orig_ls
f.refresh_shortcuts()
return collapsed_times | [
"def",
"collapse_times",
"(",
")",
":",
"orig_ts",
"=",
"f",
".",
"timer_stack",
"orig_ls",
"=",
"f",
".",
"loop_stack",
"copy_ts",
"=",
"_copy_timer_stack",
"(",
")",
"copy_ls",
"=",
"copy",
".",
"deepcopy",
"(",
"f",
".",
"loop_stack",
")",
"f",
".",
"timer_stack",
"=",
"copy_ts",
"f",
".",
"loop_stack",
"=",
"copy_ls",
"f",
".",
"refresh_shortcuts",
"(",
")",
"while",
"(",
"len",
"(",
"f",
".",
"timer_stack",
")",
">",
"1",
")",
"or",
"f",
".",
"t",
".",
"in_loop",
":",
"_collapse_subdivision",
"(",
")",
"timer_pub",
".",
"stop",
"(",
")",
"collapsed_times",
"=",
"f",
".",
"r",
"f",
".",
"timer_stack",
"=",
"orig_ts",
"# (loops throw error if not same object!)",
"f",
".",
"loop_stack",
"=",
"orig_ls",
"f",
".",
"refresh_shortcuts",
"(",
")",
"return",
"collapsed_times"
] | Make copies of everything, assign to global shortcuts so functions work
on them, extract the times, then restore the running stacks. | [
"Make",
"copies",
"of",
"everything",
"assign",
"to",
"global",
"shortcuts",
"so",
"functions",
"work",
"on",
"them",
"extract",
"the",
"times",
"then",
"restore",
"the",
"running",
"stacks",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/private/collapse.py#L15-L33 | train |
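collapse_times() lives in gtimer/private, but its copy-stop-restore design is what allows reading timing data from a still-running timer. Assuming the public report() builds on it (an assumption, not stated in this entry), a mid-run read might look like:

import gtimer as gt

gt.start()
gt.stamp('step_1')
print(gt.report())  # assumed to use collapsed times; the live timer keeps running
gt.stamp('step_2')
gt.stop()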
IRC-SPHERE/HyperStream | hyperstream/plate/plate_manager.py | PlateManager.create_plate | def create_plate(self, plate_id, description, meta_data_id, values, complement, parent_plate):
"""
Create a new plate, and commit it to the database
:param plate_id: The plate id - required to be unique
:param description: A human readable description
:param meta_data_id: The meta data id, which should correspond to the tag in the global meta data
:param values: Either a list of string values, or the empty list (for use with complement)
:param complement: If complement is true, then the complement of the values list will be used when getting
values from the global meta data
:param parent_plate: The parent plate identifier
:return: The newly created plate
:type plate_id: str | unicode
:type complement: bool
:type values: list | tuple
"""
# Make sure the plate id doesn't already exist
with switch_db(PlateDefinitionModel, db_alias='hyperstream'):
try:
p = PlateDefinitionModel.objects.get(plate_id=plate_id)
if p:
logging.info("Plate with id {} already exists".format(plate_id))
return self.plates[plate_id]
except DoesNotExist:
pass
except MultipleObjectsReturned:
raise
plate_definition = PlateDefinitionModel(
plate_id=plate_id,
description=description,
meta_data_id=meta_data_id,
values=values,
complement=complement,
parent_plate=parent_plate
)
self.add_plate(plate_definition)
plate_definition.save()
return self.plates[plate_id] | python | def create_plate(self, plate_id, description, meta_data_id, values, complement, parent_plate):
"""
Create a new plate, and commit it to the database
:param plate_id: The plate id - required to be unique
:param description: A human readable description
:param meta_data_id: The meta data id, which should correspond to the tag in the global meta data
:param values: Either a list of string values, or the empty list (for use with complement)
:param complement: If complement is true, then the complement of the values list will be used when getting
values from the global meta data
:param parent_plate: The parent plate identifier
:return: The newly created plate
:type plate_id: str | unicode
:type complement: bool
:type values: list | tuple
"""
# Make sure the plate id doesn't already exist
with switch_db(PlateDefinitionModel, db_alias='hyperstream'):
try:
p = PlateDefinitionModel.objects.get(plate_id=plate_id)
if p:
logging.info("Plate with id {} already exists".format(plate_id))
return self.plates[plate_id]
except DoesNotExist:
pass
except MultipleObjectsReturned:
raise
plate_definition = PlateDefinitionModel(
plate_id=plate_id,
description=description,
meta_data_id=meta_data_id,
values=values,
complement=complement,
parent_plate=parent_plate
)
self.add_plate(plate_definition)
plate_definition.save()
return self.plates[plate_id] | [
"def",
"create_plate",
"(",
"self",
",",
"plate_id",
",",
"description",
",",
"meta_data_id",
",",
"values",
",",
"complement",
",",
"parent_plate",
")",
":",
"# Make sure the plate id doesn't already exist",
"with",
"switch_db",
"(",
"PlateDefinitionModel",
",",
"db_alias",
"=",
"'hyperstream'",
")",
":",
"try",
":",
"p",
"=",
"PlateDefinitionModel",
".",
"objects",
".",
"get",
"(",
"plate_id",
"=",
"plate_id",
")",
"if",
"p",
":",
"logging",
".",
"info",
"(",
"\"Plate with id {} already exists\"",
".",
"format",
"(",
"plate_id",
")",
")",
"return",
"self",
".",
"plates",
"[",
"plate_id",
"]",
"except",
"DoesNotExist",
":",
"pass",
"except",
"MultipleObjectsReturned",
":",
"raise",
"plate_definition",
"=",
"PlateDefinitionModel",
"(",
"plate_id",
"=",
"plate_id",
",",
"description",
"=",
"description",
",",
"meta_data_id",
"=",
"meta_data_id",
",",
"values",
"=",
"values",
",",
"complement",
"=",
"complement",
",",
"parent_plate",
"=",
"parent_plate",
")",
"self",
".",
"add_plate",
"(",
"plate_definition",
")",
"plate_definition",
".",
"save",
"(",
")",
"return",
"self",
".",
"plates",
"[",
"plate_id",
"]"
] | Create a new plate, and commit it to the database
:param plate_id: The plate id - required to be unique
:param description: A human readable description
:param meta_data_id: The meta data id, which should correspond to the tag in the global meta data
:param values: Either a list of string values, or the empty list (for use with complement)
:param complement: If complement is true, then the complement of the values list will be used when getting
values from the global meta data
:param parent_plate: The parent plate identifier
:return: The newly created plate
:type plate_id: str | unicode
:type complement: bool
:type values: list | tuple | [
"Create",
"a",
"new",
"plate",
"and",
"commit",
"it",
"to",
"the",
"database"
] | 98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780 | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/plate/plate_manager.py#L100-L139 | train |
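A hedged sketch of create_plate() via a plate manager. The HyperStream entry point and attribute name are assumptions based on the class shown; the plate values are hypothetical:

from hyperstream import HyperStream  # assumed entry point

hs = HyperStream()
plate = hs.plate_manager.create_plate(
    plate_id='H',             # hypothetical plate
    description='All houses',
    meta_data_id='house',     # must match a tag in the global meta data
    values=[],                # empty values + complement=True -> all tagged values
    complement=True,
    parent_plate=None,
)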
astooke/gtimer | gtimer/public/timedloop.py | timed_loop | def timed_loop(name=None,
rgstr_stamps=None,
save_itrs=SET['SI'],
loop_end_stamp=None,
end_stamp_unique=SET['UN'],
keep_prev_subdivisions=SET['KS'],
keep_end_subdivisions=SET['KS'],
quick_print=SET['QP']):
"""
Instantiate a TimedLoop object for measuring loop iteration timing data.
Can be used with either for or while loops.
Example::
loop = timed_loop()
while x > 0: # or for x in <iterable>:
next(loop) # or loop.next()
<body of loop, with gtimer stamps>
loop.exit()
Notes:
Can be used as a context manager around the loop, without requiring
separate call to exit(). Redundant calls to exit() do no harm. Loop
functionality is implemented in the next() or __next__() methods.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration.
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedLoop: Custom gtimer object for measuring loops.
"""
return TimedLoop(name=name,
rgstr_stamps=rgstr_stamps,
save_itrs=save_itrs,
loop_end_stamp=loop_end_stamp,
end_stamp_unique=end_stamp_unique,
keep_prev_subdivisions=keep_prev_subdivisions,
keep_end_subdivisions=keep_end_subdivisions) | python | def timed_loop(name=None,
rgstr_stamps=None,
save_itrs=SET['SI'],
loop_end_stamp=None,
end_stamp_unique=SET['UN'],
keep_prev_subdivisions=SET['KS'],
keep_end_subdivisions=SET['KS'],
quick_print=SET['QP']):
"""
Instantiate a TimedLoop object for measuring loop iteration timing data.
Can be used with either for or while loops.
Example::
loop = timed_loop()
while x > 0: # or for x in <iterable>:
next(loop) # or loop.next()
<body of loop, with gtimer stamps>
loop.exit()
Notes:
Can be used as a context manager around the loop, without requiring
separate call to exit(). Redundant calls to exit() do no harm. Loop
functionality is implemented in the next() or __next__() methods.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration.
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedLoop: Custom gtimer object for measuring loops.
"""
return TimedLoop(name=name,
rgstr_stamps=rgstr_stamps,
save_itrs=save_itrs,
loop_end_stamp=loop_end_stamp,
end_stamp_unique=end_stamp_unique,
keep_prev_subdivisions=keep_prev_subdivisions,
keep_end_subdivisions=keep_end_subdivisions) | [
"def",
"timed_loop",
"(",
"name",
"=",
"None",
",",
"rgstr_stamps",
"=",
"None",
",",
"save_itrs",
"=",
"SET",
"[",
"'SI'",
"]",
",",
"loop_end_stamp",
"=",
"None",
",",
"end_stamp_unique",
"=",
"SET",
"[",
"'UN'",
"]",
",",
"keep_prev_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
",",
"keep_end_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
",",
"quick_print",
"=",
"SET",
"[",
"'QP'",
"]",
")",
":",
"return",
"TimedLoop",
"(",
"name",
"=",
"name",
",",
"rgstr_stamps",
"=",
"rgstr_stamps",
",",
"save_itrs",
"=",
"save_itrs",
",",
"loop_end_stamp",
"=",
"loop_end_stamp",
",",
"end_stamp_unique",
"=",
"end_stamp_unique",
",",
"keep_prev_subdivisions",
"=",
"keep_prev_subdivisions",
",",
"keep_end_subdivisions",
"=",
"keep_end_subdivisions",
")"
] | Instantiate a TimedLoop object for measuring loop iteration timing data.
Can be used with either for or while loops.
Example::
loop = timed_loop()
while x > 0: # or for x in <iterable>:
next(loop) # or loop.next()
<body of loop, with gtimer stamps>
loop.exit()
Notes:
Can be used as a context manager around the loop, without requiring
separate call to exit(). Redundant calls to exit() do no harm. Loop
functionality is implemented in the next() or __next__() methods.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration.
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedLoop: Custom gtimer object for measuring loops. | [
"Instantiate",
"a",
"TimedLoop",
"object",
"for",
"measuring",
"loop",
"iteration",
"timing",
"data",
".",
"Can",
"be",
"used",
"with",
"either",
"for",
"or",
"while",
"loops",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timedloop.py#L13-L69 | train |
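The docstring above shows the explicit next()/exit() form; the context-manager form (also sanctioned by the docstring) handles exit() automatically. Note that quick_print is accepted by this wrapper but, in the code shown, not forwarded to the TimedLoop constructor. A sketch, with the usual top-level-import assumption:

import gtimer as gt

with gt.timed_loop('epoch') as loop:  # assumes __enter__ returns the loop object
    for batch in range(5):
        next(loop)
        gt.stamp('forward')
        gt.stamp('backward')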
astooke/gtimer | gtimer/public/timedloop.py | timed_for | def timed_for(iterable,
name=None,
rgstr_stamps=None,
save_itrs=SET['SI'],
loop_end_stamp=None,
end_stamp_unique=SET['UN'],
keep_prev_subdivisions=SET['KS'],
keep_end_subdivisions=SET['KS'],
quick_print=SET['QP']):
"""
Instantiate a TimedLoop object for measuring for loop iteration timing data.
Can be used only on for loops.
Example::
for i in gtimer.timed_for(iterable, ..):
<body of loop with gtimer stamps>
Notes:
Can be used as a context manager around the loop. When breaking out of
the loop, requires usage either as a context manager or with a reference
to the object on which to call the exit() method after leaving the loop
body. Redundant calls to exit() do no harm. Loop functionality is
implemented in the __iter__() method.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
iterable: Same as provided to regular 'for' command.
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration, passed through str().
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedFor: Custom gtimer object for measuring for loops.
"""
return TimedFor(iterable,
name=name,
rgstr_stamps=rgstr_stamps,
save_itrs=save_itrs,
loop_end_stamp=loop_end_stamp,
end_stamp_unique=end_stamp_unique,
keep_prev_subdivisions=keep_prev_subdivisions,
keep_end_subdivisions=keep_end_subdivisions) | python | def timed_for(iterable,
name=None,
rgstr_stamps=None,
save_itrs=SET['SI'],
loop_end_stamp=None,
end_stamp_unique=SET['UN'],
keep_prev_subdivisions=SET['KS'],
keep_end_subdivisions=SET['KS'],
quick_print=SET['QP']):
"""
Instantiate a TimedLoop object for measuring for loop iteration timing data.
Can be used only on for loops.
Example::
for i in gtimer.timed_for(iterable, ..):
<body of loop with gtimer stamps>
Notes:
Can be used as a context manager around the loop. When breaking out of
the loop, requires usage either as a context manager or with a reference
to the object on which to call the exit() method after leaving the loop
body. Redundant calls to exit() do no harm. Loop functionality is
implemented in the __iter__() method.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
iterable: Same as provided to regular 'for' command.
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration, passed through str().
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedFor: Custom gtimer object for measuring for loops.
"""
return TimedFor(iterable,
name=name,
rgstr_stamps=rgstr_stamps,
save_itrs=save_itrs,
loop_end_stamp=loop_end_stamp,
end_stamp_unique=end_stamp_unique,
keep_prev_subdivisions=keep_prev_subdivisions,
keep_end_subdivisions=keep_end_subdivisions) | [
"def",
"timed_for",
"(",
"iterable",
",",
"name",
"=",
"None",
",",
"rgstr_stamps",
"=",
"None",
",",
"save_itrs",
"=",
"SET",
"[",
"'SI'",
"]",
",",
"loop_end_stamp",
"=",
"None",
",",
"end_stamp_unique",
"=",
"SET",
"[",
"'UN'",
"]",
",",
"keep_prev_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
",",
"keep_end_subdivisions",
"=",
"SET",
"[",
"'KS'",
"]",
",",
"quick_print",
"=",
"SET",
"[",
"'QP'",
"]",
")",
":",
"return",
"TimedFor",
"(",
"iterable",
",",
"name",
"=",
"name",
",",
"rgstr_stamps",
"=",
"rgstr_stamps",
",",
"save_itrs",
"=",
"save_itrs",
",",
"loop_end_stamp",
"=",
"loop_end_stamp",
",",
"end_stamp_unique",
"=",
"end_stamp_unique",
",",
"keep_prev_subdivisions",
"=",
"keep_prev_subdivisions",
",",
"keep_end_subdivisions",
"=",
"keep_end_subdivisions",
")"
] | Instantiate a TimedLoop object for measuring for loop iteration timing data.
Can be used only on for loops.
Example::
for i in gtimer.timed_for(iterable, ..):
<body of loop with gtimer stamps>
Notes:
Can be used as a context manager around the loop. When breaking out of
the loop, requires usage either as a context manager or with a reference
to the object on which to call the exit() method after leaving the loop
body. Redundant calls to exit() do no harm. Loop functionality is
implemented in the __iter__() method.
Each instance can only be used once, so for an inner loop, this function
must be called within the outer loop.
Any awaiting subdivisions kept at entrance to a loop section will go to
the 'UNASSIGNED' position to indicate that they are not properly accounted
for in the hierarchy. Likewise for any awaiting subdivisions kept at the
end of loop iterations without a named stamp.
Args:
iterable: Same as provided to regular 'for' command.
name (any, optional): Identifier (makes the loop a subdivision), passed
through str().
rgstr_stamps (list, tuple, optional): Identifiers, see subdivision().
save_itrs (bool, optional): see subdivision().
loop_end_stamp (any, optional): Identifier, automatic stamp at end of
every iteration, passed through str().
end_stamp_unique (bool, optional): see stamp().
keep_prev_subdivisions (bool, optional): Keep awaiting subdivisions on
entering loop.
keep_end_subdivisions (bool, optional): Keep awaiting subdivisions at
end of iterations.
quick_print (bool, optional): Named loop only, print at end of each iteration.
Returns:
TimedFor: Custom gtimer object for measuring for loops. | [
"Instantiate",
"a",
"TimedLoop",
"object",
"for",
"measuring",
"for",
"loop",
"iteration",
"timing",
"data",
".",
"Can",
"be",
"used",
"only",
"on",
"for",
"loops",
"."
] | 2146dab459e5d959feb291821733d3d3ba7c523c | https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/timedloop.py#L72-L131 | train |
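timed_for()'s docstring warns that breaking out of the loop requires either the context-manager form or an explicit exit(); a sketch of the latter (same import assumption):

import gtimer as gt

loop = gt.timed_for(range(100), name='scan')
for i in loop:
    gt.stamp('check')
    if i == 10:
        break
loop.exit()  # required after break when not used as a context manager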
tamasgal/km3pipe | km3pipe/utils/calibrate.py | write_calibration | def write_calibration(calib, f, loc):
"""Write calibration set to file"""
for i, node in enumerate(
[p + '_' + s for p in ['pos', 'dir'] for s in 'xyz']):
h5loc = loc + '/' + node
ca = f.get_node(h5loc)
ca.append(calib[:, i])
du = f.get_node(loc + '/du')
du.append(calib[:, 7].astype('u1'))
floor = f.get_node(loc + '/floor')
floor.append(calib[:, 8].astype('u1'))
t0 = f.get_node(loc + '/t0')
t0.append(calib[:, 6])
if loc == "/hits":
time = f.get_node(loc + "/time")
offset = len(time)
chunk_size = len(calib)
time[offset - chunk_size:offset] += calib[:, 6] | python | def write_calibration(calib, f, loc):
"""Write calibration set to file"""
for i, node in enumerate(
[p + '_' + s for p in ['pos', 'dir'] for s in 'xyz']):
h5loc = loc + '/' + node
ca = f.get_node(h5loc)
ca.append(calib[:, i])
du = f.get_node(loc + '/du')
du.append(calib[:, 7].astype('u1'))
floor = f.get_node(loc + '/floor')
floor.append(calib[:, 8].astype('u1'))
t0 = f.get_node(loc + '/t0')
t0.append(calib[:, 6])
if loc == "/hits":
time = f.get_node(loc + "/time")
offset = len(time)
chunk_size = len(calib)
time[offset - chunk_size:offset] += calib[:, 6] | [
"def",
"write_calibration",
"(",
"calib",
",",
"f",
",",
"loc",
")",
":",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"[",
"p",
"+",
"'_'",
"+",
"s",
"for",
"p",
"in",
"[",
"'pos'",
",",
"'dir'",
"]",
"for",
"s",
"in",
"'xyz'",
"]",
")",
":",
"h5loc",
"=",
"loc",
"+",
"'/'",
"+",
"node",
"ca",
"=",
"f",
".",
"get_node",
"(",
"h5loc",
")",
"ca",
".",
"append",
"(",
"calib",
"[",
":",
",",
"i",
"]",
")",
"du",
"=",
"f",
".",
"get_node",
"(",
"loc",
"+",
"'/du'",
")",
"du",
".",
"append",
"(",
"calib",
"[",
":",
",",
"7",
"]",
".",
"astype",
"(",
"'u1'",
")",
")",
"floor",
"=",
"f",
".",
"get_node",
"(",
"loc",
"+",
"'/floor'",
")",
"floor",
".",
"append",
"(",
"calib",
"[",
":",
",",
"8",
"]",
".",
"astype",
"(",
"'u1'",
")",
")",
"t0",
"=",
"f",
".",
"get_node",
"(",
"loc",
"+",
"'/t0'",
")",
"t0",
".",
"append",
"(",
"calib",
"[",
":",
",",
"6",
"]",
")",
"if",
"loc",
"==",
"\"/hits\"",
":",
"time",
"=",
"f",
".",
"get_node",
"(",
"loc",
"+",
"\"/time\"",
")",
"offset",
"=",
"len",
"(",
"time",
")",
"chunk_size",
"=",
"len",
"(",
"calib",
")",
"time",
"[",
"offset",
"-",
"chunk_size",
":",
"offset",
"]",
"+=",
"calib",
"[",
":",
",",
"6",
"]"
] | Write calibration set to file | [
"Write",
"calibration",
"set",
"to",
"file"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/calibrate.py#L87-L108 | train |
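A hedged sketch of write_calibration(). It assumes a PyTables file whose /hits group already holds the EArrays that initialise_arrays() (next entry) creates, plus a /hits/time EArray containing the raw hit times for this chunk (the function adds t0 to them in place):

import numpy as np
import tables as tb
from km3pipe.utils.calibrate import write_calibration  # module shown above

calib = np.zeros((100, 9), dtype='f4')  # cols 0-5: pos/dir xyz, 6: t0, 7: du, 8: floor
with tb.open_file('calibrated.h5', 'a') as f:
    write_calibration(calib, f, '/hits')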
tamasgal/km3pipe | km3pipe/utils/calibrate.py | initialise_arrays | def initialise_arrays(group, f):
"""Create EArrays for calibrated hits"""
for node in ['pos_x', 'pos_y', 'pos_z', 'dir_x', 'dir_y', 'dir_z', 'du',
'floor', 't0']:
if node in ['floor', 'du']:
atom = U1_ATOM
else:
atom = F4_ATOM
f.create_earray(group, node, atom, (0, ), filters=FILTERS) | python | def initialise_arrays(group, f):
"""Create EArrays for calibrated hits"""
for node in ['pos_x', 'pos_y', 'pos_z', 'dir_x', 'dir_y', 'dir_z', 'du',
'floor', 't0']:
if node in ['floor', 'du']:
atom = U1_ATOM
else:
atom = F4_ATOM
f.create_earray(group, node, atom, (0, ), filters=FILTERS) | [
"def",
"initialise_arrays",
"(",
"group",
",",
"f",
")",
":",
"for",
"node",
"in",
"[",
"'pos_x'",
",",
"'pos_y'",
",",
"'pos_z'",
",",
"'dir_x'",
",",
"'dir_y'",
",",
"'dir_z'",
",",
"'du'",
",",
"'floor'",
",",
"'t0'",
"]",
":",
"if",
"node",
"in",
"[",
"'floor'",
",",
"'du'",
"]",
":",
"atom",
"=",
"U1_ATOM",
"else",
":",
"atom",
"=",
"F4_ATOM",
"f",
".",
"create_earray",
"(",
"group",
",",
"node",
",",
"atom",
",",
"(",
"0",
",",
")",
",",
"filters",
"=",
"FILTERS",
")"
] | Create EArrays for calibrated hits | [
"Create",
"EArrays",
"for",
"calibrated",
"hits"
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/calibrate.py#L111-L119 | train |
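A matching sketch for initialise_arrays(), creating the group it expects (same assumed file name as above):

import tables as tb
from km3pipe.utils.calibrate import initialise_arrays  # module shown above

with tb.open_file('calibrated.h5', 'w') as f:
    hits = f.create_group('/', 'hits')
    initialise_arrays(hits, f)  # nine empty, appendable EArrays under /hits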
tamasgal/km3pipe | km3pipe/io/aanet.py | AanetPump.blob_counter | def blob_counter(self):
"""Create a blob counter."""
import aa # pylint: disable=F0401 # noqa
from ROOT import EventFile # pylint: disable=F0401
try:
event_file = EventFile(self.filename)
except Exception:
raise SystemExit("Could not open file")
num_blobs = 0
for event in event_file:
num_blobs += 1
return num_blobs | python | def blob_counter(self):
"""Create a blob counter."""
import aa # pylint: disable=F0401 # noqa
from ROOT import EventFile # pylint: disable=F0401
try:
event_file = EventFile(self.filename)
except Exception:
raise SystemExit("Could not open file")
num_blobs = 0
for event in event_file:
num_blobs += 1
return num_blobs | [
"def",
"blob_counter",
"(",
"self",
")",
":",
"import",
"aa",
"# pylint: disablF0401 # noqa",
"from",
"ROOT",
"import",
"EventFile",
"# pylint: disable F0401",
"try",
":",
"event_file",
"=",
"EventFile",
"(",
"self",
".",
"filename",
")",
"except",
"Exception",
":",
"raise",
"SystemExit",
"(",
"\"Could not open file\"",
")",
"num_blobs",
"=",
"0",
"for",
"event",
"in",
"event_file",
":",
"num_blobs",
"+=",
"1",
"return",
"num_blobs"
] | Create a blob counter. | [
"Create",
"a",
"blob",
"counter",
"."
] | 7a9b59ac899a28775b5bdc5d391d9a5340d08040 | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/io/aanet.py#L248-L262 | train |
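A hedged sketch of blob_counter(). That AanetPump accepts a filename keyword is an assumption based on km3pipe pump conventions; the method itself needs ROOT and aanet importable:

from km3pipe.io.aanet import AanetPump  # module shown above

pump = AanetPump(filename='events.aa.root')  # hypothetical file
print(pump.blob_counter())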