repo | path | func_name | code | language | sha | url | partition |
---|---|---|---|---|---|---|---|
Erotemic/utool | utool/util_cache.py | tryload_cache_list_with_compute | def tryload_cache_list_with_compute(use_cache, dpath, fname, cfgstr_list,
compute_fn, *args):
"""
    Tries to load cached data; if the load misses, computes it with the given compute function.
"""
# Load precomputed values
if use_cache is False:
data_list = [None] * len(cfgstr_list)
ismiss_list = [True] * len(cfgstr_list)
# Don't load or save, just compute
data_list = compute_fn(ismiss_list, *args)
return data_list
else:
data_list, ismiss_list = tryload_cache_list(dpath, fname, cfgstr_list,
verbose=False)
num_total = len(cfgstr_list)
if any(ismiss_list):
# Compute missing values
newdata_list = compute_fn(ismiss_list, *args)
newcfgstr_list = util_list.compress(cfgstr_list, ismiss_list)
index_list = util_list.list_where(ismiss_list)
print('[cache] %d/%d cache hits for %s in %s' % (num_total -
len(index_list),
num_total, fname,
util_path.tail(dpath)))
# Cache write
for newcfgstr, newdata in zip(newcfgstr_list, newdata_list):
save_cache(dpath, fname, newcfgstr, newdata, verbose=False)
# Populate missing result
for index, newdata in zip(index_list, newdata_list):
data_list[index] = newdata
else:
print('[cache] %d/%d cache hits for %s in %s' % (num_total, num_total,
fname,
util_path.tail(dpath)))
    return data_list | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L284-L319 | train |
Erotemic/utool | utool/util_cache.py | to_json | def to_json(val, allow_pickle=False, pretty=False):
r"""
Converts a python object to a JSON string using the utool convention
Args:
val (object):
Returns:
str: json_str
References:
http://stackoverflow.com/questions/11561932/why-does-json-dumpslistnp
CommandLine:
python -m utool.util_cache --test-to_json
python3 -m utool.util_cache --test-to_json
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> import utool as ut
>>> import numpy as np
>>> import uuid
>>> val = [
>>> '{"foo": "not a dict"}',
>>> 1.3,
>>> [1],
>>> # {1: 1, 2: 2, 3: 3}, cant use integer keys
>>> {1, 2, 3},
>>> slice(1, None, 1),
>>> b'an ascii string',
>>> np.array([1, 2, 3]),
>>> ut.get_zero_uuid(),
>>> ut.LazyDict(x='fo'),
>>> ut.LazyDict,
>>> {'x': {'a', 'b', 'cde'}, 'y': [1]}
>>> ]
>>> #val = ut.LazyDict(x='fo')
>>> allow_pickle = True
>>> if not allow_pickle:
>>> val = val[:-2]
>>> json_str = ut.to_json(val, allow_pickle=allow_pickle)
>>> result = ut.repr3(json_str)
>>> reload_val = ut.from_json(json_str, allow_pickle=allow_pickle)
>>> # Make sure pickle doesnt happen by default
>>> try:
>>> json_str = ut.to_json(val)
>>> assert False or not allow_pickle, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> try:
>>> json_str = ut.from_json(val)
>>> assert False, 'expected a type error'
>>> except TypeError:
>>> print('Correctly got type error')
>>> print(result)
>>> print('original = ' + ut.repr3(val, nl=1))
>>> print('reconstructed = ' + ut.repr3(reload_val, nl=1))
>>> assert reload_val[6] == val[6].tolist()
>>> assert reload_val[6] is not val[6]
"""
UtoolJSONEncoder = make_utool_json_encoder(allow_pickle)
json_kw = {}
json_kw['cls'] = UtoolJSONEncoder
if pretty:
json_kw['indent'] = 4
json_kw['separators'] = (',', ': ')
json_str = json.dumps(val, **json_kw)
    return json_str | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L532-L600 | train |
Erotemic/utool | utool/util_cache.py | from_json | def from_json(json_str, allow_pickle=False):
"""
Decodes a JSON object specified in the utool convention
Args:
json_str (str):
allow_pickle (bool): (default = False)
Returns:
object: val
CommandLine:
python -m utool.util_cache from_json --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> import utool as ut
>>> json_str = 'just a normal string'
>>> json_str = '["just a normal string"]'
>>> allow_pickle = False
>>> val = from_json(json_str, allow_pickle)
>>> result = ('val = %s' % (ut.repr2(val),))
>>> print(result)
"""
if six.PY3:
if isinstance(json_str, bytes):
json_str = json_str.decode('utf-8')
UtoolJSONEncoder = make_utool_json_encoder(allow_pickle)
object_hook = UtoolJSONEncoder._json_object_hook
val = json.loads(json_str, object_hook=object_hook)
    return val | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L603-L634 | train |
Erotemic/utool | utool/util_cache.py | cachestr_repr | def cachestr_repr(val):
"""
Representation of an object as a cache string.
"""
try:
memview = memoryview(val)
return memview.tobytes()
except Exception:
try:
return to_json(val)
except Exception:
# SUPER HACK
if repr(val.__class__) == "<class 'ibeis.control.IBEISControl.IBEISController'>":
                return val.get_dbname() | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L678-L691 | train |
Erotemic/utool | utool/util_cache.py | cached_func | def cached_func(fname=None, cache_dir='default', appname='utool', key_argx=None,
key_kwds=None, use_cache=None, verbose=None):
r"""
Wraps a function with a Cacher object
uses a hash of arguments as input
Args:
fname (str): file name (defaults to function name)
cache_dir (unicode): (default = u'default')
appname (unicode): (default = u'utool')
key_argx (None): (default = None)
key_kwds (None): (default = None)
use_cache (bool): turns on disk based caching(default = None)
CommandLine:
python -m utool.util_cache --exec-cached_func
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> def costly_func(a, b, c='d', *args, **kwargs):
... return ([a] * b, c, args, kwargs)
>>> ans0 = costly_func(41, 3)
>>> ans1 = costly_func(42, 3)
>>> closure_ = ut.cached_func('costly_func', appname='utool_test',
>>> key_argx=[0, 1])
>>> efficient_func = closure_(costly_func)
>>> ans2 = efficient_func(42, 3)
>>> ans3 = efficient_func(42, 3)
>>> ans4 = efficient_func(41, 3)
>>> ans5 = efficient_func(41, 3)
>>> assert ans1 == ans2
>>> assert ans2 == ans3
>>> assert ans5 == ans4
>>> assert ans5 == ans0
>>> assert ans1 != ans0
"""
if verbose is None:
verbose = VERBOSE_CACHE
def cached_closure(func):
from utool import util_decor
import utool as ut
fname_ = util_inspect.get_funcname(func) if fname is None else fname
kwdefaults = util_inspect.get_kwdefaults(func)
argnames = util_inspect.get_argnames(func)
if ut.is_method(func):
# ignore self for methods
argnames = argnames[1:]
cacher = Cacher(fname_, cache_dir=cache_dir, appname=appname,
verbose=verbose)
if use_cache is None:
use_cache_ = not util_arg.get_argflag('--nocache-' + fname_)
else:
use_cache_ = use_cache
#_dbgdict = dict(fname_=fname_, key_kwds=key_kwds, appname=appname,
# key_argx=key_argx, use_cache_=use_cache_)
#@functools.wraps(func)
def cached_wraper(*args, **kwargs):
"""
Cached Wrapper Function
Additional Kwargs:
use_cache (bool) : enables cache
"""
try:
if verbose > 2:
print('[util_cache] computing cached function fname_=%s' %
( fname_,))
# Implicitly adds use_cache to kwargs
cfgstr = get_cfgstr_from_args(func, args, kwargs, key_argx,
key_kwds, kwdefaults, argnames)
if util_cplat.WIN32:
# remove potentially invalid chars
cfgstr = '_' + util_hash.hashstr27(cfgstr)
assert cfgstr is not None, 'cfgstr=%r cannot be None' % (cfgstr,)
use_cache__ = kwargs.pop('use_cache', use_cache_)
if use_cache__:
# Make cfgstr from specified input
data = cacher.tryload(cfgstr)
if data is not None:
return data
# Cached missed compute function
data = func(*args, **kwargs)
# Cache save
#if use_cache__:
# TODO: save_cache
cacher.save(data, cfgstr)
return data
#except ValueError as ex:
            # handle protocol error
except Exception as ex:
from utool import util_dbg
_dbgdict2 = dict(key_argx=key_argx, lenargs=len(args),
lenkw=len(kwargs),)
msg = '\n'.join([
'+--- UTOOL --- ERROR IN CACHED FUNCTION',
#'dbgdict = ' + utool.repr4(_dbgdict),
'dbgdict2 = ' + util_str.repr4(_dbgdict2),
])
util_dbg.printex(ex, msg)
raise
# Give function a handle to the cacher object
cached_wraper = util_decor.preserve_sig(cached_wraper, func)
cached_wraper.cacher = cacher
return cached_wraper
    return cached_closure | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L778-L884 | train |
Erotemic/utool | utool/util_cache.py | get_global_shelf_fpath | def get_global_shelf_fpath(appname='default', ensure=False):
""" Returns the filepath to the global shelf """
global_cache_dir = get_global_cache_dir(appname, ensure=ensure)
shelf_fpath = join(global_cache_dir, meta_util_constants.global_cache_fname)
    return shelf_fpath | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L906-L910 | train |
Erotemic/utool | utool/util_cache.py | global_cache_write | def global_cache_write(key, val, appname='default'):
""" Writes cache files to a safe place in each operating system """
with GlobalShelfContext(appname) as shelf:
        shelf[key] = val | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L983-L986 | train |
Erotemic/utool | utool/util_cache.py | delete_global_cache | def delete_global_cache(appname='default'):
""" Reads cache files to a safe place in each operating system """
#close_global_shelf(appname)
shelf_fpath = get_global_shelf_fpath(appname)
    util_path.remove_file(shelf_fpath, verbose=True, dryrun=False) | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L989-L993 | train |
Erotemic/utool | utool/util_cache.py | Cacher.existing_versions | def existing_versions(self):
"""
Returns data with different cfgstr values that were previously computed
with this cacher.
"""
import glob
pattern = self.fname + '_*' + self.ext
for fname in glob.glob1(self.dpath, pattern):
fpath = join(self.dpath, fname)
        yield fpath | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L345-L354 | train |
Erotemic/utool | utool/util_cache.py | Cacher.tryload | def tryload(self, cfgstr=None):
"""
Like load, but returns None if the load fails
"""
if cfgstr is None:
cfgstr = self.cfgstr
if cfgstr is None:
import warnings
warnings.warn('No cfgstr given in Cacher constructor or call')
cfgstr = ''
# assert cfgstr is not None, (
# 'must specify cfgstr in constructor or call')
if not self.enabled:
if self.verbose > 0:
print('[cache] ... %s Cacher disabled' % (self.fname))
return None
try:
if self.verbose > 1:
print('[cache] tryload fname=%s' % (self.fname,))
# if self.verbose > 2:
# print('[cache] cfgstr=%r' % (cfgstr,))
return self.load(cfgstr)
except IOError:
if self.verbose > 0:
            print('[cache] ... %s Cacher miss' % (self.fname)) | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L375-L399 | train |
Erotemic/utool | utool/util_cache.py | Cachable.fuzzyload | def fuzzyload(self, cachedir=None, partial_cfgstr='', **kwargs):
"""
Try and load from a partially specified configuration string
"""
valid_targets = self.glob_valid_targets(cachedir, partial_cfgstr)
if len(valid_targets) != 1:
import utool as ut
msg = 'need to further specify target. valid_targets=%s' % (ut.repr3(valid_targets,))
raise ValueError(msg)
fpath = valid_targets[0]
    self.load(fpath=fpath, **kwargs) | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L1108-L1118 | train |
Erotemic/utool | utool/util_cache.py | Cachable.load | def load(self, cachedir=None, cfgstr=None, fpath=None, verbose=None,
quiet=QUIET, ignore_keys=None):
"""
Loads the result from the given database
"""
if verbose is None:
verbose = getattr(self, 'verbose', VERBOSE)
if fpath is None:
fpath = self.get_fpath(cachedir, cfgstr=cfgstr)
if verbose:
print('[Cachable] cache tryload: %r' % (basename(fpath),))
try:
self._unsafe_load(fpath, ignore_keys)
if verbose:
print('... self cache hit: %r' % (basename(fpath),))
except ValueError as ex:
import utool as ut
msg = '[!Cachable] Cachable(%s) is likely corrupt' % (self.get_cfgstr())
print('CORRUPT fpath = %s' % (fpath,))
ut.printex(ex, msg, iswarning=True)
raise
#except BadZipFile as ex:
except zipfile.error as ex:
import utool as ut
msg = '[!Cachable] Cachable(%s) has bad zipfile' % (self.get_cfgstr())
print('CORRUPT fpath = %s' % (fpath,))
ut.printex(ex, msg, iswarning=True)
raise
#if exists(fpath):
# #print('[Cachable] Removing corrupted file: %r' % fpath)
# #os.remove(fpath)
# raise hsexcept.HotsNeedsRecomputeError(msg)
#else:
# raise Exception(msg)
except IOError as ex:
import utool as ut
if not exists(fpath):
msg = '... self cache miss: %r' % (basename(fpath),)
if verbose:
print(msg)
raise
print('CORRUPT fpath = %s' % (fpath,))
msg = '[!Cachable] Cachable(%s) is corrupt' % (self.get_cfgstr())
ut.printex(ex, msg, iswarning=True)
raise
except Exception as ex:
import utool as ut
ut.printex(ex, 'unknown exception while loading query result')
            raise | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cache.py#L1121-L1169 | train |
Erotemic/utool | utool/util_path.py | truepath_relative | def truepath_relative(path, otherpath=None):
""" Normalizes and returns absolute path with so specs
Args:
path (str): path to file or directory
otherpath (None): (default = None)
Returns:
str: path_
CommandLine:
python -m utool.util_path --exec-truepath_relative --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path = 'C:/foobar/foobiz'
>>> otherpath = 'C:/foobar'
>>> path_ = truepath_relative(path, otherpath)
>>> result = ('path_ = %s' % (ut.repr2(path_),))
>>> print(result)
path_ = 'foobiz'
"""
if otherpath is None:
otherpath = os.getcwd()
otherpath = truepath(otherpath)
path_ = normpath(relpath(path, otherpath))
    return path_ | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L109-L137 | train |
Erotemic/utool | utool/util_path.py | tail | def tail(fpath, n=2, trailing=True):
""" Alias for path_ndir_split """
    return path_ndir_split(fpath, n=n, trailing=trailing) | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L140-L142 | train |
Erotemic/utool | utool/util_path.py | unexpanduser | def unexpanduser(path):
r"""
Replaces home directory with '~'
"""
homedir = expanduser('~')
if path.startswith(homedir):
path = '~' + path[len(homedir):]
    return path | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L145-L152 | train |
Erotemic/utool | utool/util_path.py | path_ndir_split | def path_ndir_split(path_, n, force_unix=True, winroot='C:', trailing=True):
r"""
Shows only a little bit of the path. Up to the n bottom-level directories
TODO: rename to path_tail? ndir_split?
Returns:
(str) the trailing n paths of path.
CommandLine:
python3 -m utool.util_path --test-path_ndir_split
python3 -m utool --tf path_ndir_split
python -m utool --tf path_ndir_split
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> paths = [r'/usr/bin/local/foo/bar',
... r'C:/',
... #r'lonerel',
... #r'reldir/other',
... r'/ham',
... r'./eggs',
... r'/spam/eggs',
... r'C:\Program Files (x86)/foobar/bin']
>>> N = 2
>>> iter_ = ut.iprod(paths, range(1, N + 1))
>>> force_unix = True
>>> tuplist = [(n, path_ndir_split(path_, n)) for path_, n in iter_]
>>> chunklist = list(ut.ichunks(tuplist, N))
>>> list_ = [['n=%r: %s' % (x, ut.reprfunc(y)) for x, y in chunk]
>>> for chunk in chunklist]
>>> line_list = [', '.join(strs) for strs in list_]
>>> result = '\n'.join(line_list)
>>> print(result)
n=1: '.../bar', n=2: '.../foo/bar'
n=1: 'C:/', n=2: 'C:/'
n=1: '.../ham', n=2: '/ham'
n=1: '.../eggs', n=2: './eggs'
n=1: '.../eggs', n=2: '.../spam/eggs'
n=1: '.../bin', n=2: '.../foobar/bin'
"""
if not isinstance(path_, six.string_types):
# Probably given a file pointer
return path_
if n is None:
cplat_path = ensure_crossplat_path(path_)
elif n == 0:
cplat_path = ''
else:
sep = '/' if force_unix else os.sep
ndirs_list = []
head = path_
reached_end = False
for nx in range(n):
head, tail = split(head)
if tail == '':
if head == '':
reached_end = True
else:
root = head if len(ndirs_list) == 0 else head.strip('\\/')
ndirs_list.append(root)
reached_end = True
break
else:
ndirs_list.append(tail)
if trailing and not reached_end:
head, tail = split(head)
if len(tail) == 0:
if len(head) == 0: # or head == '/':
reached_end = True
ndirs = sep.join(ndirs_list[::-1])
cplat_path = ensure_crossplat_path(ndirs)
#if trailing and not reached_end:
if trailing and not reached_end:
cplat_path = '.../' + cplat_path
    return cplat_path | python | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L155-L234 | train |
Erotemic/utool | utool/util_path.py | augpath | def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None,
newfname=None, ensure=False, prefix=None, suffix=None):
"""
augments end of path before the extension.
augpath
Args:
path (str):
augsuf (str): augment filename before extension
Returns:
str: newpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug'
>>> newpath = augpath(path, augsuf)
>>> result = str(newpath)
>>> print(result)
somefile_aug.txt
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug2'
>>> newext = '.bak'
>>> augdir = 'backup'
>>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir)
>>> result = str(newpath)
>>> print(result)
backup/somefile_aug2.bak
"""
if prefix is not None:
augpref = prefix
if suffix is not None:
augsuf = suffix
# Breakup path
dpath, fname = split(path)
fname_noext, ext = splitext(fname)
if newfname is not None:
fname_noext = newfname
# Augment ext
if newext is None:
newext = ext
# Augment fname
new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext))
# Augment dpath
if augdir is not None:
new_dpath = join(dpath, augdir)
if ensure:
            # create new dir if needed
ensuredir(new_dpath)
else:
new_dpath = dpath
# Recombine into new path
newpath = join(new_dpath, new_fname)
return newpath | python | def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None,
newfname=None, ensure=False, prefix=None, suffix=None):
"""
augments end of path before the extension.
augpath
Args:
path (str):
augsuf (str): augment filename before extension
Returns:
str: newpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug'
>>> newpath = augpath(path, augsuf)
>>> result = str(newpath)
>>> print(result)
somefile_aug.txt
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug2'
>>> newext = '.bak'
>>> augdir = 'backup'
>>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir)
>>> result = str(newpath)
>>> print(result)
backup/somefile_aug2.bak
"""
if prefix is not None:
augpref = prefix
if suffix is not None:
augsuf = suffix
# Breakup path
dpath, fname = split(path)
fname_noext, ext = splitext(fname)
if newfname is not None:
fname_noext = newfname
# Augment ext
if newext is None:
newext = ext
# Augment fname
new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext))
# Augment dpath
if augdir is not None:
new_dpath = join(dpath, augdir)
if ensure:
# create new dir if needebe
ensuredir(new_dpath)
else:
new_dpath = dpath
# Recombine into new path
newpath = join(new_dpath, new_fname)
return newpath | [
"def",
"augpath",
"(",
"path",
",",
"augsuf",
"=",
"''",
",",
"augext",
"=",
"''",
",",
"augpref",
"=",
"''",
",",
"augdir",
"=",
"None",
",",
"newext",
"=",
"None",
",",
"newfname",
"=",
"None",
",",
"ensure",
"=",
"False",
",",
"prefix",
"=",
"None",
",",
"suffix",
"=",
"None",
")",
":",
"if",
"prefix",
"is",
"not",
"None",
":",
"augpref",
"=",
"prefix",
"if",
"suffix",
"is",
"not",
"None",
":",
"augsuf",
"=",
"suffix",
"# Breakup path",
"dpath",
",",
"fname",
"=",
"split",
"(",
"path",
")",
"fname_noext",
",",
"ext",
"=",
"splitext",
"(",
"fname",
")",
"if",
"newfname",
"is",
"not",
"None",
":",
"fname_noext",
"=",
"newfname",
"# Augment ext",
"if",
"newext",
"is",
"None",
":",
"newext",
"=",
"ext",
"# Augment fname",
"new_fname",
"=",
"''",
".",
"join",
"(",
"(",
"augpref",
",",
"fname_noext",
",",
"augsuf",
",",
"newext",
",",
"augext",
")",
")",
"# Augment dpath",
"if",
"augdir",
"is",
"not",
"None",
":",
"new_dpath",
"=",
"join",
"(",
"dpath",
",",
"augdir",
")",
"if",
"ensure",
":",
"# create new dir if needebe",
"ensuredir",
"(",
"new_dpath",
")",
"else",
":",
"new_dpath",
"=",
"dpath",
"# Recombine into new path",
"newpath",
"=",
"join",
"(",
"new_dpath",
",",
"new_fname",
")",
"return",
"newpath"
] | augments end of path before the extension.
augpath
Args:
path (str):
augsuf (str): augment filename before extension
Returns:
str: newpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug'
>>> newpath = augpath(path, augsuf)
>>> result = str(newpath)
>>> print(result)
somefile_aug.txt
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug2'
>>> newext = '.bak'
>>> augdir = 'backup'
>>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir)
>>> result = str(newpath)
>>> print(result)
backup/somefile_aug2.bak | [
"augments",
"end",
"of",
"path",
"before",
"the",
"extension",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L308-L368 | train |
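A minimal usage sketch for augpath, following the doctest style used above (the literal outputs assume posix path separators; the filenames are illustrative):
>>> from utool.util_path import augpath
>>> augpath('data/img.png', augsuf='_resized')
'data/img_resized.png'
>>> augpath('data/img.png', newext='.jpg', augdir='converted')
'data/converted/img.jpg'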
Erotemic/utool | utool/util_path.py | remove_files_in_dir | def remove_files_in_dir(dpath, fname_pattern_list='*', recursive=False,
verbose=VERBOSE, dryrun=False, ignore_errors=False):
""" Removes files matching a pattern from a directory """
if isinstance(fname_pattern_list, six.string_types):
fname_pattern_list = [fname_pattern_list]
if verbose > 2:
print('[util_path] Removing files:')
print(' * from dpath = %r ' % dpath)
print(' * with patterns = %r' % fname_pattern_list)
print(' * recursive = %r' % recursive)
num_removed, num_matched = (0, 0)
if not exists(dpath):
msg = ('!!! dir = %r does not exist!' % dpath)
if verbose:
print(msg)
warnings.warn(msg, category=UserWarning)
for root, dname_list, fname_list in os.walk(dpath):
for fname_pattern in fname_pattern_list:
for fname in fnmatch.filter(fname_list, fname_pattern):
num_matched += 1
num_removed += remove_file(join(root, fname),
ignore_errors=ignore_errors,
dryrun=dryrun,
verbose=verbose > 5)
if not recursive:
break
if verbose > 0:
print('[util_path] ... Removed %d/%d files' % (num_removed, num_matched))
return True | python | def remove_files_in_dir(dpath, fname_pattern_list='*', recursive=False,
verbose=VERBOSE, dryrun=False, ignore_errors=False):
""" Removes files matching a pattern from a directory """
if isinstance(fname_pattern_list, six.string_types):
fname_pattern_list = [fname_pattern_list]
if verbose > 2:
print('[util_path] Removing files:')
print(' * from dpath = %r ' % dpath)
print(' * with patterns = %r' % fname_pattern_list)
print(' * recursive = %r' % recursive)
num_removed, num_matched = (0, 0)
if not exists(dpath):
msg = ('!!! dir = %r does not exist!' % dpath)
if verbose:
print(msg)
warnings.warn(msg, category=UserWarning)
for root, dname_list, fname_list in os.walk(dpath):
for fname_pattern in fname_pattern_list:
for fname in fnmatch.filter(fname_list, fname_pattern):
num_matched += 1
num_removed += remove_file(join(root, fname),
ignore_errors=ignore_errors,
dryrun=dryrun,
verbose=verbose > 5)
if not recursive:
break
if verbose > 0:
print('[util_path] ... Removed %d/%d files' % (num_removed, num_matched))
return True | [
"def",
"remove_files_in_dir",
"(",
"dpath",
",",
"fname_pattern_list",
"=",
"'*'",
",",
"recursive",
"=",
"False",
",",
"verbose",
"=",
"VERBOSE",
",",
"dryrun",
"=",
"False",
",",
"ignore_errors",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"fname_pattern_list",
",",
"six",
".",
"string_types",
")",
":",
"fname_pattern_list",
"=",
"[",
"fname_pattern_list",
"]",
"if",
"verbose",
">",
"2",
":",
"print",
"(",
"'[util_path] Removing files:'",
")",
"print",
"(",
"' * from dpath = %r '",
"%",
"dpath",
")",
"print",
"(",
"' * with patterns = %r'",
"%",
"fname_pattern_list",
")",
"print",
"(",
"' * recursive = %r'",
"%",
"recursive",
")",
"num_removed",
",",
"num_matched",
"=",
"(",
"0",
",",
"0",
")",
"if",
"not",
"exists",
"(",
"dpath",
")",
":",
"msg",
"=",
"(",
"'!!! dir = %r does not exist!'",
"%",
"dpath",
")",
"if",
"verbose",
":",
"print",
"(",
"msg",
")",
"warnings",
".",
"warn",
"(",
"msg",
",",
"category",
"=",
"UserWarning",
")",
"for",
"root",
",",
"dname_list",
",",
"fname_list",
"in",
"os",
".",
"walk",
"(",
"dpath",
")",
":",
"for",
"fname_pattern",
"in",
"fname_pattern_list",
":",
"for",
"fname",
"in",
"fnmatch",
".",
"filter",
"(",
"fname_list",
",",
"fname_pattern",
")",
":",
"num_matched",
"+=",
"1",
"num_removed",
"+=",
"remove_file",
"(",
"join",
"(",
"root",
",",
"fname",
")",
",",
"ignore_errors",
"=",
"ignore_errors",
",",
"dryrun",
"=",
"dryrun",
",",
"verbose",
"=",
"verbose",
">",
"5",
")",
"if",
"not",
"recursive",
":",
"break",
"if",
"verbose",
">",
"0",
":",
"print",
"(",
"'[util_path] ... Removed %d/%d files'",
"%",
"(",
"num_removed",
",",
"num_matched",
")",
")",
"return",
"True"
] | Removes files matching a pattern from a directory | [
"Removes",
"files",
"matching",
"a",
"pattern",
"from",
"a",
"directory"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L371-L399 | train |
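A hedged sketch of calling remove_files_in_dir; the directory and pattern are illustrative, and dryrun=True keeps the call free of side effects:
>>> from utool.util_path import remove_files_in_dir
>>> remove_files_in_dir('./build', '*.pyc', recursive=True, dryrun=True)
True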
Erotemic/utool | utool/util_path.py | delete | def delete(path, dryrun=False, recursive=True, verbose=None, print_exists=True,
ignore_errors=True):
""" Removes a file, directory, or symlink """
if verbose is None:
verbose = VERBOSE
if not QUIET:
verbose = 1
if verbose > 0:
print('[util_path] Deleting path=%r' % path)
exists_flag = exists(path)
link_flag = islink(path)
if not exists_flag and not link_flag:
if print_exists and verbose:
print('..does not exist!')
flag = False
else:
rmargs = dict(verbose=verbose > 1, ignore_errors=ignore_errors,
dryrun=dryrun)
if islink(path):
os.unlink(path)
flag = True
elif isdir(path):
# First remove everything in the directory
flag = remove_files_in_dir(path, recursive=recursive, **rmargs)
# Then remove the directory itself
flag = flag and remove_dirs(path, **rmargs)
elif isfile(path):
flag = remove_file(path, **rmargs)
else:
raise ValueError('Unknown type of path=%r' % (path,))
if verbose > 0:
print('[util_path] Finished deleting path=%r' % path)
return flag | python | def delete(path, dryrun=False, recursive=True, verbose=None, print_exists=True,
ignore_errors=True):
""" Removes a file, directory, or symlink """
if verbose is None:
verbose = VERBOSE
if not QUIET:
verbose = 1
if verbose > 0:
print('[util_path] Deleting path=%r' % path)
exists_flag = exists(path)
link_flag = islink(path)
if not exists_flag and not link_flag:
if print_exists and verbose:
print('..does not exist!')
flag = False
else:
rmargs = dict(verbose=verbose > 1, ignore_errors=ignore_errors,
dryrun=dryrun)
if islink(path):
os.unlink(path)
flag = True
elif isdir(path):
# First remove everything in the directory
flag = remove_files_in_dir(path, recursive=recursive, **rmargs)
# Then remove the directory itself
flag = flag and remove_dirs(path, **rmargs)
elif isfile(path):
flag = remove_file(path, **rmargs)
else:
raise ValueError('Unknown type of path=%r' % (path,))
if verbose > 0:
print('[util_path] Finished deleting path=%r' % path)
return flag | [
"def",
"delete",
"(",
"path",
",",
"dryrun",
"=",
"False",
",",
"recursive",
"=",
"True",
",",
"verbose",
"=",
"None",
",",
"print_exists",
"=",
"True",
",",
"ignore_errors",
"=",
"True",
")",
":",
"if",
"verbose",
"is",
"None",
":",
"verbose",
"=",
"VERBOSE",
"if",
"not",
"QUIET",
":",
"verbose",
"=",
"1",
"if",
"verbose",
">",
"0",
":",
"print",
"(",
"'[util_path] Deleting path=%r'",
"%",
"path",
")",
"exists_flag",
"=",
"exists",
"(",
"path",
")",
"link_flag",
"=",
"islink",
"(",
"path",
")",
"if",
"not",
"exists_flag",
"and",
"not",
"link_flag",
":",
"if",
"print_exists",
"and",
"verbose",
":",
"print",
"(",
"'..does not exist!'",
")",
"flag",
"=",
"False",
"else",
":",
"rmargs",
"=",
"dict",
"(",
"verbose",
"=",
"verbose",
">",
"1",
",",
"ignore_errors",
"=",
"ignore_errors",
",",
"dryrun",
"=",
"dryrun",
")",
"if",
"islink",
"(",
"path",
")",
":",
"os",
".",
"unlink",
"(",
"path",
")",
"flag",
"=",
"True",
"elif",
"isdir",
"(",
"path",
")",
":",
"# First remove everything in the directory",
"flag",
"=",
"remove_files_in_dir",
"(",
"path",
",",
"recursive",
"=",
"recursive",
",",
"*",
"*",
"rmargs",
")",
"# Then remove the directory itself",
"flag",
"=",
"flag",
"and",
"remove_dirs",
"(",
"path",
",",
"*",
"*",
"rmargs",
")",
"elif",
"isfile",
"(",
"path",
")",
":",
"flag",
"=",
"remove_file",
"(",
"path",
",",
"*",
"*",
"rmargs",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown type of path=%r'",
"%",
"(",
"path",
",",
")",
")",
"if",
"verbose",
">",
"0",
":",
"print",
"(",
"'[util_path] Finished deleting path=%r'",
"%",
"path",
")",
"return",
"flag"
] | Removes a file, directory, or symlink | [
"Removes",
"a",
"file",
"directory",
"or",
"symlink"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L402-L434 | train |
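delete dispatches on the path type (symlink, directory, or file). An illustrative call, assuming /tmp/scratch is a throwaway path; False is returned when the path is already gone:
>>> from utool.util_path import delete
>>> flag = delete('/tmp/scratch', print_exists=False)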
Erotemic/utool | utool/util_path.py | remove_existing_fpaths | def remove_existing_fpaths(fpath_list, verbose=VERBOSE, quiet=QUIET,
strict=False, print_caller=PRINT_CALLER,
lbl='files'):
""" checks existance before removing. then tries to remove exisint paths """
import utool as ut
if print_caller:
print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_existing_fpaths')
fpath_list_ = ut.filter_Nones(fpath_list)
exists_list = list(map(exists, fpath_list_))
if verbose:
n_total = len(fpath_list)
n_valid = len(fpath_list_)
n_exist = sum(exists_list)
print('[util_path.remove_existing_fpaths] request delete of %d %s' % (
n_total, lbl))
if n_valid != n_total:
print(('[util_path.remove_existing_fpaths] '
'trying to delete %d/%d non None %s ') %
(n_valid, n_total, lbl))
print(('[util_path.remove_existing_fpaths] '
' %d/%d exist and need to be deleted')
% (n_exist, n_valid))
existing_fpath_list = ut.compress(fpath_list_, exists_list)
return remove_fpaths(existing_fpath_list, verbose=verbose, quiet=quiet,
strict=strict, print_caller=False, lbl=lbl) | python | def remove_existing_fpaths(fpath_list, verbose=VERBOSE, quiet=QUIET,
strict=False, print_caller=PRINT_CALLER,
lbl='files'):
""" checks existance before removing. then tries to remove exisint paths """
import utool as ut
if print_caller:
print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_existing_fpaths')
fpath_list_ = ut.filter_Nones(fpath_list)
exists_list = list(map(exists, fpath_list_))
if verbose:
n_total = len(fpath_list)
n_valid = len(fpath_list_)
n_exist = sum(exists_list)
print('[util_path.remove_existing_fpaths] request delete of %d %s' % (
n_total, lbl))
if n_valid != n_total:
print(('[util_path.remove_existing_fpaths] '
'trying to delete %d/%d non None %s ') %
(n_valid, n_total, lbl))
print(('[util_path.remove_existing_fpaths] '
' %d/%d exist and need to be deleted')
% (n_exist, n_valid))
existing_fpath_list = ut.compress(fpath_list_, exists_list)
return remove_fpaths(existing_fpath_list, verbose=verbose, quiet=quiet,
strict=strict, print_caller=False, lbl=lbl) | [
"def",
"remove_existing_fpaths",
"(",
"fpath_list",
",",
"verbose",
"=",
"VERBOSE",
",",
"quiet",
"=",
"QUIET",
",",
"strict",
"=",
"False",
",",
"print_caller",
"=",
"PRINT_CALLER",
",",
"lbl",
"=",
"'files'",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"print_caller",
":",
"print",
"(",
"util_dbg",
".",
"get_caller_name",
"(",
"range",
"(",
"1",
",",
"4",
")",
")",
"+",
"' called remove_existing_fpaths'",
")",
"fpath_list_",
"=",
"ut",
".",
"filter_Nones",
"(",
"fpath_list",
")",
"exists_list",
"=",
"list",
"(",
"map",
"(",
"exists",
",",
"fpath_list_",
")",
")",
"if",
"verbose",
":",
"n_total",
"=",
"len",
"(",
"fpath_list",
")",
"n_valid",
"=",
"len",
"(",
"fpath_list_",
")",
"n_exist",
"=",
"sum",
"(",
"exists_list",
")",
"print",
"(",
"'[util_path.remove_existing_fpaths] request delete of %d %s'",
"%",
"(",
"n_total",
",",
"lbl",
")",
")",
"if",
"n_valid",
"!=",
"n_total",
":",
"print",
"(",
"(",
"'[util_path.remove_existing_fpaths] '",
"'trying to delete %d/%d non None %s '",
")",
"%",
"(",
"n_valid",
",",
"n_total",
",",
"lbl",
")",
")",
"print",
"(",
"(",
"'[util_path.remove_existing_fpaths] '",
"' %d/%d exist and need to be deleted'",
")",
"%",
"(",
"n_exist",
",",
"n_valid",
")",
")",
"existing_fpath_list",
"=",
"ut",
".",
"compress",
"(",
"fpath_list_",
",",
"exists_list",
")",
"return",
"remove_fpaths",
"(",
"existing_fpath_list",
",",
"verbose",
"=",
"verbose",
",",
"quiet",
"=",
"quiet",
",",
"strict",
"=",
"strict",
",",
"print_caller",
"=",
"False",
",",
"lbl",
"=",
"lbl",
")"
] | checks existence before removing. then tries to remove existing paths | [
"checks",
"existance",
"before",
"removing",
".",
"then",
"tries",
"to",
"remove",
"exisint",
"paths"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L437-L461 | train |
Erotemic/utool | utool/util_path.py | remove_fpaths | def remove_fpaths(fpaths, verbose=VERBOSE, quiet=QUIET, strict=False,
print_caller=PRINT_CALLER, lbl='files'):
"""
Removes multiple file paths
"""
import utool as ut
if print_caller:
print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_fpaths')
n_total = len(fpaths)
_verbose = (not quiet and n_total > 0) or VERYVERBOSE
if _verbose:
print('[util_path.remove_fpaths] try removing %d %s' % (n_total, lbl))
n_removed = 0
prog = ut.ProgIter(fpaths, label='removing files', enabled=verbose)
_iter = iter(prog)
# Try to be fast at first
try:
for fpath in _iter:
os.remove(fpath)
n_removed += 1
except OSError as ex:
# But if we fail, put a try in the inner loop
if VERYVERBOSE:
print('WARNING: Could not remove fpath = %r' % (fpath,))
if strict:
util_dbg.printex(ex, 'Could not remove fpath = %r' % (fpath,),
iswarning=False)
raise
for fpath in _iter:
try:
os.remove(fpath)
n_removed += 1
except OSError as ex:
if VERYVERBOSE:
print('WARNING: Could not remove fpath = %r' % (fpath,))
if _verbose:
print('[util_path.remove_fpaths] ... removed %d / %d %s' % (
n_removed, n_total, lbl))
return n_removed | python | def remove_fpaths(fpaths, verbose=VERBOSE, quiet=QUIET, strict=False,
print_caller=PRINT_CALLER, lbl='files'):
"""
Removes multiple file paths
"""
import utool as ut
if print_caller:
print(util_dbg.get_caller_name(range(1, 4)) + ' called remove_fpaths')
n_total = len(fpaths)
_verbose = (not quiet and n_total > 0) or VERYVERBOSE
if _verbose:
print('[util_path.remove_fpaths] try removing %d %s' % (n_total, lbl))
n_removed = 0
prog = ut.ProgIter(fpaths, label='removing files', enabled=verbose)
_iter = iter(prog)
# Try to be fast at first
try:
for fpath in _iter:
os.remove(fpath)
n_removed += 1
except OSError as ex:
# But if we fail, put a try in the inner loop
if VERYVERBOSE:
print('WARNING: Could not remove fpath = %r' % (fpath,))
if strict:
util_dbg.printex(ex, 'Could not remove fpath = %r' % (fpath,),
iswarning=False)
raise
for fpath in _iter:
try:
os.remove(fpath)
n_removed += 1
except OSError as ex:
if VERYVERBOSE:
print('WARNING: Could not remove fpath = %r' % (fpath,))
if _verbose:
print('[util_path.remove_fpaths] ... removed %d / %d %s' % (
n_removed, n_total, lbl))
return n_removed | [
"def",
"remove_fpaths",
"(",
"fpaths",
",",
"verbose",
"=",
"VERBOSE",
",",
"quiet",
"=",
"QUIET",
",",
"strict",
"=",
"False",
",",
"print_caller",
"=",
"PRINT_CALLER",
",",
"lbl",
"=",
"'files'",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"print_caller",
":",
"print",
"(",
"util_dbg",
".",
"get_caller_name",
"(",
"range",
"(",
"1",
",",
"4",
")",
")",
"+",
"' called remove_fpaths'",
")",
"n_total",
"=",
"len",
"(",
"fpaths",
")",
"_verbose",
"=",
"(",
"not",
"quiet",
"and",
"n_total",
">",
"0",
")",
"or",
"VERYVERBOSE",
"if",
"_verbose",
":",
"print",
"(",
"'[util_path.remove_fpaths] try removing %d %s'",
"%",
"(",
"n_total",
",",
"lbl",
")",
")",
"n_removed",
"=",
"0",
"prog",
"=",
"ut",
".",
"ProgIter",
"(",
"fpaths",
",",
"label",
"=",
"'removing files'",
",",
"enabled",
"=",
"verbose",
")",
"_iter",
"=",
"iter",
"(",
"prog",
")",
"# Try to be fast at first",
"try",
":",
"for",
"fpath",
"in",
"_iter",
":",
"os",
".",
"remove",
"(",
"fpath",
")",
"n_removed",
"+=",
"1",
"except",
"OSError",
"as",
"ex",
":",
"# Buf if we fail put a try in the inner loop",
"if",
"VERYVERBOSE",
":",
"print",
"(",
"'WARNING: Could not remove fpath = %r'",
"%",
"(",
"fpath",
",",
")",
")",
"if",
"strict",
":",
"util_dbg",
".",
"printex",
"(",
"ex",
",",
"'Could not remove fpath = %r'",
"%",
"(",
"fpath",
",",
")",
",",
"iswarning",
"=",
"False",
")",
"raise",
"for",
"fpath",
"in",
"_iter",
":",
"try",
":",
"os",
".",
"remove",
"(",
"fpath",
")",
"n_removed",
"+=",
"1",
"except",
"OSError",
"as",
"ex",
":",
"if",
"VERYVERBOSE",
":",
"print",
"(",
"'WARNING: Could not remove fpath = %r'",
"%",
"(",
"fpath",
",",
")",
")",
"if",
"_verbose",
":",
"print",
"(",
"'[util_path.remove_fpaths] ... removed %d / %d %s'",
"%",
"(",
"n_removed",
",",
"n_total",
",",
"lbl",
")",
")",
"return",
"n_removed"
] | Removes multiple file paths | [
"Removes",
"multiple",
"file",
"paths"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L464-L502 | train |
Erotemic/utool | utool/util_path.py | longest_existing_path | def longest_existing_path(_path):
r"""
Returns the longest root of _path that exists
Args:
_path (str): path string
Returns:
str: _path - path string
CommandLine:
python -m utool.util_path --exec-longest_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> target = dirname(ut.__file__)
>>> _path = join(target, 'nonexist/foobar')
>>> existing_path = longest_existing_path(_path)
>>> result = ('existing_path = %s' % (str(existing_path),))
>>> print(result)
>>> assert existing_path == target
"""
existing_path = _path
while True:
_path_new = os.path.dirname(existing_path)
if exists(_path_new):
existing_path = _path_new
break
if _path_new == existing_path:
print('!!! [utool] This is a very ill-formatted path indeed.')
existing_path = ''
break
existing_path = _path_new
return existing_path | python | def longest_existing_path(_path):
r"""
Returns the longest root of _path that exists
Args:
_path (str): path string
Returns:
str: _path - path string
CommandLine:
python -m utool.util_path --exec-longest_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> target = dirname(ut.__file__)
>>> _path = join(target, 'nonexist/foobar')
>>> existing_path = longest_existing_path(_path)
>>> result = ('existing_path = %s' % (str(existing_path),))
>>> print(result)
>>> assert existing_path == target
"""
existing_path = _path
while True:
_path_new = os.path.dirname(existing_path)
if exists(_path_new):
existing_path = _path_new
break
if _path_new == existing_path:
print('!!! [utool] This is a very ill-formatted path indeed.')
existing_path = ''
break
existing_path = _path_new
return existing_path | [
"def",
"longest_existing_path",
"(",
"_path",
")",
":",
"existing_path",
"=",
"_path",
"while",
"True",
":",
"_path_new",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"existing_path",
")",
"if",
"exists",
"(",
"_path_new",
")",
":",
"existing_path",
"=",
"_path_new",
"break",
"if",
"_path_new",
"==",
"existing_path",
":",
"print",
"(",
"'!!! [utool] This is a very illformated path indeed.'",
")",
"existing_path",
"=",
"''",
"break",
"existing_path",
"=",
"_path_new",
"return",
"existing_path"
] | r"""
Returns the longest root of _path that exists
Args:
_path (str): path string
Returns:
str: _path - path string
CommandLine:
python -m utool.util_path --exec-longest_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> target = dirname(ut.__file__)
>>> _path = join(target, 'nonexist/foobar')
>>> existing_path = longest_existing_path(_path)
>>> result = ('existing_path = %s' % (str(existing_path),))
>>> print(result)
>>> assert existing_path == target | [
"r",
"Returns",
"the",
"longest",
"root",
"of",
"_path",
"that",
"exists"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L508-L543 | train |
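A sketch of longest_existing_path; the result depends on the local filesystem, and a typical Linux layout is assumed here:
>>> from utool.util_path import longest_existing_path
>>> longest_existing_path('/usr/lib/does/not/exist')
'/usr/lib'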
Erotemic/utool | utool/util_path.py | get_path_type | def get_path_type(path_):
r"""
returns if a path is a file, directory, link, or mount
"""
path_type = ''
if isfile(path_):
path_type += 'file'
if isdir(path_):
path_type += 'directory'
if islink(path_):
path_type += 'link'
if ismount(path_):
path_type += 'mount'
return path_type | python | def get_path_type(path_):
r"""
returns if a path is a file, directory, link, or mount
"""
path_type = ''
if isfile(path_):
path_type += 'file'
if isdir(path_):
path_type += 'directory'
if islink(path_):
path_type += 'link'
if ismount(path_):
path_type += 'mount'
return path_type | [
"def",
"get_path_type",
"(",
"path_",
")",
":",
"path_type",
"=",
"''",
"if",
"isfile",
"(",
"path_",
")",
":",
"path_type",
"+=",
"'file'",
"if",
"isdir",
"(",
"path_",
")",
":",
"path_type",
"+=",
"'directory'",
"if",
"islink",
"(",
"path_",
")",
":",
"path_type",
"+=",
"'link'",
"if",
"ismount",
"(",
"path_",
")",
":",
"path_type",
"+=",
"'mount'",
"return",
"path_type"
] | r"""
returns if a path is a file, directory, link, or mount | [
"r",
"returns",
"if",
"a",
"path",
"is",
"a",
"file",
"directory",
"link",
"or",
"mount"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L546-L559 | train |
Erotemic/utool | utool/util_path.py | checkpath | def checkpath(path_, verbose=VERYVERBOSE, n=None, info=VERYVERBOSE):
r""" verbose wrapper around ``os.path.exists``
Returns:
true if ``path_`` exists on the filesystem show only the
top `n` directories
Args:
path_ (str): path string
verbose (bool): verbosity flag(default = False)
n (int): (default = None)
info (bool): (default = False)
CommandLine:
python -m utool.util_path --test-checkpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path_ = ut.__file__
>>> verbose = True
>>> n = None
>>> info = False
>>> result = checkpath(path_, verbose, n, info)
>>> print(result)
True
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path_ = ut.__file__ + 'foobar'
>>> verbose = True
>>> result = checkpath(path_, verbose, n=None, info=True)
>>> print(result)
False
"""
assert isinstance(path_, six.string_types), (
'path_=%r is not a string. type(path_) = %r' % (path_, type(path_)))
path_ = normpath(path_)
if sys.platform.startswith('win32'):
# convert back to windows style path if using unix style
if path_.startswith('\\'):
dirs = path_.split('\\')
if len(dirs) > 1 and len(dirs[0]) == 0 and len(dirs[1]) == 1:
dirs[1] = dirs[1].upper() + ':'
path_ = '\\'.join(dirs[1:])
does_exist = exists(path_)
if verbose:
#print_('[utool] checkpath(%r)' % (path_))
pretty_path = path_ndir_split(path_, n)
caller_name = util_dbg.get_caller_name(allow_genexpr=False)
print('[%s] checkpath(%r)' % (caller_name, pretty_path))
if does_exist:
path_type = get_path_type(path_)
#path_type = 'file' if isfile(path_) else 'directory'
print('[%s] ...(%s) exists' % (caller_name, path_type,))
else:
print('[%s] ... does not exist' % (caller_name))
if not does_exist and info:
#print('[util_path] ! Does not exist')
_longest_path = longest_existing_path(path_)
_longest_path_type = get_path_type(_longest_path)
print('[util_path] ... The longest existing path is: %r' % _longest_path)
print('[util_path] ... and has type %r' % (_longest_path_type,))
return does_exist | python | def checkpath(path_, verbose=VERYVERBOSE, n=None, info=VERYVERBOSE):
r""" verbose wrapper around ``os.path.exists``
Returns:
True if ``path_`` exists on the filesystem. The verbose output
shows only the top `n` directories.
Args:
path_ (str): path string
verbose (bool): verbosity flag(default = False)
n (int): (default = None)
info (bool): (default = False)
CommandLine:
python -m utool.util_path --test-checkpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path_ = ut.__file__
>>> verbose = True
>>> n = None
>>> info = False
>>> result = checkpath(path_, verbose, n, info)
>>> print(result)
True
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path_ = ut.__file__ + 'foobar'
>>> verbose = True
>>> result = checkpath(path_, verbose, n=None, info=True)
>>> print(result)
False
"""
assert isinstance(path_, six.string_types), (
'path_=%r is not a string. type(path_) = %r' % (path_, type(path_)))
path_ = normpath(path_)
if sys.platform.startswith('win32'):
# convert back to windows style path if using unix style
if path_.startswith('\\'):
dirs = path_.split('\\')
if len(dirs) > 1 and len(dirs[0]) == 0 and len(dirs[1]) == 1:
dirs[1] = dirs[1].upper() + ':'
path_ = '\\'.join(dirs[1:])
does_exist = exists(path_)
if verbose:
#print_('[utool] checkpath(%r)' % (path_))
pretty_path = path_ndir_split(path_, n)
caller_name = util_dbg.get_caller_name(allow_genexpr=False)
print('[%s] checkpath(%r)' % (caller_name, pretty_path))
if does_exist:
path_type = get_path_type(path_)
#path_type = 'file' if isfile(path_) else 'directory'
print('[%s] ...(%s) exists' % (caller_name, path_type,))
else:
print('[%s] ... does not exist' % (caller_name))
if not does_exist and info:
#print('[util_path] ! Does not exist')
_longest_path = longest_existing_path(path_)
_longest_path_type = get_path_type(_longest_path)
print('[util_path] ... The longest existing path is: %r' % _longest_path)
print('[util_path] ... and has type %r' % (_longest_path_type,))
return does_exist | [
"def",
"checkpath",
"(",
"path_",
",",
"verbose",
"=",
"VERYVERBOSE",
",",
"n",
"=",
"None",
",",
"info",
"=",
"VERYVERBOSE",
")",
":",
"assert",
"isinstance",
"(",
"path_",
",",
"six",
".",
"string_types",
")",
",",
"(",
"'path_=%r is not a string. type(path_) = %r'",
"%",
"(",
"path_",
",",
"type",
"(",
"path_",
")",
")",
")",
"path_",
"=",
"normpath",
"(",
"path_",
")",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win32'",
")",
":",
"# convert back to windows style path if using unix style",
"if",
"path_",
".",
"startswith",
"(",
"'\\\\'",
")",
":",
"dirs",
"=",
"path_",
".",
"split",
"(",
"'\\\\'",
")",
"if",
"len",
"(",
"dirs",
")",
">",
"1",
"and",
"len",
"(",
"dirs",
"[",
"0",
"]",
")",
"==",
"0",
"and",
"len",
"(",
"dirs",
"[",
"1",
"]",
")",
"==",
"1",
":",
"dirs",
"[",
"1",
"]",
"=",
"dirs",
"[",
"1",
"]",
".",
"upper",
"(",
")",
"+",
"':'",
"path_",
"=",
"'\\\\'",
".",
"join",
"(",
"dirs",
"[",
"1",
":",
"]",
")",
"does_exist",
"=",
"exists",
"(",
"path_",
")",
"if",
"verbose",
":",
"#print_('[utool] checkpath(%r)' % (path_))",
"pretty_path",
"=",
"path_ndir_split",
"(",
"path_",
",",
"n",
")",
"caller_name",
"=",
"util_dbg",
".",
"get_caller_name",
"(",
"allow_genexpr",
"=",
"False",
")",
"print",
"(",
"'[%s] checkpath(%r)'",
"%",
"(",
"caller_name",
",",
"pretty_path",
")",
")",
"if",
"does_exist",
":",
"path_type",
"=",
"get_path_type",
"(",
"path_",
")",
"#path_type = 'file' if isfile(path_) else 'directory'",
"print",
"(",
"'[%s] ...(%s) exists'",
"%",
"(",
"caller_name",
",",
"path_type",
",",
")",
")",
"else",
":",
"print",
"(",
"'[%s] ... does not exist'",
"%",
"(",
"caller_name",
")",
")",
"if",
"not",
"does_exist",
"and",
"info",
":",
"#print('[util_path] ! Does not exist')",
"_longest_path",
"=",
"longest_existing_path",
"(",
"path_",
")",
"_longest_path_type",
"=",
"get_path_type",
"(",
"_longest_path",
")",
"print",
"(",
"'[util_path] ... The longest existing path is: %r'",
"%",
"_longest_path",
")",
"print",
"(",
"'[util_path] ... and has type %r'",
"%",
"(",
"_longest_path_type",
",",
")",
")",
"return",
"does_exist"
] | r""" verbose wrapper around ``os.path.exists``
Returns:
True if ``path_`` exists on the filesystem. The verbose output
shows only the top `n` directories.
Args:
path_ (str): path string
verbose (bool): verbosity flag(default = False)
n (int): (default = None)
info (bool): (default = False)
CommandLine:
python -m utool.util_path --test-checkpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path_ = ut.__file__
>>> verbose = True
>>> n = None
>>> info = False
>>> result = checkpath(path_, verbose, n, info)
>>> print(result)
True
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path_ = ut.__file__ + 'foobar'
>>> verbose = True
>>> result = checkpath(path_, verbose, n=None, info=True)
>>> print(result)
False | [
"r",
"verbose",
"wrapper",
"around",
"os",
".",
"path",
".",
"exists"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L562-L628 | train |
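A usage sketch for checkpath mirroring the doctests above (verbose printout elided; only return values shown):
>>> from utool.util_path import checkpath
>>> import utool as ut
>>> checkpath(ut.__file__)
True
>>> flag = checkpath(ut.__file__ + '.nope', verbose=True, info=True)  # also reports the longest existing ancestor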
Erotemic/utool | utool/util_path.py | ensurepath | def ensurepath(path_, verbose=None):
""" DEPRICATE - alias - use ensuredir instead """
if verbose is None:
verbose = VERYVERBOSE
return ensuredir(path_, verbose=verbose) | python | def ensurepath(path_, verbose=None):
""" DEPRICATE - alias - use ensuredir instead """
if verbose is None:
verbose = VERYVERBOSE
return ensuredir(path_, verbose=verbose) | [
"def",
"ensurepath",
"(",
"path_",
",",
"verbose",
"=",
"None",
")",
":",
"if",
"verbose",
"is",
"None",
":",
"verbose",
"=",
"VERYVERBOSE",
"return",
"ensuredir",
"(",
"path_",
",",
"verbose",
"=",
"verbose",
")"
] | DEPRECATE - alias - use ensuredir instead | [
"DEPRICATE",
"-",
"alias",
"-",
"use",
"ensuredir",
"instead"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L631-L635 | train |
Erotemic/utool | utool/util_path.py | ensuredir | def ensuredir(path_, verbose=None, info=False, mode=0o1777):
r"""
Ensures that directory will exist. creates new dir with sticky bits by
default
Args:
path (str): dpath to ensure. Can also be a tuple to send to join
info (bool): if True prints extra information
mode (int): octal mode of directory (default 0o1777)
Returns:
str: path - the ensured directory
"""
if verbose is None:
verbose = VERYVERBOSE
if isinstance(path_, (list, tuple)):
path_ = join(*path_)
if HAVE_PATHLIB and isinstance(path_, pathlib.Path):
path_ = str(path_)
if not checkpath(path_, verbose=verbose, info=info):
if verbose:
print('[util_path] mkdir(%r)' % path_)
try:
os.makedirs(normpath(path_), mode=mode)
except OSError as ex:
util_dbg.printex(
ex,
'check that the longest existing path '
'is not a bad windows symlink.', keys=['path_'])
raise
return path_ | python | def ensuredir(path_, verbose=None, info=False, mode=0o1777):
r"""
Ensures that directory will exist. creates new dir with sticky bits by
default
Args:
path (str): dpath to ensure. Can also be a tuple to send to join
info (bool): if True prints extra information
mode (int): octal mode of directory (default 0o1777)
Returns:
str: path - the ensured directory
"""
if verbose is None:
verbose = VERYVERBOSE
if isinstance(path_, (list, tuple)):
path_ = join(*path_)
if HAVE_PATHLIB and isinstance(path_, pathlib.Path):
path_ = str(path_)
if not checkpath(path_, verbose=verbose, info=info):
if verbose:
print('[util_path] mkdir(%r)' % path_)
try:
os.makedirs(normpath(path_), mode=mode)
except OSError as ex:
util_dbg.printex(
ex,
'check that the longest existing path '
'is not a bad windows symlink.', keys=['path_'])
raise
return path_ | [
"def",
"ensuredir",
"(",
"path_",
",",
"verbose",
"=",
"None",
",",
"info",
"=",
"False",
",",
"mode",
"=",
"0o1777",
")",
":",
"if",
"verbose",
"is",
"None",
":",
"verbose",
"=",
"VERYVERBOSE",
"if",
"isinstance",
"(",
"path_",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"path_",
"=",
"join",
"(",
"*",
"path_",
")",
"if",
"HAVE_PATHLIB",
"and",
"isinstance",
"(",
"path_",
",",
"pathlib",
".",
"Path",
")",
":",
"path_",
"=",
"str",
"(",
"path_",
")",
"if",
"not",
"checkpath",
"(",
"path_",
",",
"verbose",
"=",
"verbose",
",",
"info",
"=",
"info",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'[util_path] mkdir(%r)'",
"%",
"path_",
")",
"try",
":",
"os",
".",
"makedirs",
"(",
"normpath",
"(",
"path_",
")",
",",
"mode",
"=",
"mode",
")",
"except",
"OSError",
"as",
"ex",
":",
"util_dbg",
".",
"printex",
"(",
"ex",
",",
"'check that the longest existing path '",
"'is not a bad windows symlink.'",
",",
"keys",
"=",
"[",
"'path_'",
"]",
")",
"raise",
"return",
"path_"
] | r"""
Ensures that directory will exist. creates new dir with sticky bits by
default
Args:
path (str): dpath to ensure. Can also be a tuple to send to join
info (bool): if True prints extra information
mode (int): octal mode of directory (default 0o1777)
Returns:
str: path - the ensured directory | [
"r",
"Ensures",
"that",
"directory",
"will",
"exist",
".",
"creates",
"new",
"dir",
"with",
"sticky",
"bits",
"by",
"default"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L638-L669 | train |
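ensuredir accepts a string or a tuple of path parts; a sketch assuming /tmp is writable and the demo directory is disposable:
>>> from utool.util_path import ensuredir
>>> dpath = ensuredir(('/tmp', 'utool_demo', 'cache'))  # creates nested dirs as needed
>>> assert ensuredir(dpath) == dpath  # idempotent once the directory exists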
Erotemic/utool | utool/util_path.py | touch | def touch(fpath, times=None, verbose=True):
r"""
Creates file if it doesn't exist
Args:
fpath (str): file path
times (None):
verbose (bool):
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> fpath = '?'
>>> times = None
>>> verbose = True
>>> result = touch(fpath, times, verbose)
>>> print(result)
References:
http://stackoverflow.com/questions/1158076/implement-touch-using-python
"""
try:
if verbose:
print('[util_path] touching %r' % fpath)
with open(fpath, 'a'):
os.utime(fpath, times)
except Exception as ex:
import utool
utool.printex(ex, 'touch %s' % fpath)
raise
return fpath | python | def touch(fpath, times=None, verbose=True):
r"""
Creates file if it doesn't exist
Args:
fpath (str): file path
times (None):
verbose (bool):
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> fpath = '?'
>>> times = None
>>> verbose = True
>>> result = touch(fpath, times, verbose)
>>> print(result)
References:
http://stackoverflow.com/questions/1158076/implement-touch-using-python
"""
try:
if verbose:
print('[util_path] touching %r' % fpath)
with open(fpath, 'a'):
os.utime(fpath, times)
except Exception as ex:
import utool
utool.printex(ex, 'touch %s' % fpath)
raise
return fpath | [
"def",
"touch",
"(",
"fpath",
",",
"times",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"try",
":",
"if",
"verbose",
":",
"print",
"(",
"'[util_path] touching %r'",
"%",
"fpath",
")",
"with",
"open",
"(",
"fpath",
",",
"'a'",
")",
":",
"os",
".",
"utime",
"(",
"fpath",
",",
"times",
")",
"except",
"Exception",
"as",
"ex",
":",
"import",
"utool",
"utool",
".",
"printex",
"(",
"ex",
",",
"'touch %s'",
"%",
"fpath",
")",
"raise",
"return",
"fpath"
] | r"""
Creates file if it doesn't exist
Args:
fpath (str): file path
times (None):
verbose (bool):
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> fpath = '?'
>>> times = None
>>> verbose = True
>>> result = touch(fpath, times, verbose)
>>> print(result)
References:
http://stackoverflow.com/questions/1158076/implement-touch-using-python | [
"r",
"Creates",
"file",
"if",
"it",
"doesnt",
"exist"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L672-L702 | train |
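touch either creates the file or updates its timestamps; an illustrative call with a throwaway path:
>>> from utool.util_path import touch
>>> fpath = touch('/tmp/utool_demo_stamp.txt', verbose=False)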
Erotemic/utool | utool/util_path.py | copy_list | def copy_list(src_list, dst_list, lbl='Copying',
ioerr_ok=False, sherro_ok=False, oserror_ok=False):
""" Copies all data and stat info """
# Feb - 6 - 2014 Copy function
task_iter = zip(src_list, dst_list)
def docopy(src, dst):
try:
shutil.copy2(src, dst)
except OSError:
if ioerr_ok:
return False
raise
except shutil.Error:
if sherro_ok:
return False
raise
except IOError:
if ioerr_ok:
return False
raise
return True
progiter = util_progress.ProgIter(task_iter, adjust=True, lbl=lbl)
success_list = [docopy(src, dst) for (src, dst) in progiter]
return success_list | python | def copy_list(src_list, dst_list, lbl='Copying',
ioerr_ok=False, sherro_ok=False, oserror_ok=False):
""" Copies all data and stat info """
# Feb - 6 - 2014 Copy function
task_iter = zip(src_list, dst_list)
def docopy(src, dst):
try:
shutil.copy2(src, dst)
except OSError:
if ioerr_ok:
return False
raise
except shutil.Error:
if sherro_ok:
return False
raise
except IOError:
if ioerr_ok:
return False
raise
return True
progiter = util_progress.ProgIter(task_iter, adjust=True, lbl=lbl)
success_list = [docopy(src, dst) for (src, dst) in progiter]
return success_list | [
"def",
"copy_list",
"(",
"src_list",
",",
"dst_list",
",",
"lbl",
"=",
"'Copying'",
",",
"ioerr_ok",
"=",
"False",
",",
"sherro_ok",
"=",
"False",
",",
"oserror_ok",
"=",
"False",
")",
":",
"# Feb - 6 - 2014 Copy function",
"task_iter",
"=",
"zip",
"(",
"src_list",
",",
"dst_list",
")",
"def",
"docopy",
"(",
"src",
",",
"dst",
")",
":",
"try",
":",
"shutil",
".",
"copy2",
"(",
"src",
",",
"dst",
")",
"except",
"OSError",
":",
"if",
"ioerr_ok",
":",
"return",
"False",
"raise",
"except",
"shutil",
".",
"Error",
":",
"if",
"sherro_ok",
":",
"return",
"False",
"raise",
"except",
"IOError",
":",
"if",
"ioerr_ok",
":",
"return",
"False",
"raise",
"return",
"True",
"progiter",
"=",
"util_progress",
".",
"ProgIter",
"(",
"task_iter",
",",
"adjust",
"=",
"True",
",",
"lbl",
"=",
"lbl",
")",
"success_list",
"=",
"[",
"docopy",
"(",
"src",
",",
"dst",
")",
"for",
"(",
"src",
",",
"dst",
")",
"in",
"progiter",
"]",
"return",
"success_list"
] | Copies all data and stat info | [
"Copies",
"all",
"data",
"and",
"stat",
"info"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L871-L894 | train |
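copy_list copies pairwise from src_list to dst_list and returns per-file success flags; the paths are illustrative, and ioerr_ok=True turns a missing source into a False flag instead of an exception:
>>> from utool.util_path import copy_list
>>> src_list = ['/tmp/a.txt', '/tmp/b.txt']
>>> dst_list = ['/tmp/backup_a.txt', '/tmp/backup_b.txt']
>>> flags = copy_list(src_list, dst_list, ioerr_ok=True)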
Erotemic/utool | utool/util_path.py | glob | def glob(dpath, pattern=None, recursive=False, with_files=True, with_dirs=True,
maxdepth=None, exclude_dirs=[], fullpath=True, **kwargs):
r"""
Globs directory for pattern
DEPRECATED:
use pathlib.glob instead
Args:
dpath (str): directory path or pattern
pattern (str or list): pattern or list of patterns
(use only if pattern is not in dpath)
recursive (bool): (default = False)
with_files (bool): (default = True)
with_dirs (bool): (default = True)
maxdepth (None): (default = None)
exclude_dirs (list): (default = [])
Returns:
list: path_list
SeeAlso:
iglob
CommandLine:
python -m utool.util_path --test-glob
python -m utool.util_path --exec-glob:1
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> from os.path import dirname
>>> import utool as ut
>>> dpath = dirname(ut.__file__)
>>> pattern = '__*.py'
>>> recursive = True
>>> with_files = True
>>> with_dirs = True
>>> maxdepth = None
>>> fullpath = False
>>> exclude_dirs = ['_internal', join(dpath, 'experimental')]
>>> print('exclude_dirs = ' + ut.repr2(exclude_dirs))
>>> path_list = glob(dpath, pattern, recursive, with_files, with_dirs,
>>> maxdepth, exclude_dirs, fullpath)
>>> path_list = sorted(path_list)
>>> result = ('path_list = %s' % (ut.repr3(path_list),))
>>> result = result.replace(r'\\', '/')
>>> print(result)
path_list = [
'__init__.py',
'__main__.py',
'tests/__init__.py',
]
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = dirname(ut.__file__) + '/__*.py'
>>> path_list = glob(dpath)
>>> result = ('path_list = %s' % (str(path_list),))
>>> print(result)
"""
gen = iglob(dpath, pattern, recursive=recursive,
with_files=with_files, with_dirs=with_dirs, maxdepth=maxdepth,
fullpath=fullpath, exclude_dirs=exclude_dirs, **kwargs)
path_list = list(gen)
return path_list | python | def glob(dpath, pattern=None, recursive=False, with_files=True, with_dirs=True,
maxdepth=None, exclude_dirs=[], fullpath=True, **kwargs):
r"""
Globs directory for pattern
DEPRECATED:
use pathlib.glob instead
Args:
dpath (str): directory path or pattern
pattern (str or list): pattern or list of patterns
(use only if pattern is not in dpath)
recursive (bool): (default = False)
with_files (bool): (default = True)
with_dirs (bool): (default = True)
maxdepth (None): (default = None)
exclude_dirs (list): (default = [])
Returns:
list: path_list
SeeAlso:
iglob
CommandLine:
python -m utool.util_path --test-glob
python -m utool.util_path --exec-glob:1
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> from os.path import dirname
>>> import utool as ut
>>> dpath = dirname(ut.__file__)
>>> pattern = '__*.py'
>>> recursive = True
>>> with_files = True
>>> with_dirs = True
>>> maxdepth = None
>>> fullpath = False
>>> exclude_dirs = ['_internal', join(dpath, 'experimental')]
>>> print('exclude_dirs = ' + ut.repr2(exclude_dirs))
>>> path_list = glob(dpath, pattern, recursive, with_files, with_dirs,
>>> maxdepth, exclude_dirs, fullpath)
>>> path_list = sorted(path_list)
>>> result = ('path_list = %s' % (ut.repr3(path_list),))
>>> result = result.replace(r'\\', '/')
>>> print(result)
path_list = [
'__init__.py',
'__main__.py',
'tests/__init__.py',
]
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = dirname(ut.__file__) + '/__*.py'
>>> path_list = glob(dpath)
>>> result = ('path_list = %s' % (str(path_list),))
>>> print(result)
"""
gen = iglob(dpath, pattern, recursive=recursive,
with_files=with_files, with_dirs=with_dirs, maxdepth=maxdepth,
fullpath=fullpath, exclude_dirs=exclude_dirs, **kwargs)
path_list = list(gen)
return path_list | [
"def",
"glob",
"(",
"dpath",
",",
"pattern",
"=",
"None",
",",
"recursive",
"=",
"False",
",",
"with_files",
"=",
"True",
",",
"with_dirs",
"=",
"True",
",",
"maxdepth",
"=",
"None",
",",
"exclude_dirs",
"=",
"[",
"]",
",",
"fullpath",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"gen",
"=",
"iglob",
"(",
"dpath",
",",
"pattern",
",",
"recursive",
"=",
"recursive",
",",
"with_files",
"=",
"with_files",
",",
"with_dirs",
"=",
"with_dirs",
",",
"maxdepth",
"=",
"maxdepth",
",",
"fullpath",
"=",
"fullpath",
",",
"exclude_dirs",
"=",
"exclude_dirs",
",",
"*",
"*",
"kwargs",
")",
"path_list",
"=",
"list",
"(",
"gen",
")",
"return",
"path_list"
] | r"""
Globs directory for pattern
DEPRECATED:
use pathlib.glob instead
Args:
dpath (str): directory path or pattern
pattern (str or list): pattern or list of patterns
(use only if pattern is not in dpath)
recursive (bool): (default = False)
with_files (bool): (default = True)
with_dirs (bool): (default = True)
maxdepth (None): (default = None)
exclude_dirs (list): (default = [])
Returns:
list: path_list
SeeAlso:
iglob
CommandLine:
python -m utool.util_path --test-glob
python -m utool.util_path --exec-glob:1
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> from os.path import dirname
>>> import utool as ut
>>> dpath = dirname(ut.__file__)
>>> pattern = '__*.py'
>>> recursive = True
>>> with_files = True
>>> with_dirs = True
>>> maxdepth = None
>>> fullpath = False
>>> exclude_dirs = ['_internal', join(dpath, 'experimental')]
>>> print('exclude_dirs = ' + ut.repr2(exclude_dirs))
>>> path_list = glob(dpath, pattern, recursive, with_files, with_dirs,
>>> maxdepth, exclude_dirs, fullpath)
>>> path_list = sorted(path_list)
>>> result = ('path_list = %s' % (ut.repr3(path_list),))
>>> result = result.replace(r'\\', '/')
>>> print(result)
path_list = [
'__init__.py',
'__main__.py',
'tests/__init__.py',
]
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = dirname(ut.__file__) + '/__*.py'
>>> path_list = glob(dpath)
>>> result = ('path_list = %s' % (str(path_list),))
>>> print(result) | [
"r",
"Globs",
"directory",
"for",
"pattern"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L957-L1024 | train |
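A sketch of the two glob calling styles shown in its doctests (explicit pattern argument versus pattern folded into dpath):
>>> from utool.util_path import glob
>>> from os.path import dirname
>>> import utool as ut
>>> fpaths = glob(dirname(ut.__file__), '*.py', recursive=True, exclude_dirs=['_internal'])
>>> fpaths2 = glob(dirname(ut.__file__) + '/__*.py')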
Erotemic/utool | utool/util_path.py | num_images_in_dir | def num_images_in_dir(path):
"""
returns the number of images in a directory
"""
num_imgs = 0
for root, dirs, files in os.walk(path):
for fname in files:
if fpath_has_imgext(fname):
num_imgs += 1
return num_imgs | python | def num_images_in_dir(path):
"""
returns the number of images in a directory
"""
num_imgs = 0
for root, dirs, files in os.walk(path):
for fname in files:
if fpath_has_imgext(fname):
num_imgs += 1
return num_imgs | [
"def",
"num_images_in_dir",
"(",
"path",
")",
":",
"num_imgs",
"=",
"0",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"fname",
"in",
"files",
":",
"if",
"fpath_has_imgext",
"(",
"fname",
")",
":",
"num_imgs",
"+=",
"1",
"return",
"num_imgs"
] | returns the number of images in a directory | [
"returns",
"the",
"number",
"of",
"images",
"in",
"a",
"directory"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1150-L1159 | train |
Erotemic/utool | utool/util_path.py | fpath_has_ext | def fpath_has_ext(fname, exts, case_sensitive=False):
""" returns true if the filename has any of the given extensions """
fname_ = fname.lower() if not case_sensitive else fname
if case_sensitive:
ext_pats = ['*' + ext for ext in exts]
else:
ext_pats = ['*' + ext.lower() for ext in exts]
return any([fnmatch.fnmatch(fname_, pat) for pat in ext_pats]) | python | def fpath_has_ext(fname, exts, case_sensitive=False):
""" returns true if the filename has any of the given extensions """
fname_ = fname.lower() if not case_sensitive else fname
if case_sensitive:
ext_pats = ['*' + ext for ext in exts]
else:
ext_pats = ['*' + ext.lower() for ext in exts]
return any([fnmatch.fnmatch(fname_, pat) for pat in ext_pats]) | [
"def",
"fpath_has_ext",
"(",
"fname",
",",
"exts",
",",
"case_sensitive",
"=",
"False",
")",
":",
"fname_",
"=",
"fname",
".",
"lower",
"(",
")",
"if",
"not",
"case_sensitive",
"else",
"fname",
"if",
"case_sensitive",
":",
"ext_pats",
"=",
"[",
"'*'",
"+",
"ext",
"for",
"ext",
"in",
"exts",
"]",
"else",
":",
"ext_pats",
"=",
"[",
"'*'",
"+",
"ext",
".",
"lower",
"(",
")",
"for",
"ext",
"in",
"exts",
"]",
"return",
"any",
"(",
"[",
"fnmatch",
".",
"fnmatch",
"(",
"fname_",
",",
"pat",
")",
"for",
"pat",
"in",
"ext_pats",
"]",
")"
] | returns true if the filename has any of the given extensions | [
"returns",
"true",
"if",
"the",
"filename",
"has",
"any",
"of",
"the",
"given",
"extensions"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1167-L1174 | train |
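fpath_has_ext lower-cases both the filename and the extensions unless case_sensitive=True; the second result assumes a posix platform where fnmatch stays case-sensitive:
>>> from utool.util_path import fpath_has_ext
>>> fpath_has_ext('photo.JPG', ['.jpg', '.png'])
True
>>> fpath_has_ext('photo.JPG', ['.jpg', '.png'], case_sensitive=True)
False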
Erotemic/utool | utool/util_path.py | get_modpath | def get_modpath(modname, prefer_pkg=False, prefer_main=False):
r"""
Returns path to module
Args:
modname (str or module): module name or actual module
Returns:
str: module_dir
CommandLine:
python -m utool.util_path --test-get_modpath
Setup:
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool.util_path'
>>> module_dir = get_modpath(modname)
>>> result = ut.truepath_relative(module_dir, utool_dir)
>>> result = ut.ensure_unixslash(result)
>>> print(result)
utool/util_path.py
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool._internal'
>>> module_dir = get_modpath(modname, prefer_pkg=True)
>>> result = ut.ensure_unixslash(module_dir)
>>> print(result)
>>> assert result.endswith('utool/_internal')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool'
>>> module_dir = get_modpath(modname)
>>> result = ut.truepath_relative(module_dir, utool_dir)
>>> result = ut.ensure_unixslash(result)
>>> print(result)
utool/__init__.py
"""
import importlib
if isinstance(modname, six.string_types):
module = importlib.import_module(modname)
else:
module = modname # Hack
modpath = module.__file__.replace('.pyc', '.py')
initname = '__init__.py'
mainname = '__main__.py'
if prefer_pkg:
if modpath.endswith(initname) or modpath.endswith(mainname):
modpath = dirname(modpath)
# modpath = modpath[:-len(initname)]
if prefer_main:
if modpath.endswith(initname):
main_modpath = modpath[:-len(initname)] + mainname
if exists(main_modpath):
modpath = main_modpath
#modname = modname.replace('.__init__', '').strip()
#module_dir = get_module_dir(module)
return modpath | python | def get_modpath(modname, prefer_pkg=False, prefer_main=False):
r"""
Returns path to module
Args:
modname (str or module): module name or actual module
Returns:
str: module_dir
CommandLine:
python -m utool.util_path --test-get_modpath
Setup:
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool.util_path'
>>> module_dir = get_modpath(modname)
>>> result = ut.truepath_relative(module_dir, utool_dir)
>>> result = ut.ensure_unixslash(result)
>>> print(result)
utool/util_path.py
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool._internal'
>>> module_dir = get_modpath(modname, prefer_pkg=True)
>>> result = ut.ensure_unixslash(module_dir)
>>> print(result)
>>> assert result.endswith('utool/_internal')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool'
>>> module_dir = get_modpath(modname)
>>> result = ut.truepath_relative(module_dir, utool_dir)
>>> result = ut.ensure_unixslash(result)
>>> print(result)
utool/__init__.py
"""
import importlib
if isinstance(modname, six.string_types):
module = importlib.import_module(modname)
else:
module = modname # Hack
modpath = module.__file__.replace('.pyc', '.py')
initname = '__init__.py'
mainname = '__main__.py'
if prefer_pkg:
if modpath.endswith(initname) or modpath.endswith(mainname):
modpath = dirname(modpath)
# modpath = modpath[:-len(initname)]
if prefer_main:
if modpath.endswith(initname):
main_modpath = modpath[:-len(initname)] + mainname
if exists(main_modpath):
modpath = main_modpath
#modname = modname.replace('.__init__', '').strip()
#module_dir = get_module_dir(module)
return modpath | [
"def",
"get_modpath",
"(",
"modname",
",",
"prefer_pkg",
"=",
"False",
",",
"prefer_main",
"=",
"False",
")",
":",
"import",
"importlib",
"if",
"isinstance",
"(",
"modname",
",",
"six",
".",
"string_types",
")",
":",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"modname",
")",
"else",
":",
"module",
"=",
"modname",
"# Hack",
"modpath",
"=",
"module",
".",
"__file__",
".",
"replace",
"(",
"'.pyc'",
",",
"'.py'",
")",
"initname",
"=",
"'__init__.py'",
"mainname",
"=",
"'__main__.py'",
"if",
"prefer_pkg",
":",
"if",
"modpath",
".",
"endswith",
"(",
"initname",
")",
"or",
"modpath",
".",
"endswith",
"(",
"mainname",
")",
":",
"modpath",
"=",
"dirname",
"(",
"modpath",
")",
"# modpath = modpath[:-len(initname)]",
"if",
"prefer_main",
":",
"if",
"modpath",
".",
"endswith",
"(",
"initname",
")",
":",
"main_modpath",
"=",
"modpath",
"[",
":",
"-",
"len",
"(",
"initname",
")",
"]",
"+",
"mainname",
"if",
"exists",
"(",
"main_modpath",
")",
":",
"modpath",
"=",
"main_modpath",
"#modname = modname.replace('.__init__', '').strip()",
"#module_dir = get_module_dir(module)",
"return",
"modpath"
] | r"""
Returns path to module
Args:
modname (str or module): module name or actual module
Returns:
str: module_dir
CommandLine:
python -m utool.util_path --test-get_modpath
Setup:
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool.util_path'
>>> module_dir = get_modpath(modname)
>>> result = ut.truepath_relative(module_dir, utool_dir)
>>> result = ut.ensure_unixslash(result)
>>> print(result)
utool/util_path.py
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool._internal'
>>> module_dir = get_modpath(modname, prefer_pkg=True)
>>> result = ut.ensure_unixslash(module_dir)
>>> print(result)
>>> assert result.endswith('utool/_internal')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> utool_dir = dirname(dirname(ut.__file__))
>>> modname = 'utool'
>>> module_dir = get_modpath(modname)
>>> result = ut.truepath_relative(module_dir, utool_dir)
>>> result = ut.ensure_unixslash(result)
>>> print(result)
utool/__init__.py | [
"r",
"Returns",
"path",
"to",
"module"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1233-L1305 | train |
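get_modpath accepts a module name or a live module object; a sketch of the common calls (returned paths depend on where utool is installed):
>>> from utool.util_path import get_modpath
>>> modfile = get_modpath('utool.util_path')  # .../utool/util_path.py
>>> pkgdir = get_modpath('utool', prefer_pkg=True)  # package directory instead of __init__.py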
Erotemic/utool | utool/util_path.py | get_relative_modpath | def get_relative_modpath(module_fpath):
"""
Returns path to module relative to the package root
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> rel_modpath = ut.get_relative_modpath(module_fpath)
>>> rel_modpath = rel_modpath.replace('.pyc', '.py') # allow pyc or py
>>> result = ensure_crossplat_path(rel_modpath)
>>> print(result)
utool/util_path.py
"""
modsubdir_list = get_module_subdir_list(module_fpath)
_, ext = splitext(module_fpath)
rel_modpath = join(*modsubdir_list) + ext
rel_modpath = ensure_crossplat_path(rel_modpath)
return rel_modpath | python | def get_relative_modpath(module_fpath):
"""
Returns path to module relative to the package root
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> rel_modpath = ut.get_relative_modpath(module_fpath)
>>> rel_modpath = rel_modpath.replace('.pyc', '.py') # allow pyc or py
>>> result = ensure_crossplat_path(rel_modpath)
>>> print(result)
utool/util_path.py
"""
modsubdir_list = get_module_subdir_list(module_fpath)
_, ext = splitext(module_fpath)
rel_modpath = join(*modsubdir_list) + ext
rel_modpath = ensure_crossplat_path(rel_modpath)
return rel_modpath | [
"def",
"get_relative_modpath",
"(",
"module_fpath",
")",
":",
"modsubdir_list",
"=",
"get_module_subdir_list",
"(",
"module_fpath",
")",
"_",
",",
"ext",
"=",
"splitext",
"(",
"module_fpath",
")",
"rel_modpath",
"=",
"join",
"(",
"*",
"modsubdir_list",
")",
"+",
"ext",
"rel_modpath",
"=",
"ensure_crossplat_path",
"(",
"rel_modpath",
")",
"return",
"rel_modpath"
] | Returns path to module relative to the package root
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> rel_modpath = ut.get_relative_modpath(module_fpath)
>>> rel_modpath = rel_modpath.replace('.pyc', '.py') # allow pyc or py
>>> result = ensure_crossplat_path(rel_modpath)
>>> print(result)
utool/util_path.py | [
"Returns",
"path",
"to",
"module",
"relative",
"to",
"the",
"package",
"root"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1350-L1375 | train |
Erotemic/utool | utool/util_path.py | get_modname_from_modpath | def get_modname_from_modpath(module_fpath):
"""
returns importable name from file path
get_modname_from_modpath
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> modname = ut.get_modname_from_modpath(module_fpath)
>>> result = modname
>>> print(result)
utool.util_path
"""
modsubdir_list = get_module_subdir_list(module_fpath)
modname = '.'.join(modsubdir_list)
modname = modname.replace('.__init__', '').strip()
modname = modname.replace('.__main__', '').strip()
return modname | python | def get_modname_from_modpath(module_fpath):
"""
returns importable name from file path
get_modname_from_modpath
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> modname = ut.get_modname_from_modpath(module_fpath)
>>> result = modname
>>> print(result)
utool.util_path
"""
modsubdir_list = get_module_subdir_list(module_fpath)
modname = '.'.join(modsubdir_list)
modname = modname.replace('.__init__', '').strip()
modname = modname.replace('.__main__', '').strip()
return modname | [
"def",
"get_modname_from_modpath",
"(",
"module_fpath",
")",
":",
"modsubdir_list",
"=",
"get_module_subdir_list",
"(",
"module_fpath",
")",
"modname",
"=",
"'.'",
".",
"join",
"(",
"modsubdir_list",
")",
"modname",
"=",
"modname",
".",
"replace",
"(",
"'.__init__'",
",",
"''",
")",
".",
"strip",
"(",
")",
"modname",
"=",
"modname",
".",
"replace",
"(",
"'.__main__'",
",",
"''",
")",
".",
"strip",
"(",
")",
"return",
"modname"
] | returns importable name from file path
get_modname_from_modpath
Args:
module_fpath (str): module filepath
Returns:
str: modname
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> module_fpath = ut.util_path.__file__
>>> modname = ut.get_modname_from_modpath(module_fpath)
>>> result = modname
>>> print(result)
utool.util_path | [
"returns",
"importable",
"name",
"from",
"file",
"path"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1378-L1404 | train |
Erotemic/utool | utool/util_path.py | ls | def ls(path, pattern='*'):
""" like unix ls - lists all files and dirs in path"""
path_iter = glob(path, pattern, recursive=False)
return sorted(list(path_iter)) | python | def ls(path, pattern='*'):
""" like unix ls - lists all files and dirs in path"""
path_iter = glob(path, pattern, recursive=False)
return sorted(list(path_iter)) | [
"def",
"ls",
"(",
"path",
",",
"pattern",
"=",
"'*'",
")",
":",
"path_iter",
"=",
"glob",
"(",
"path",
",",
"pattern",
",",
"recursive",
"=",
"False",
")",
"return",
"sorted",
"(",
"list",
"(",
"path_iter",
")",
")"
] | like unix ls - lists all files and dirs in path | [
"like",
"unix",
"ls",
"-",
"lists",
"all",
"files",
"and",
"dirs",
"in",
"path"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1437-L1440 | train |
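A minimal usage sketch for ls, following the doctest conventions of the other records here; it assumes ls and dirname are importable from utool.util_path as in the surrounding examples, and the listed directory is just the package's own directory:

>>> from utool.util_path import *  # NOQA
>>> import utool as ut
>>> dpath = dirname(ut.__file__)            # directory to list
>>> everything = ls(dpath)                  # all files and dirs, sorted
>>> only_py = ls(dpath, pattern='*.py')     # restrict with a glob pattern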
Erotemic/utool | utool/util_path.py | ls_moduledirs | def ls_moduledirs(path, private=True, full=True):
""" lists all dirs which are python modules in path """
dir_list = ls_dirs(path)
module_dir_iter = filter(is_module_dir, dir_list)
if not private:
module_dir_iter = filterfalse(is_private_module, module_dir_iter)
if not full:
module_dir_iter = map(basename, module_dir_iter)
return list(module_dir_iter) | python | def ls_moduledirs(path, private=True, full=True):
""" lists all dirs which are python modules in path """
dir_list = ls_dirs(path)
module_dir_iter = filter(is_module_dir, dir_list)
if not private:
module_dir_iter = filterfalse(is_private_module, module_dir_iter)
if not full:
module_dir_iter = map(basename, module_dir_iter)
return list(module_dir_iter) | [
"def",
"ls_moduledirs",
"(",
"path",
",",
"private",
"=",
"True",
",",
"full",
"=",
"True",
")",
":",
"dir_list",
"=",
"ls_dirs",
"(",
"path",
")",
"module_dir_iter",
"=",
"filter",
"(",
"is_module_dir",
",",
"dir_list",
")",
"if",
"not",
"private",
":",
"module_dir_iter",
"=",
"filterfalse",
"(",
"is_private_module",
",",
"module_dir_iter",
")",
"if",
"not",
"full",
":",
"module_dir_iter",
"=",
"map",
"(",
"basename",
",",
"module_dir_iter",
")",
"return",
"list",
"(",
"module_dir_iter",
")"
] | lists all dirs which are python modules in path | [
"lists",
"all",
"dirs",
"which",
"are",
"python",
"modules",
"in",
"path"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1460-L1468 | train |
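A minimal usage sketch for ls_moduledirs; private=False filters out whatever is_private_module flags (that helper is not shown in this record) and full=False returns basenames instead of full paths, per the code above:

>>> from utool.util_path import *  # NOQA
>>> import utool as ut
>>> repo_dpath = dirname(dirname(ut.__file__))   # parent dir containing the utool package
>>> pkg_dpaths = ls_moduledirs(repo_dpath)                             # full paths to module dirs
>>> pkg_names = ls_moduledirs(repo_dpath, private=False, full=False)   # basenames, non-private only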
Erotemic/utool | utool/util_path.py | list_images | def list_images(img_dpath_, ignore_list=[], recursive=False, fullpath=False,
full=None, sort=True):
r"""
Returns a list of images in a directory. By default returns relative paths.
TODO: rename to ls_images
TODO: Change all instances of fullpath to full
Args:
img_dpath_ (str):
ignore_list (list): (default = [])
recursive (bool): (default = False)
fullpath (bool): (default = False)
full (None): (default = None)
sort (bool): (default = True)
Returns:
list: gname_list
CommandLine:
python -m utool.util_path --exec-list_images
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> img_dpath_ = '?'
>>> ignore_list = []
>>> recursive = False
>>> fullpath = False
>>> full = None
>>> sort = True
>>> gname_list = list_images(img_dpath_, ignore_list, recursive,
>>> fullpath, full, sort)
>>> result = ('gname_list = %s' % (str(gname_list),))
>>> print(result)
"""
#if not QUIET:
# print(ignore_list)
if full is not None:
fullpath = fullpath or full
img_dpath_ = util_str.ensure_unicode(img_dpath_)
img_dpath = realpath(img_dpath_)
ignore_set = set(ignore_list)
gname_list_ = []
assertpath(img_dpath)
# Get all the files in a directory recursively
true_imgpath = truepath(img_dpath)
for root, dlist, flist in os.walk(true_imgpath):
root = util_str.ensure_unicode(root)
rel_dpath = relpath(root, img_dpath)
# Ignore directories
if any([dname in ignore_set for dname in dirsplit(rel_dpath)]):
continue
for fname in iter(flist):
fname = util_str.ensure_unicode(fname)
gname = join(rel_dpath, fname).replace('\\', '/')
if gname.startswith('./'):
gname = gname[2:]
if fpath_has_imgext(gname):
# Ignore Files
if gname in ignore_set:
continue
if fullpath:
gpath = join(img_dpath, gname)
gname_list_.append(gpath)
else:
gname_list_.append(gname)
if not recursive:
break
if sort:
gname_list = sorted(gname_list_)
return gname_list | python | def list_images(img_dpath_, ignore_list=[], recursive=False, fullpath=False,
full=None, sort=True):
r"""
Returns a list of images in a directory. By default returns relative paths.
TODO: rename to ls_images
TODO: Change all instances of fullpath to full
Args:
img_dpath_ (str):
ignore_list (list): (default = [])
recursive (bool): (default = False)
fullpath (bool): (default = False)
full (None): (default = None)
sort (bool): (default = True)
Returns:
list: gname_list
CommandLine:
python -m utool.util_path --exec-list_images
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> img_dpath_ = '?'
>>> ignore_list = []
>>> recursive = False
>>> fullpath = False
>>> full = None
>>> sort = True
>>> gname_list = list_images(img_dpath_, ignore_list, recursive,
>>> fullpath, full, sort)
>>> result = ('gname_list = %s' % (str(gname_list),))
>>> print(result)
"""
#if not QUIET:
# print(ignore_list)
if full is not None:
fullpath = fullpath or full
img_dpath_ = util_str.ensure_unicode(img_dpath_)
img_dpath = realpath(img_dpath_)
ignore_set = set(ignore_list)
gname_list_ = []
assertpath(img_dpath)
# Get all the files in a directory recursively
true_imgpath = truepath(img_dpath)
for root, dlist, flist in os.walk(true_imgpath):
root = util_str.ensure_unicode(root)
rel_dpath = relpath(root, img_dpath)
# Ignore directories
if any([dname in ignore_set for dname in dirsplit(rel_dpath)]):
continue
for fname in iter(flist):
fname = util_str.ensure_unicode(fname)
gname = join(rel_dpath, fname).replace('\\', '/')
if gname.startswith('./'):
gname = gname[2:]
if fpath_has_imgext(gname):
# Ignore Files
if gname in ignore_set:
continue
if fullpath:
gpath = join(img_dpath, gname)
gname_list_.append(gpath)
else:
gname_list_.append(gname)
if not recursive:
break
if sort:
gname_list = sorted(gname_list_)
return gname_list | [
"def",
"list_images",
"(",
"img_dpath_",
",",
"ignore_list",
"=",
"[",
"]",
",",
"recursive",
"=",
"False",
",",
"fullpath",
"=",
"False",
",",
"full",
"=",
"None",
",",
"sort",
"=",
"True",
")",
":",
"#if not QUIET:",
"# print(ignore_list)",
"if",
"full",
"is",
"not",
"None",
":",
"fullpath",
"=",
"fullpath",
"or",
"full",
"img_dpath_",
"=",
"util_str",
".",
"ensure_unicode",
"(",
"img_dpath_",
")",
"img_dpath",
"=",
"realpath",
"(",
"img_dpath_",
")",
"ignore_set",
"=",
"set",
"(",
"ignore_list",
")",
"gname_list_",
"=",
"[",
"]",
"assertpath",
"(",
"img_dpath",
")",
"# Get all the files in a directory recursively",
"true_imgpath",
"=",
"truepath",
"(",
"img_dpath",
")",
"for",
"root",
",",
"dlist",
",",
"flist",
"in",
"os",
".",
"walk",
"(",
"true_imgpath",
")",
":",
"root",
"=",
"util_str",
".",
"ensure_unicode",
"(",
"root",
")",
"rel_dpath",
"=",
"relpath",
"(",
"root",
",",
"img_dpath",
")",
"# Ignore directories",
"if",
"any",
"(",
"[",
"dname",
"in",
"ignore_set",
"for",
"dname",
"in",
"dirsplit",
"(",
"rel_dpath",
")",
"]",
")",
":",
"continue",
"for",
"fname",
"in",
"iter",
"(",
"flist",
")",
":",
"fname",
"=",
"util_str",
".",
"ensure_unicode",
"(",
"fname",
")",
"gname",
"=",
"join",
"(",
"rel_dpath",
",",
"fname",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"if",
"gname",
".",
"startswith",
"(",
"'./'",
")",
":",
"gname",
"=",
"gname",
"[",
"2",
":",
"]",
"if",
"fpath_has_imgext",
"(",
"gname",
")",
":",
"# Ignore Files",
"if",
"gname",
"in",
"ignore_set",
":",
"continue",
"if",
"fullpath",
":",
"gpath",
"=",
"join",
"(",
"img_dpath",
",",
"gname",
")",
"gname_list_",
".",
"append",
"(",
"gpath",
")",
"else",
":",
"gname_list_",
".",
"append",
"(",
"gname",
")",
"if",
"not",
"recursive",
":",
"break",
"if",
"sort",
":",
"gname_list",
"=",
"sorted",
"(",
"gname_list_",
")",
"return",
"gname_list"
] | r"""
Returns a list of images in a directory. By default returns relative paths.
TODO: rename to ls_images
TODO: Change all instances of fullpath to full
Args:
img_dpath_ (str):
ignore_list (list): (default = [])
recursive (bool): (default = False)
fullpath (bool): (default = False)
full (None): (default = None)
sort (bool): (default = True)
Returns:
list: gname_list
CommandLine:
python -m utool.util_path --exec-list_images
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> img_dpath_ = '?'
>>> ignore_list = []
>>> recursive = False
>>> fullpath = False
>>> full = None
>>> sort = True
>>> gname_list = list_images(img_dpath_, ignore_list, recursive,
>>> fullpath, full, sort)
>>> result = ('gname_list = %s' % (str(gname_list),))
>>> print(result) | [
"r",
"Returns",
"a",
"list",
"of",
"images",
"in",
"a",
"directory",
".",
"By",
"default",
"returns",
"relative",
"paths",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1502-L1573 | train |
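Because the doctest above only uses a placeholder directory, here is a hedged usage sketch; the image directory is an assumption and must be replaced with a real path before running:

>>> from utool.util_path import *  # NOQA
>>> img_dpath = '/path/to/images'    # assumption: point this at a real image directory
>>> rel_gnames = list_images(img_dpath, recursive=True)              # paths relative to img_dpath
>>> abs_gpaths = list_images(img_dpath, recursive=True, full=True)   # joined with img_dpath via full=True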
Erotemic/utool | utool/util_path.py | assertpath | def assertpath(path_, msg='', **kwargs):
""" Asserts that a patha exists """
if NO_ASSERTS:
return
if path_ is None:
raise AssertionError('path=%r is None! %s' % (path_, msg))
if path_ == '':
raise AssertionError('path=%r is the empty string! %s' % (path_, msg))
if not checkpath(path_, **kwargs):
raise AssertionError('path=%r does not exist! %s' % (path_, msg)) | python | def assertpath(path_, msg='', **kwargs):
""" Asserts that a patha exists """
if NO_ASSERTS:
return
if path_ is None:
raise AssertionError('path=%r is None! %s' % (path_, msg))
if path_ == '':
raise AssertionError('path=%r is the empty string! %s' % (path_, msg))
if not checkpath(path_, **kwargs):
raise AssertionError('path=%r does not exist! %s' % (path_, msg)) | [
"def",
"assertpath",
"(",
"path_",
",",
"msg",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"NO_ASSERTS",
":",
"return",
"if",
"path_",
"is",
"None",
":",
"raise",
"AssertionError",
"(",
"'path is None! %s'",
"%",
"(",
"path_",
",",
"msg",
")",
")",
"if",
"path_",
"==",
"''",
":",
"raise",
"AssertionError",
"(",
"'path=%r is the empty string! %s'",
"%",
"(",
"path_",
",",
"msg",
")",
")",
"if",
"not",
"checkpath",
"(",
"path_",
",",
"*",
"*",
"kwargs",
")",
":",
"raise",
"AssertionError",
"(",
"'path=%r does not exist! %s'",
"%",
"(",
"path_",
",",
"msg",
")",
")"
] | Asserts that a path exists | [
"Asserts",
"that",
"a",
"patha",
"exists"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1579-L1588 | train |
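A minimal sketch of assertpath: it passes silently for existing paths, raises AssertionError otherwise, and becomes a no-op when NO_ASSERTS is set:

>>> from utool.util_path import *  # NOQA
>>> import utool as ut
>>> assertpath(ut.__file__)    # existing path: returns without raising
>>> # assertpath('/does/not/exist', msg='extra context')  # hypothetical path; would raise AssertionError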
Erotemic/utool | utool/util_path.py | matching_fpaths | def matching_fpaths(dpath_list, include_patterns, exclude_dirs=[],
greater_exclude_dirs=[], exclude_patterns=[],
recursive=True):
r"""
walks dpath lists returning all directories that match the requested
pattern.
Args:
dpath_list (list):
include_patterns (str):
exclude_dirs (None):
recursive (bool):
References:
# TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs
http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath_list = [dirname(dirname(ut.__file__))]
>>> include_patterns = get_standard_include_patterns()
>>> exclude_dirs = ['_page']
>>> greater_exclude_dirs = get_standard_exclude_dnames()
>>> recursive = True
>>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs,
>>> greater_exclude_dirs, recursive)
>>> result = list(fpath_gen)
>>> print('\n'.join(result))
"""
if isinstance(dpath_list, six.string_types):
dpath_list = [dpath_list]
for dpath in dpath_list:
for root, dname_list, fname_list in os.walk(dpath):
# Look at all subdirs
subdirs = pathsplit_full(relpath(root, dpath))
# HACK:
if any([dir_ in greater_exclude_dirs for dir_ in subdirs]):
continue
# Look at one subdir
if basename(root) in exclude_dirs:
continue
_match = fnmatch.fnmatch
for name in fname_list:
# yield filepaths that are included
if any(_match(name, pat) for pat in include_patterns):
# ... and not excluded
if not any(_match(name, pat) for pat in exclude_patterns):
fpath = join(root, name)
yield fpath
if not recursive:
break | python | def matching_fpaths(dpath_list, include_patterns, exclude_dirs=[],
greater_exclude_dirs=[], exclude_patterns=[],
recursive=True):
r"""
walks dpath lists returning all directories that match the requested
pattern.
Args:
dpath_list (list):
include_patterns (str):
exclude_dirs (None):
recursive (bool):
References:
# TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs
http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath_list = [dirname(dirname(ut.__file__))]
>>> include_patterns = get_standard_include_patterns()
>>> exclude_dirs = ['_page']
>>> greater_exclude_dirs = get_standard_exclude_dnames()
>>> recursive = True
>>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs,
>>> greater_exclude_dirs, recursive)
>>> result = list(fpath_gen)
>>> print('\n'.join(result))
"""
if isinstance(dpath_list, six.string_types):
dpath_list = [dpath_list]
for dpath in dpath_list:
for root, dname_list, fname_list in os.walk(dpath):
# Look at all subdirs
subdirs = pathsplit_full(relpath(root, dpath))
# HACK:
if any([dir_ in greater_exclude_dirs for dir_ in subdirs]):
continue
# Look at one subdir
if basename(root) in exclude_dirs:
continue
_match = fnmatch.fnmatch
for name in fname_list:
# yield filepaths that are included
if any(_match(name, pat) for pat in include_patterns):
# ... and not excluded
if not any(_match(name, pat) for pat in exclude_patterns):
fpath = join(root, name)
yield fpath
if not recursive:
break | [
"def",
"matching_fpaths",
"(",
"dpath_list",
",",
"include_patterns",
",",
"exclude_dirs",
"=",
"[",
"]",
",",
"greater_exclude_dirs",
"=",
"[",
"]",
",",
"exclude_patterns",
"=",
"[",
"]",
",",
"recursive",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"dpath_list",
",",
"six",
".",
"string_types",
")",
":",
"dpath_list",
"=",
"[",
"dpath_list",
"]",
"for",
"dpath",
"in",
"dpath_list",
":",
"for",
"root",
",",
"dname_list",
",",
"fname_list",
"in",
"os",
".",
"walk",
"(",
"dpath",
")",
":",
"# Look at all subdirs",
"subdirs",
"=",
"pathsplit_full",
"(",
"relpath",
"(",
"root",
",",
"dpath",
")",
")",
"# HACK:",
"if",
"any",
"(",
"[",
"dir_",
"in",
"greater_exclude_dirs",
"for",
"dir_",
"in",
"subdirs",
"]",
")",
":",
"continue",
"# Look at one subdir",
"if",
"basename",
"(",
"root",
")",
"in",
"exclude_dirs",
":",
"continue",
"_match",
"=",
"fnmatch",
".",
"fnmatch",
"for",
"name",
"in",
"fname_list",
":",
"# yeild filepaths that are included",
"if",
"any",
"(",
"_match",
"(",
"name",
",",
"pat",
")",
"for",
"pat",
"in",
"include_patterns",
")",
":",
"# ... and not excluded",
"if",
"not",
"any",
"(",
"_match",
"(",
"name",
",",
"pat",
")",
"for",
"pat",
"in",
"exclude_patterns",
")",
":",
"fpath",
"=",
"join",
"(",
"root",
",",
"name",
")",
"yield",
"fpath",
"if",
"not",
"recursive",
":",
"break"
] | r"""
walks dpath lists returning all directories that match the requested
pattern.
Args:
dpath_list (list):
include_patterns (str):
exclude_dirs (None):
recursive (bool):
References:
# TODO: fix names and behavior of exclude_dirs and greater_exclude_dirs
http://stackoverflow.com/questions/19859840/excluding-directories-in-os-walk
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath_list = [dirname(dirname(ut.__file__))]
>>> include_patterns = get_standard_include_patterns()
>>> exclude_dirs = ['_page']
>>> greater_exclude_dirs = get_standard_exclude_dnames()
>>> recursive = True
>>> fpath_gen = matching_fpaths(dpath_list, include_patterns, exclude_dirs,
>>> greater_exclude_dirs, recursive)
>>> result = list(fpath_gen)
>>> print('\n'.join(result)) | [
"r",
"walks",
"dpath",
"lists",
"returning",
"all",
"directories",
"that",
"match",
"the",
"requested",
"pattern",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1608-L1660 | train |
Erotemic/utool | utool/util_path.py | sed | def sed(regexpr, repl, force=False, recursive=False, dpath_list=None,
fpath_list=None, verbose=None, include_patterns=None,
exclude_patterns=[]):
"""
Python implementation of sed. NOT FINISHED
searches and replaces text in files
Args:
regexpr (str): regex patterns to find
repl (str): text to replace
force (bool):
recursive (bool):
dpath_list (list): directories to search (defaults to cwd)
"""
#_grep(r, [repl], dpath_list=dpath_list, recursive=recursive)
if include_patterns is None:
include_patterns = ['*.py', '*.pyx', '*.pxi', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.html', '*.tex']
if dpath_list is None:
dpath_list = [os.getcwd()]
if verbose is None:
verbose = ut.NOT_QUIET
if fpath_list is None:
greater_exclude_dirs = get_standard_exclude_dnames()
exclude_dirs = []
fpath_generator = matching_fpaths(
dpath_list, include_patterns, exclude_dirs,
greater_exclude_dirs=greater_exclude_dirs,
recursive=recursive, exclude_patterns=exclude_patterns)
else:
fpath_generator = fpath_list
if verbose:
print('sed-ing %r' % (dpath_list,))
print(' * regular expression : %r' % (regexpr,))
print(' * replacement : %r' % (repl,))
print(' * include_patterns : %r' % (include_patterns,))
print(' * recursive: %r' % (recursive,))
print(' * force: %r' % (force,))
from utool import util_str
print(' * fpath_list: %s' % (util_str.repr3(fpath_list),))
regexpr = extend_regex(regexpr)
#if '\x08' in regexpr:
# print('Remember \\x08 != \\b')
# print('substituting for you')
# regexpr = regexpr.replace('\x08', '\\b')
# print(' * regular expression : %r' % (regexpr,))
# Walk through each directory recursively
num_changed = 0
num_files_checked = 0
fpaths_changed = []
for fpath in fpath_generator:
num_files_checked += 1
changed_lines = sedfile(fpath, regexpr, repl, force, verbose=verbose)
if changed_lines is not None:
fpaths_changed.append(fpath)
num_changed += len(changed_lines)
import utool as ut
print('num_files_checked = %r' % (num_files_checked,))
print('fpaths_changed = %s' % (ut.repr3(sorted(fpaths_changed)),))
print('total lines changed = %r' % (num_changed,)) | python | def sed(regexpr, repl, force=False, recursive=False, dpath_list=None,
fpath_list=None, verbose=None, include_patterns=None,
exclude_patterns=[]):
"""
Python implementation of sed. NOT FINISHED
searches and replaces text in files
Args:
regexpr (str): regex patterns to find
repl (str): text to replace
force (bool):
recursive (bool):
dpath_list (list): directories to search (defaults to cwd)
"""
#_grep(r, [repl], dpath_list=dpath_list, recursive=recursive)
if include_patterns is None:
include_patterns = ['*.py', '*.pyx', '*.pxi', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.html', '*.tex']
if dpath_list is None:
dpath_list = [os.getcwd()]
if verbose is None:
verbose = ut.NOT_QUIET
if fpath_list is None:
greater_exclude_dirs = get_standard_exclude_dnames()
exclude_dirs = []
fpath_generator = matching_fpaths(
dpath_list, include_patterns, exclude_dirs,
greater_exclude_dirs=greater_exclude_dirs,
recursive=recursive, exclude_patterns=exclude_patterns)
else:
fpath_generator = fpath_list
if verbose:
print('sed-ing %r' % (dpath_list,))
print(' * regular expression : %r' % (regexpr,))
print(' * replacement : %r' % (repl,))
print(' * include_patterns : %r' % (include_patterns,))
print(' * recursive: %r' % (recursive,))
print(' * force: %r' % (force,))
from utool import util_str
print(' * fpath_list: %s' % (util_str.repr3(fpath_list),))
regexpr = extend_regex(regexpr)
#if '\x08' in regexpr:
# print('Remember \\x08 != \\b')
# print('substituting for you')
# regexpr = regexpr.replace('\x08', '\\b')
# print(' * regular expression : %r' % (regexpr,))
# Walk through each directory recursively
num_changed = 0
num_files_checked = 0
fpaths_changed = []
for fpath in fpath_generator:
num_files_checked += 1
changed_lines = sedfile(fpath, regexpr, repl, force, verbose=verbose)
if changed_lines is not None:
fpaths_changed.append(fpath)
num_changed += len(changed_lines)
import utool as ut
print('num_files_checked = %r' % (num_files_checked,))
print('fpaths_changed = %s' % (ut.repr3(sorted(fpaths_changed)),))
print('total lines changed = %r' % (num_changed,)) | [
"def",
"sed",
"(",
"regexpr",
",",
"repl",
",",
"force",
"=",
"False",
",",
"recursive",
"=",
"False",
",",
"dpath_list",
"=",
"None",
",",
"fpath_list",
"=",
"None",
",",
"verbose",
"=",
"None",
",",
"include_patterns",
"=",
"None",
",",
"exclude_patterns",
"=",
"[",
"]",
")",
":",
"#_grep(r, [repl], dpath_list=dpath_list, recursive=recursive)",
"if",
"include_patterns",
"is",
"None",
":",
"include_patterns",
"=",
"[",
"'*.py'",
",",
"'*.pyx'",
",",
"'*.pxi'",
",",
"'*.cxx'",
",",
"'*.cpp'",
",",
"'*.hxx'",
",",
"'*.hpp'",
",",
"'*.c'",
",",
"'*.h'",
",",
"'*.html'",
",",
"'*.tex'",
"]",
"if",
"dpath_list",
"is",
"None",
":",
"dpath_list",
"=",
"[",
"os",
".",
"getcwd",
"(",
")",
"]",
"if",
"verbose",
"is",
"None",
":",
"verbose",
"=",
"ut",
".",
"NOT_QUIET",
"if",
"fpath_list",
"is",
"None",
":",
"greater_exclude_dirs",
"=",
"get_standard_exclude_dnames",
"(",
")",
"exclude_dirs",
"=",
"[",
"]",
"fpath_generator",
"=",
"matching_fpaths",
"(",
"dpath_list",
",",
"include_patterns",
",",
"exclude_dirs",
",",
"greater_exclude_dirs",
"=",
"greater_exclude_dirs",
",",
"recursive",
"=",
"recursive",
",",
"exclude_patterns",
"=",
"exclude_patterns",
")",
"else",
":",
"fpath_generator",
"=",
"fpath_list",
"if",
"verbose",
":",
"print",
"(",
"'sed-ing %r'",
"%",
"(",
"dpath_list",
",",
")",
")",
"print",
"(",
"' * regular expression : %r'",
"%",
"(",
"regexpr",
",",
")",
")",
"print",
"(",
"' * replacement : %r'",
"%",
"(",
"repl",
",",
")",
")",
"print",
"(",
"' * include_patterns : %r'",
"%",
"(",
"include_patterns",
",",
")",
")",
"print",
"(",
"' * recursive: %r'",
"%",
"(",
"recursive",
",",
")",
")",
"print",
"(",
"' * force: %r'",
"%",
"(",
"force",
",",
")",
")",
"from",
"utool",
"import",
"util_str",
"print",
"(",
"' * fpath_list: %s'",
"%",
"(",
"util_str",
".",
"repr3",
"(",
"fpath_list",
")",
",",
")",
")",
"regexpr",
"=",
"extend_regex",
"(",
"regexpr",
")",
"#if '\\x08' in regexpr:",
"# print('Remember \\\\x08 != \\\\b')",
"# print('subsituting for you for you')",
"# regexpr = regexpr.replace('\\x08', '\\\\b')",
"# print(' * regular expression : %r' % (regexpr,))",
"# Walk through each directory recursively",
"num_changed",
"=",
"0",
"num_files_checked",
"=",
"0",
"fpaths_changed",
"=",
"[",
"]",
"for",
"fpath",
"in",
"fpath_generator",
":",
"num_files_checked",
"+=",
"1",
"changed_lines",
"=",
"sedfile",
"(",
"fpath",
",",
"regexpr",
",",
"repl",
",",
"force",
",",
"verbose",
"=",
"verbose",
")",
"if",
"changed_lines",
"is",
"not",
"None",
":",
"fpaths_changed",
".",
"append",
"(",
"fpath",
")",
"num_changed",
"+=",
"len",
"(",
"changed_lines",
")",
"import",
"utool",
"as",
"ut",
"print",
"(",
"'num_files_checked = %r'",
"%",
"(",
"num_files_checked",
",",
")",
")",
"print",
"(",
"'fpaths_changed = %s'",
"%",
"(",
"ut",
".",
"repr3",
"(",
"sorted",
"(",
"fpaths_changed",
")",
")",
",",
")",
")",
"print",
"(",
"'total lines changed = %r'",
"%",
"(",
"num_changed",
",",
")",
")"
] | Python implementation of sed. NOT FINISHED
searches and replaces text in files
Args:
regexpr (str): regex patterns to find
repl (str): text to replace
force (bool):
recursive (bool):
dpath_list (list): directories to search (defaults to cwd) | [
"Python",
"implementation",
"of",
"sed",
".",
"NOT",
"FINISHED"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1663-L1723 | train |
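A hedged usage sketch for sed; the project directory is illustrative, and the exact effect of the force flag is delegated to sedfile, which is not shown in this record:

>>> from utool.util_path import sed
>>> sed(r'old_function_name', 'new_function_name',
...     dpath_list=['/path/to/project'],    # assumption: root of the code to rewrite
...     include_patterns=['*.py'],
...     recursive=True,
...     force=False)    # force semantics come from sedfile (not shown here)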
Erotemic/utool | utool/util_path.py | grep | def grep(regex_list, recursive=True, dpath_list=None, include_patterns=None,
exclude_dirs=[], greater_exclude_dirs=None, inverse=False,
exclude_patterns=[], verbose=VERBOSE, fpath_list=None, reflags=0,
cache=None):
r"""
greps for patterns
Python implementation of grep. NOT FINISHED
Args:
regex_list (str or list): one or more patterns to find
recursive (bool):
dpath_list (list): directories to search (defaults to cwd)
include_patterns (list) : defaults to standard file extensions
Returns:
(list, list, list): (found_fpaths, found_lines_list, found_lxs_list)
CommandLine:
python -m utool.util_path --test-grep
utprof.py -m utool.util_path --exec-grep
utprof.py utool/util_path.py --exec-grep
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> #dpath_list = [ut.truepath('~/code/utool/utool')]
>>> dpath_list = [ut.truepath(dirname(ut.__file__))]
>>> include_patterns = ['*.py']
>>> exclude_dirs = []
>>> regex_list = ['grepfile']
>>> verbose = True
>>> recursive = True
>>> result = ut.grep(regex_list, recursive, dpath_list, include_patterns,
>>> exclude_dirs)
>>> (found_fpath_list, found_lines_list, found_lxs_list) = result
>>> assert 'util_path.py' in list(map(basename, found_fpath_list))
"""
from utool import util_regex
# from utool import util_str
from utool import util_list
if include_patterns is None:
include_patterns = ['*']
# include_patterns = get_standard_include_patterns()
if greater_exclude_dirs is None:
greater_exclude_dirs = []
# greater_exclude_dirs = get_standard_exclude_dnames()
# ensure list input
if isinstance(include_patterns, six.string_types):
include_patterns = [include_patterns]
if dpath_list is None:
dpath_list = [os.getcwd()]
if verbose:
recursive_stat_str = ['flat', 'recursive'][recursive]
print('[util_path] Grepping (%s) %r for %r' % (recursive_stat_str,
dpath_list, regex_list))
print('[util_path] regex_list = %s' % (regex_list))
if isinstance(regex_list, six.string_types):
regex_list = [regex_list]
found_fpath_list = []
found_lines_list = []
found_lxs_list = []
# Walk through each directory recursively
if fpath_list is None:
fpath_generator = matching_fpaths(
dpath_list=dpath_list, include_patterns=include_patterns,
exclude_dirs=exclude_dirs,
greater_exclude_dirs=greater_exclude_dirs,
exclude_patterns=exclude_patterns, recursive=recursive)
else:
fpath_generator = fpath_list
# from utool import util_regex
# extended_regex_list, reflags = util_regex.extend_regex3(regex_list, reflags)
# if verbose:
# print('extended_regex_list = %r' % (extended_regex_list,))
# print('reflags = %r' % (reflags,))
_exprs_flags = [util_regex.extend_regex2(expr, reflags)
for expr in regex_list]
extended_regex_list = util_list.take_column(_exprs_flags, 0)
reflags_list = util_list.take_column(_exprs_flags, 1)
# HACK
reflags = reflags_list[0]
# For each matching filepath
for fpath in fpath_generator:
# For each search pattern
found_lines, found_lxs = grepfile(fpath, extended_regex_list,
reflags_list, cache=cache)
if inverse:
if len(found_lines) == 0:
# Append files that the pattern was not found in
found_fpath_list.append(fpath)
found_lines_list.append([])
found_lxs_list.append([])
elif len(found_lines) > 0:
found_fpath_list.append(fpath) # regular matching
found_lines_list.append(found_lines)
found_lxs_list.append(found_lxs)
grep_result = (found_fpath_list, found_lines_list, found_lxs_list)
if verbose:
print('==========')
print('==========')
print('[util_path] found matches in %d files' %
len(found_fpath_list))
print(make_grep_resultstr(grep_result, extended_regex_list, reflags))
# print('[util_path] found matches in %d files' % len(found_fpath_list))
# pat = util_regex.regex_or(extended_regex_list)
# for fpath, found, lxs in zip(found_fpath_list, found_lines_list,
# found_lxs_list):
# if len(found) > 0:
# print('----------------------')
# print('Found %d line(s) in %r: ' % (len(found), fpath))
# name = split(fpath)[1]
# max_line = len(lxs)
# ndigits = str(len(str(max_line)))
# fmt_str = '%s : %' + ndigits + 'd |%s'
# for (lx, line) in zip(lxs, found):
# # hack
# colored_line = util_str.highlight_regex(
# line.rstrip('\n'), pat, reflags=reflags)
# print(fmt_str % (name, lx, colored_line))
#print('[util_path] found matches in %d files' % len(found_fpath_list))
return grep_result | python | def grep(regex_list, recursive=True, dpath_list=None, include_patterns=None,
exclude_dirs=[], greater_exclude_dirs=None, inverse=False,
exclude_patterns=[], verbose=VERBOSE, fpath_list=None, reflags=0,
cache=None):
r"""
greps for patterns
Python implementation of grep. NOT FINISHED
Args:
regex_list (str or list): one or more patterns to find
recursive (bool):
dpath_list (list): directories to search (defaults to cwd)
include_patterns (list) : defaults to standard file extensions
Returns:
(list, list, list): (found_fpaths, found_lines_list, found_lxs_list)
CommandLine:
python -m utool.util_path --test-grep
utprof.py -m utool.util_path --exec-grep
utprof.py utool/util_path.py --exec-grep
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> #dpath_list = [ut.truepath('~/code/utool/utool')]
>>> dpath_list = [ut.truepath(dirname(ut.__file__))]
>>> include_patterns = ['*.py']
>>> exclude_dirs = []
>>> regex_list = ['grepfile']
>>> verbose = True
>>> recursive = True
>>> result = ut.grep(regex_list, recursive, dpath_list, include_patterns,
>>> exclude_dirs)
>>> (found_fpath_list, found_lines_list, found_lxs_list) = result
>>> assert 'util_path.py' in list(map(basename, found_fpath_list))
"""
from utool import util_regex
# from utool import util_str
from utool import util_list
if include_patterns is None:
include_patterns = ['*']
# include_patterns = get_standard_include_patterns()
if greater_exclude_dirs is None:
greater_exclude_dirs = []
# greater_exclude_dirs = get_standard_exclude_dnames()
# ensure list input
if isinstance(include_patterns, six.string_types):
include_patterns = [include_patterns]
if dpath_list is None:
dpath_list = [os.getcwd()]
if verbose:
recursive_stat_str = ['flat', 'recursive'][recursive]
print('[util_path] Grepping (%s) %r for %r' % (recursive_stat_str,
dpath_list, regex_list))
print('[util_path] regex_list = %s' % (regex_list))
if isinstance(regex_list, six.string_types):
regex_list = [regex_list]
found_fpath_list = []
found_lines_list = []
found_lxs_list = []
# Walk through each directory recursively
if fpath_list is None:
fpath_generator = matching_fpaths(
dpath_list=dpath_list, include_patterns=include_patterns,
exclude_dirs=exclude_dirs,
greater_exclude_dirs=greater_exclude_dirs,
exclude_patterns=exclude_patterns, recursive=recursive)
else:
fpath_generator = fpath_list
# from utool import util_regex
# extended_regex_list, reflags = util_regex.extend_regex3(regex_list, reflags)
# if verbose:
# print('extended_regex_list = %r' % (extended_regex_list,))
# print('reflags = %r' % (reflags,))
_exprs_flags = [util_regex.extend_regex2(expr, reflags)
for expr in regex_list]
extended_regex_list = util_list.take_column(_exprs_flags, 0)
reflags_list = util_list.take_column(_exprs_flags, 1)
# HACK
reflags = reflags_list[0]
# For each matching filepath
for fpath in fpath_generator:
# For each search pattern
found_lines, found_lxs = grepfile(fpath, extended_regex_list,
reflags_list, cache=cache)
if inverse:
if len(found_lines) == 0:
# Append files that the pattern was not found in
found_fpath_list.append(fpath)
found_lines_list.append([])
found_lxs_list.append([])
elif len(found_lines) > 0:
found_fpath_list.append(fpath) # regular matching
found_lines_list.append(found_lines)
found_lxs_list.append(found_lxs)
grep_result = (found_fpath_list, found_lines_list, found_lxs_list)
if verbose:
print('==========')
print('==========')
print('[util_path] found matches in %d files' %
len(found_fpath_list))
print(make_grep_resultstr(grep_result, extended_regex_list, reflags))
# print('[util_path] found matches in %d files' % len(found_fpath_list))
# pat = util_regex.regex_or(extended_regex_list)
# for fpath, found, lxs in zip(found_fpath_list, found_lines_list,
# found_lxs_list):
# if len(found) > 0:
# print('----------------------')
# print('Found %d line(s) in %r: ' % (len(found), fpath))
# name = split(fpath)[1]
# max_line = len(lxs)
# ndigits = str(len(str(max_line)))
# fmt_str = '%s : %' + ndigits + 'd |%s'
# for (lx, line) in zip(lxs, found):
# # hack
# colored_line = util_str.highlight_regex(
# line.rstrip('\n'), pat, reflags=reflags)
# print(fmt_str % (name, lx, colored_line))
#print('[util_path] found matches in %d files' % len(found_fpath_list))
return grep_result | [
"def",
"grep",
"(",
"regex_list",
",",
"recursive",
"=",
"True",
",",
"dpath_list",
"=",
"None",
",",
"include_patterns",
"=",
"None",
",",
"exclude_dirs",
"=",
"[",
"]",
",",
"greater_exclude_dirs",
"=",
"None",
",",
"inverse",
"=",
"False",
",",
"exclude_patterns",
"=",
"[",
"]",
",",
"verbose",
"=",
"VERBOSE",
",",
"fpath_list",
"=",
"None",
",",
"reflags",
"=",
"0",
",",
"cache",
"=",
"None",
")",
":",
"from",
"utool",
"import",
"util_regex",
"# from utool import util_str",
"from",
"utool",
"import",
"util_list",
"if",
"include_patterns",
"is",
"None",
":",
"include_patterns",
"=",
"[",
"'*'",
"]",
"# include_patterns = get_standard_include_patterns()",
"if",
"greater_exclude_dirs",
"is",
"None",
":",
"greater_exclude_dirs",
"=",
"[",
"]",
"# greater_exclude_dirs = get_standard_exclude_dnames()",
"# ensure list input",
"if",
"isinstance",
"(",
"include_patterns",
",",
"six",
".",
"string_types",
")",
":",
"include_patterns",
"=",
"[",
"include_patterns",
"]",
"if",
"dpath_list",
"is",
"None",
":",
"dpath_list",
"=",
"[",
"os",
".",
"getcwd",
"(",
")",
"]",
"if",
"verbose",
":",
"recursive_stat_str",
"=",
"[",
"'flat'",
",",
"'recursive'",
"]",
"[",
"recursive",
"]",
"print",
"(",
"'[util_path] Greping (%s) %r for %r'",
"%",
"(",
"recursive_stat_str",
",",
"dpath_list",
",",
"regex_list",
")",
")",
"print",
"(",
"'[util_path] regex_list = %s'",
"%",
"(",
"regex_list",
")",
")",
"if",
"isinstance",
"(",
"regex_list",
",",
"six",
".",
"string_types",
")",
":",
"regex_list",
"=",
"[",
"regex_list",
"]",
"found_fpath_list",
"=",
"[",
"]",
"found_lines_list",
"=",
"[",
"]",
"found_lxs_list",
"=",
"[",
"]",
"# Walk through each directory recursively",
"if",
"fpath_list",
"is",
"None",
":",
"fpath_generator",
"=",
"matching_fpaths",
"(",
"dpath_list",
"=",
"dpath_list",
",",
"include_patterns",
"=",
"include_patterns",
",",
"exclude_dirs",
"=",
"exclude_dirs",
",",
"greater_exclude_dirs",
"=",
"greater_exclude_dirs",
",",
"exclude_patterns",
"=",
"exclude_patterns",
",",
"recursive",
"=",
"recursive",
")",
"else",
":",
"fpath_generator",
"=",
"fpath_list",
"# from utool import util_regex",
"# extended_regex_list, reflags = util_regex.extend_regex3(regex_list, reflags)",
"# if verbose:",
"# print('extended_regex_list = %r' % (extended_regex_list,))",
"# print('reflags = %r' % (reflags,))",
"_exprs_flags",
"=",
"[",
"util_regex",
".",
"extend_regex2",
"(",
"expr",
",",
"reflags",
")",
"for",
"expr",
"in",
"regex_list",
"]",
"extended_regex_list",
"=",
"util_list",
".",
"take_column",
"(",
"_exprs_flags",
",",
"0",
")",
"reflags_list",
"=",
"util_list",
".",
"take_column",
"(",
"_exprs_flags",
",",
"1",
")",
"# HACK",
"reflags",
"=",
"reflags_list",
"[",
"0",
"]",
"# For each matching filepath",
"for",
"fpath",
"in",
"fpath_generator",
":",
"# For each search pattern",
"found_lines",
",",
"found_lxs",
"=",
"grepfile",
"(",
"fpath",
",",
"extended_regex_list",
",",
"reflags_list",
",",
"cache",
"=",
"cache",
")",
"if",
"inverse",
":",
"if",
"len",
"(",
"found_lines",
")",
"==",
"0",
":",
"# Append files that the pattern was not found in",
"found_fpath_list",
".",
"append",
"(",
"fpath",
")",
"found_lines_list",
".",
"append",
"(",
"[",
"]",
")",
"found_lxs_list",
".",
"append",
"(",
"[",
"]",
")",
"elif",
"len",
"(",
"found_lines",
")",
">",
"0",
":",
"found_fpath_list",
".",
"append",
"(",
"fpath",
")",
"# regular matching",
"found_lines_list",
".",
"append",
"(",
"found_lines",
")",
"found_lxs_list",
".",
"append",
"(",
"found_lxs",
")",
"grep_result",
"=",
"(",
"found_fpath_list",
",",
"found_lines_list",
",",
"found_lxs_list",
")",
"if",
"verbose",
":",
"print",
"(",
"'=========='",
")",
"print",
"(",
"'=========='",
")",
"print",
"(",
"'[util_path] found matches in %d files'",
"%",
"len",
"(",
"found_fpath_list",
")",
")",
"print",
"(",
"make_grep_resultstr",
"(",
"grep_result",
",",
"extended_regex_list",
",",
"reflags",
")",
")",
"# print('[util_path] found matches in %d files' % len(found_fpath_list))",
"# pat = util_regex.regex_or(extended_regex_list)",
"# for fpath, found, lxs in zip(found_fpath_list, found_lines_list,",
"# found_lxs_list):",
"# if len(found) > 0:",
"# print('----------------------')",
"# print('Found %d line(s) in %r: ' % (len(found), fpath))",
"# name = split(fpath)[1]",
"# max_line = len(lxs)",
"# ndigits = str(len(str(max_line)))",
"# fmt_str = '%s : %' + ndigits + 'd |%s'",
"# for (lx, line) in zip(lxs, found):",
"# # hack",
"# colored_line = util_str.highlight_regex(",
"# line.rstrip('\\n'), pat, reflags=reflags)",
"# print(fmt_str % (name, lx, colored_line))",
"#print('[util_path] found matches in %d files' % len(found_fpath_list))",
"return",
"grep_result"
] | r"""
greps for patterns
Python implementation of grep. NOT FINISHED
Args:
regex_list (str or list): one or more patterns to find
recursive (bool):
dpath_list (list): directories to search (defaults to cwd)
include_patterns (list) : defaults to standard file extensions
Returns:
(list, list, list): (found_fpaths, found_lines_list, found_lxs_list)
CommandLine:
python -m utool.util_path --test-grep
utprof.py -m utool.util_path --exec-grep
utprof.py utool/util_path.py --exec-grep
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> #dpath_list = [ut.truepath('~/code/utool/utool')]
>>> dpath_list = [ut.truepath(dirname(ut.__file__))]
>>> include_patterns = ['*.py']
>>> exclude_dirs = []
>>> regex_list = ['grepfile']
>>> verbose = True
>>> recursive = True
>>> result = ut.grep(regex_list, recursive, dpath_list, include_patterns,
>>> exclude_dirs)
>>> (found_fpath_list, found_lines_list, found_lxs_list) = result
>>> assert 'util_path.py' in list(map(basename, found_fpath_list)) | [
"r",
"greps",
"for",
"patterns",
"Python",
"implementation",
"of",
"grep",
".",
"NOT",
"FINISHED"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L1974-L2101 | train |
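The doctest above covers the default matching mode; this sketch exercises the inverse flag, which (per the branch in the code) collects files in which none of the patterns matched:

>>> from os.path import dirname
>>> import utool as ut
>>> fpaths, lines, lxs = ut.grep(['TODO'], dpath_list=[dirname(ut.__file__)],
...                              include_patterns=['*.py'], inverse=True)
>>> # fpaths lists files with no 'TODO' hits; their entries in lines/lxs are empty lists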
Erotemic/utool | utool/util_path.py | get_win32_short_path_name | def get_win32_short_path_name(long_name):
"""
Gets the short path name of a given long path.
References:
http://stackoverflow.com/a/23598461/200291
http://stackoverflow.com/questions/23598289/get-win-short-fname-python
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> # build test data
>>> #long_name = unicode(normpath(ut.get_resource_dir()))
>>> long_name = unicode(r'C:/Program Files (x86)')
>>> #long_name = unicode(r'C:/Python27')
#unicode(normpath(ut.get_resource_dir()))
>>> # execute function
>>> result = get_win32_short_path_name(long_name)
>>> # verify results
>>> print(result)
C:/PROGRA~2
"""
import ctypes
from ctypes import wintypes
_GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW
_GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]
_GetShortPathNameW.restype = wintypes.DWORD
output_buf_size = 0
while True:
output_buf = ctypes.create_unicode_buffer(output_buf_size)
needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
if output_buf_size >= needed:
short_name = output_buf.value
break
else:
output_buf_size = needed
return short_name | python | def get_win32_short_path_name(long_name):
"""
Gets the short path name of a given long path.
References:
http://stackoverflow.com/a/23598461/200291
http://stackoverflow.com/questions/23598289/get-win-short-fname-python
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> # build test data
>>> #long_name = unicode(normpath(ut.get_resource_dir()))
>>> long_name = unicode(r'C:/Program Files (x86)')
>>> #long_name = unicode(r'C:/Python27')
#unicode(normpath(ut.get_resource_dir()))
>>> # execute function
>>> result = get_win32_short_path_name(long_name)
>>> # verify results
>>> print(result)
C:/PROGRA~2
"""
import ctypes
from ctypes import wintypes
_GetShortPathNameW = ctypes.windll.kernel32.GetShortPathNameW
_GetShortPathNameW.argtypes = [wintypes.LPCWSTR, wintypes.LPWSTR, wintypes.DWORD]
_GetShortPathNameW.restype = wintypes.DWORD
output_buf_size = 0
while True:
output_buf = ctypes.create_unicode_buffer(output_buf_size)
needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
if output_buf_size >= needed:
short_name = output_buf.value
break
else:
output_buf_size = needed
return short_name | [
"def",
"get_win32_short_path_name",
"(",
"long_name",
")",
":",
"import",
"ctypes",
"from",
"ctypes",
"import",
"wintypes",
"_GetShortPathNameW",
"=",
"ctypes",
".",
"windll",
".",
"kernel32",
".",
"GetShortPathNameW",
"_GetShortPathNameW",
".",
"argtypes",
"=",
"[",
"wintypes",
".",
"LPCWSTR",
",",
"wintypes",
".",
"LPWSTR",
",",
"wintypes",
".",
"DWORD",
"]",
"_GetShortPathNameW",
".",
"restype",
"=",
"wintypes",
".",
"DWORD",
"output_buf_size",
"=",
"0",
"while",
"True",
":",
"output_buf",
"=",
"ctypes",
".",
"create_unicode_buffer",
"(",
"output_buf_size",
")",
"needed",
"=",
"_GetShortPathNameW",
"(",
"long_name",
",",
"output_buf",
",",
"output_buf_size",
")",
"if",
"output_buf_size",
">=",
"needed",
":",
"short_name",
"=",
"output_buf",
".",
"value",
"break",
"else",
":",
"output_buf_size",
"=",
"needed",
"return",
"short_name"
] | Gets the short path name of a given long path.
References:
http://stackoverflow.com/a/23598461/200291
http://stackoverflow.com/questions/23598289/get-win-short-fname-python
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> # build test data
>>> #long_name = unicode(normpath(ut.get_resource_dir()))
>>> long_name = unicode(r'C:/Program Files (x86)')
>>> #long_name = unicode(r'C:/Python27')
#unicode(normpath(ut.get_resource_dir()))
>>> # execute function
>>> result = get_win32_short_path_name(long_name)
>>> # verify results
>>> print(result)
C:/PROGRA~2 | [
"Gets",
"the",
"short",
"path",
"name",
"of",
"a",
"given",
"long",
"path",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2129-L2166 | train |
Erotemic/utool | utool/util_path.py | platform_path | def platform_path(path):
r"""
Returns platform specific path for pyinstaller usage
Args:
path (str):
Returns:
str: path2
CommandLine:
python -m utool.util_path --test-platform_path
Example:
>>> # ENABLE_DOCTEST
>>> # FIXME: find examples of the weird paths this fixes (mostly on win32 i think)
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path = 'some/odd/../weird/path'
>>> path2 = platform_path(path)
>>> result = str(path2)
>>> if ut.WIN32:
... ut.assert_eq(path2, r'some\weird\path')
... else:
... ut.assert_eq(path2, r'some/weird/path')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> if ut.WIN32:
... path = 'C:/PROGRA~2'
... path2 = platform_path(path)
... assert path2 == u'..\\..\\..\\..\\Program Files (x86)'
"""
try:
if path == '':
raise ValueError('path cannot be the empty string')
# get path relative to cwd
path1 = truepath_relative(path)
if sys.platform.startswith('win32'):
path2 = expand_win32_shortname(path1)
else:
path2 = path1
except Exception as ex:
util_dbg.printex(ex, keys=['path', 'path1', 'path2'])
raise
return path2 | python | def platform_path(path):
r"""
Returns platform specific path for pyinstaller usage
Args:
path (str):
Returns:
str: path2
CommandLine:
python -m utool.util_path --test-platform_path
Example:
>>> # ENABLE_DOCTEST
>>> # FIXME: find examples of the weird paths this fixes (mostly on win32 i think)
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path = 'some/odd/../weird/path'
>>> path2 = platform_path(path)
>>> result = str(path2)
>>> if ut.WIN32:
... ut.assert_eq(path2, r'some\weird\path')
... else:
... ut.assert_eq(path2, r'some/weird/path')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> if ut.WIN32:
... path = 'C:/PROGRA~2'
... path2 = platform_path(path)
... assert path2 == u'..\\..\\..\\..\\Program Files (x86)'
"""
try:
if path == '':
raise ValueError('path cannot be the empty string')
# get path relative to cwd
path1 = truepath_relative(path)
if sys.platform.startswith('win32'):
path2 = expand_win32_shortname(path1)
else:
path2 = path1
except Exception as ex:
util_dbg.printex(ex, keys=['path', 'path1', 'path2'])
raise
return path2 | [
"def",
"platform_path",
"(",
"path",
")",
":",
"try",
":",
"if",
"path",
"==",
"''",
":",
"raise",
"ValueError",
"(",
"'path cannot be the empty string'",
")",
"# get path relative to cwd",
"path1",
"=",
"truepath_relative",
"(",
"path",
")",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win32'",
")",
":",
"path2",
"=",
"expand_win32_shortname",
"(",
"path1",
")",
"else",
":",
"path2",
"=",
"path1",
"except",
"Exception",
"as",
"ex",
":",
"util_dbg",
".",
"printex",
"(",
"ex",
",",
"keys",
"=",
"[",
"'path'",
",",
"'path1'",
",",
"'path2'",
"]",
")",
"raise",
"return",
"path2"
] | r"""
Returns platform specific path for pyinstaller usage
Args:
path (str):
Returns:
str: path2
CommandLine:
python -m utool.util_path --test-platform_path
Example:
>>> # ENABLE_DOCTEST
>>> # FIXME: find examples of the weird paths this fixes (mostly on win32 i think)
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> path = 'some/odd/../weird/path'
>>> path2 = platform_path(path)
>>> result = str(path2)
>>> if ut.WIN32:
... ut.assert_eq(path2, r'some\weird\path')
... else:
... ut.assert_eq(path2, r'some/weird/path')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut # NOQA
>>> if ut.WIN32:
... path = 'C:/PROGRA~2'
... path2 = platform_path(path)
... assert path2 == u'..\\..\\..\\..\\Program Files (x86)' | [
"r",
"Returns",
"platform",
"specific",
"path",
"for",
"pyinstaller",
"usage"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2191-L2238 | train |
Erotemic/utool | utool/util_path.py | find_lib_fpath | def find_lib_fpath(libname, root_dir, recurse_down=True, verbose=False, debug=False):
""" Search for the library """
def get_lib_fname_list(libname):
"""
input <libname>: library name (e.g. 'hesaff', not 'libhesaff')
returns <libnames>: list of plausible library file names
"""
if sys.platform.startswith('win32'):
libnames = ['lib' + libname + '.dll', libname + '.dll']
elif sys.platform.startswith('darwin'):
libnames = ['lib' + libname + '.dylib']
elif sys.platform.startswith('linux'):
libnames = ['lib' + libname + '.so']
else:
raise Exception('Unknown operating system: %s' % sys.platform)
return libnames
def get_lib_dpath_list(root_dir):
"""
input <root_dir>: deepest directory to look for a library (dll, so, dylib)
returns <libnames>: list of plausible directories to look.
"""
'returns possible lib locations'
get_lib_dpath_list = [root_dir,
join(root_dir, 'lib'),
join(root_dir, 'build'),
join(root_dir, 'build', 'lib')]
return get_lib_dpath_list
lib_fname_list = get_lib_fname_list(libname)
tried_fpaths = []
while root_dir is not None:
for lib_fname in lib_fname_list:
for lib_dpath in get_lib_dpath_list(root_dir):
lib_fpath = normpath(join(lib_dpath, lib_fname))
if exists(lib_fpath):
if verbose:
print('\n[c] Checked: '.join(tried_fpaths))
if debug:
print('using: %r' % lib_fpath)
return lib_fpath
else:
# Remember which candidate library fpaths did not exist
tried_fpaths.append(lib_fpath)
_new_root = dirname(root_dir)
if _new_root == root_dir:
root_dir = None
break
else:
root_dir = _new_root
if not recurse_down:
break
msg = ('\n[C!] load_clib(libname=%r root_dir=%r, recurse_down=%r, verbose=%r)' %
(libname, root_dir, recurse_down, verbose) +
'\n[c!] Cannot FIND dynamic library')
print(msg)
print('\n[c!] Checked: '.join(tried_fpaths))
raise ImportError(msg) | python | def find_lib_fpath(libname, root_dir, recurse_down=True, verbose=False, debug=False):
""" Search for the library """
def get_lib_fname_list(libname):
"""
input <libname>: library name (e.g. 'hesaff', not 'libhesaff')
returns <libnames>: list of plausible library file names
"""
if sys.platform.startswith('win32'):
libnames = ['lib' + libname + '.dll', libname + '.dll']
elif sys.platform.startswith('darwin'):
libnames = ['lib' + libname + '.dylib']
elif sys.platform.startswith('linux'):
libnames = ['lib' + libname + '.so']
else:
raise Exception('Unknown operating system: %s' % sys.platform)
return libnames
def get_lib_dpath_list(root_dir):
"""
input <root_dir>: deepest directory to look for a library (dll, so, dylib)
returns <libnames>: list of plausible directories to look.
"""
'returns possible lib locations'
get_lib_dpath_list = [root_dir,
join(root_dir, 'lib'),
join(root_dir, 'build'),
join(root_dir, 'build', 'lib')]
return get_lib_dpath_list
lib_fname_list = get_lib_fname_list(libname)
tried_fpaths = []
while root_dir is not None:
for lib_fname in lib_fname_list:
for lib_dpath in get_lib_dpath_list(root_dir):
lib_fpath = normpath(join(lib_dpath, lib_fname))
if exists(lib_fpath):
if verbose:
print('\n[c] Checked: '.join(tried_fpaths))
if debug:
print('using: %r' % lib_fpath)
return lib_fpath
else:
# Remember which candidate library fpaths did not exist
tried_fpaths.append(lib_fpath)
_new_root = dirname(root_dir)
if _new_root == root_dir:
root_dir = None
break
else:
root_dir = _new_root
if not recurse_down:
break
msg = ('\n[C!] load_clib(libname=%r root_dir=%r, recurse_down=%r, verbose=%r)' %
(libname, root_dir, recurse_down, verbose) +
'\n[c!] Cannot FIND dynamic library')
print(msg)
print('\n[c!] Checked: '.join(tried_fpaths))
raise ImportError(msg) | [
"def",
"find_lib_fpath",
"(",
"libname",
",",
"root_dir",
",",
"recurse_down",
"=",
"True",
",",
"verbose",
"=",
"False",
",",
"debug",
"=",
"False",
")",
":",
"def",
"get_lib_fname_list",
"(",
"libname",
")",
":",
"\"\"\"\n input <libname>: library name (e.g. 'hesaff', not 'libhesaff')\n returns <libnames>: list of plausible library file names\n \"\"\"",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'win32'",
")",
":",
"libnames",
"=",
"[",
"'lib'",
"+",
"libname",
"+",
"'.dll'",
",",
"libname",
"+",
"'.dll'",
"]",
"elif",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'darwin'",
")",
":",
"libnames",
"=",
"[",
"'lib'",
"+",
"libname",
"+",
"'.dylib'",
"]",
"elif",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'linux'",
")",
":",
"libnames",
"=",
"[",
"'lib'",
"+",
"libname",
"+",
"'.so'",
"]",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown operating system: %s'",
"%",
"sys",
".",
"platform",
")",
"return",
"libnames",
"def",
"get_lib_dpath_list",
"(",
"root_dir",
")",
":",
"\"\"\"\n input <root_dir>: deepest directory to look for a library (dll, so, dylib)\n returns <libnames>: list of plausible directories to look.\n \"\"\"",
"'returns possible lib locations'",
"get_lib_dpath_list",
"=",
"[",
"root_dir",
",",
"join",
"(",
"root_dir",
",",
"'lib'",
")",
",",
"join",
"(",
"root_dir",
",",
"'build'",
")",
",",
"join",
"(",
"root_dir",
",",
"'build'",
",",
"'lib'",
")",
"]",
"return",
"get_lib_dpath_list",
"lib_fname_list",
"=",
"get_lib_fname_list",
"(",
"libname",
")",
"tried_fpaths",
"=",
"[",
"]",
"while",
"root_dir",
"is",
"not",
"None",
":",
"for",
"lib_fname",
"in",
"lib_fname_list",
":",
"for",
"lib_dpath",
"in",
"get_lib_dpath_list",
"(",
"root_dir",
")",
":",
"lib_fpath",
"=",
"normpath",
"(",
"join",
"(",
"lib_dpath",
",",
"lib_fname",
")",
")",
"if",
"exists",
"(",
"lib_fpath",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'\\n[c] Checked: '",
".",
"join",
"(",
"tried_fpaths",
")",
")",
"if",
"debug",
":",
"print",
"(",
"'using: %r'",
"%",
"lib_fpath",
")",
"return",
"lib_fpath",
"else",
":",
"# Remember which candiate library fpaths did not exist",
"tried_fpaths",
".",
"append",
"(",
"lib_fpath",
")",
"_new_root",
"=",
"dirname",
"(",
"root_dir",
")",
"if",
"_new_root",
"==",
"root_dir",
":",
"root_dir",
"=",
"None",
"break",
"else",
":",
"root_dir",
"=",
"_new_root",
"if",
"not",
"recurse_down",
":",
"break",
"msg",
"=",
"(",
"'\\n[C!] load_clib(libname=%r root_dir=%r, recurse_down=%r, verbose=%r)'",
"%",
"(",
"libname",
",",
"root_dir",
",",
"recurse_down",
",",
"verbose",
")",
"+",
"'\\n[c!] Cannot FIND dynamic library'",
")",
"print",
"(",
"msg",
")",
"print",
"(",
"'\\n[c!] Checked: '",
".",
"join",
"(",
"tried_fpaths",
")",
")",
"raise",
"ImportError",
"(",
"msg",
")"
] | Search for the library | [
"Search",
"for",
"the",
"library"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2320-L2379 | train |
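A usage sketch for find_lib_fpath; the library name and root directory are hypothetical, and the platform-specific file names ((lib)<name>.dll, lib<name>.dylib, lib<name>.so) come from the inner helper above:

>>> from utool.util_path import find_lib_fpath
>>> root_dir = '/path/to/hesaff/repo'    # hypothetical checkout containing a build/ directory
>>> # searches root_dir, root_dir/lib, root_dir/build, root_dir/build/lib, then each parent dir;
>>> # returns the first existing library path or raises ImportError if none is found
>>> lib_fpath = find_lib_fpath('hesaff', root_dir, recurse_down=True, verbose=True)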
Erotemic/utool | utool/util_path.py | ensure_mingw_drive | def ensure_mingw_drive(win32_path):
r""" replaces windows drives with mingw style drives
Args:
win32_path (str):
CommandLine:
python -m utool.util_path --test-ensure_mingw_drive
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> win32_path = r'C:/Program Files/Foobar'
>>> result = ensure_mingw_drive(win32_path)
>>> print(result)
/c/Program Files/Foobar
"""
win32_drive, _path = splitdrive(win32_path)
mingw_drive = '/' + win32_drive[:-1].lower()
mingw_path = mingw_drive + _path
return mingw_path | python | def ensure_mingw_drive(win32_path):
r""" replaces windows drives with mingw style drives
Args:
win32_path (str):
CommandLine:
python -m utool.util_path --test-ensure_mingw_drive
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> win32_path = r'C:/Program Files/Foobar'
>>> result = ensure_mingw_drive(win32_path)
>>> print(result)
/c/Program Files/Foobar
"""
win32_drive, _path = splitdrive(win32_path)
mingw_drive = '/' + win32_drive[:-1].lower()
mingw_path = mingw_drive + _path
return mingw_path | [
"def",
"ensure_mingw_drive",
"(",
"win32_path",
")",
":",
"win32_drive",
",",
"_path",
"=",
"splitdrive",
"(",
"win32_path",
")",
"mingw_drive",
"=",
"'/'",
"+",
"win32_drive",
"[",
":",
"-",
"1",
"]",
".",
"lower",
"(",
")",
"mingw_path",
"=",
"mingw_drive",
"+",
"_path",
"return",
"mingw_path"
] | r""" replaces windows drives with mingw style drives
Args:
win32_path (str):
CommandLine:
python -m utool.util_path --test-ensure_mingw_drive
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> win32_path = r'C:/Program Files/Foobar'
>>> result = ensure_mingw_drive(win32_path)
>>> print(result)
/c/Program Files/Foobar | [
"r",
"replaces",
"windows",
"drives",
"with",
"mingw",
"style",
"drives"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2382-L2402 | train |
Erotemic/utool | utool/util_path.py | ancestor_paths | def ancestor_paths(start=None, limit={}):
"""
All paths above you
"""
import utool as ut
limit = ut.ensure_iterable(limit)
limit = {expanduser(p) for p in limit}.union(set(limit))
if start is None:
start = os.getcwd()
path = start
prev = None
while path != prev and prev not in limit:
yield path
prev = path
path = dirname(path) | python | def ancestor_paths(start=None, limit={}):
"""
All paths above you
"""
import utool as ut
limit = ut.ensure_iterable(limit)
limit = {expanduser(p) for p in limit}.union(set(limit))
if start is None:
start = os.getcwd()
path = start
prev = None
while path != prev and prev not in limit:
yield path
prev = path
path = dirname(path) | [
"def",
"ancestor_paths",
"(",
"start",
"=",
"None",
",",
"limit",
"=",
"{",
"}",
")",
":",
"import",
"utool",
"as",
"ut",
"limit",
"=",
"ut",
".",
"ensure_iterable",
"(",
"limit",
")",
"limit",
"=",
"{",
"expanduser",
"(",
"p",
")",
"for",
"p",
"in",
"limit",
"}",
".",
"union",
"(",
"set",
"(",
"limit",
")",
")",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"os",
".",
"getcwd",
"(",
")",
"path",
"=",
"start",
"prev",
"=",
"None",
"while",
"path",
"!=",
"prev",
"and",
"prev",
"not",
"in",
"limit",
":",
"yield",
"path",
"prev",
"=",
"path",
"path",
"=",
"dirname",
"(",
"path",
")"
] | All paths above you | [
"All",
"paths",
"above",
"you"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2436-L2450 | train |
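A minimal usage sketch for the `ancestor_paths` generator in the row above, assuming `utool` is installed and importable as `ut`; the paths are hypothetical, and the generator yields the start directory followed by each parent until a path in `limit` has been yielded:

import utool as ut

# hypothetical directory tree under /home/user
for p in ut.ancestor_paths('/home/user/code/project', limit={'/home'}):
    print(p)
# /home/user/code/project
# /home/user/code
# /home/user
# /home     <- '/home' is yielded, then iteration stops because prev is in limit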
Erotemic/utool | utool/util_path.py | search_candidate_paths | def search_candidate_paths(candidate_path_list, candidate_name_list=None,
priority_paths=None, required_subpaths=[],
verbose=None):
"""
searches for existing paths that meed a requirement
Args:
candidate_path_list (list): list of paths to check. If
candidate_name_list is specified this is the dpath list instead
candidate_name_list (list): specifies several names to check
(default = None)
priority_paths (None): specifies paths to check first.
Ignore candidate_name_list (default = None)
required_subpaths (list): specified required directory structure
(default = [])
verbose (bool): verbosity flag(default = True)
Returns:
str: return_path
CommandLine:
python -m utool.util_path --test-search_candidate_paths
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> candidate_path_list = [ut.truepath('~/RPI/code/utool'),
>>> ut.truepath('~/code/utool')]
>>> candidate_name_list = None
>>> required_subpaths = []
>>> verbose = True
>>> priority_paths = None
>>> return_path = search_candidate_paths(candidate_path_list,
>>> candidate_name_list,
>>> priority_paths, required_subpaths,
>>> verbose)
>>> result = ('return_path = %s' % (str(return_path),))
>>> print(result)
"""
import utool as ut
if verbose is None:
verbose = 0 if QUIET else 1
if verbose >= 1:
print('[search_candidate_paths] Searching for candidate paths')
if candidate_name_list is not None:
candidate_path_list_ = [join(dpath, fname) for dpath, fname in
itertools.product(candidate_path_list,
candidate_name_list)]
else:
candidate_path_list_ = candidate_path_list
if priority_paths is not None:
candidate_path_list_ = priority_paths + candidate_path_list_
return_path = None
for path in candidate_path_list_:
if path is not None and exists(path):
if verbose >= 2:
print('[search_candidate_paths] Found candidate directory %r' % (path,))
print('[search_candidate_paths] ... checking for approprate structure')
# tomcat directory exists. Make sure it also contains a webapps dir
subpath_list = [join(path, subpath) for subpath in required_subpaths]
if all(ut.checkpath(path_, verbose=verbose) for path_ in subpath_list):
return_path = path
if verbose >= 2:
print('[search_candidate_paths] Found acceptable path')
return return_path
break
if verbose >= 1:
print('[search_candidate_paths] Failed to find acceptable path')
return return_path | python | def search_candidate_paths(candidate_path_list, candidate_name_list=None,
priority_paths=None, required_subpaths=[],
verbose=None):
"""
searches for existing paths that meed a requirement
Args:
candidate_path_list (list): list of paths to check. If
candidate_name_list is specified this is the dpath list instead
candidate_name_list (list): specifies several names to check
(default = None)
priority_paths (None): specifies paths to check first.
Ignore candidate_name_list (default = None)
required_subpaths (list): specified required directory structure
(default = [])
verbose (bool): verbosity flag(default = True)
Returns:
str: return_path
CommandLine:
python -m utool.util_path --test-search_candidate_paths
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> candidate_path_list = [ut.truepath('~/RPI/code/utool'),
>>> ut.truepath('~/code/utool')]
>>> candidate_name_list = None
>>> required_subpaths = []
>>> verbose = True
>>> priority_paths = None
>>> return_path = search_candidate_paths(candidate_path_list,
>>> candidate_name_list,
>>> priority_paths, required_subpaths,
>>> verbose)
>>> result = ('return_path = %s' % (str(return_path),))
>>> print(result)
"""
import utool as ut
if verbose is None:
verbose = 0 if QUIET else 1
if verbose >= 1:
print('[search_candidate_paths] Searching for candidate paths')
if candidate_name_list is not None:
candidate_path_list_ = [join(dpath, fname) for dpath, fname in
itertools.product(candidate_path_list,
candidate_name_list)]
else:
candidate_path_list_ = candidate_path_list
if priority_paths is not None:
candidate_path_list_ = priority_paths + candidate_path_list_
return_path = None
for path in candidate_path_list_:
if path is not None and exists(path):
if verbose >= 2:
print('[search_candidate_paths] Found candidate directory %r' % (path,))
print('[search_candidate_paths] ... checking for approprate structure')
# tomcat directory exists. Make sure it also contains a webapps dir
subpath_list = [join(path, subpath) for subpath in required_subpaths]
if all(ut.checkpath(path_, verbose=verbose) for path_ in subpath_list):
return_path = path
if verbose >= 2:
print('[search_candidate_paths] Found acceptable path')
return return_path
break
if verbose >= 1:
print('[search_candidate_paths] Failed to find acceptable path')
return return_path | [
"def",
"search_candidate_paths",
"(",
"candidate_path_list",
",",
"candidate_name_list",
"=",
"None",
",",
"priority_paths",
"=",
"None",
",",
"required_subpaths",
"=",
"[",
"]",
",",
"verbose",
"=",
"None",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"verbose",
"is",
"None",
":",
"verbose",
"=",
"0",
"if",
"QUIET",
"else",
"1",
"if",
"verbose",
">=",
"1",
":",
"print",
"(",
"'[search_candidate_paths] Searching for candidate paths'",
")",
"if",
"candidate_name_list",
"is",
"not",
"None",
":",
"candidate_path_list_",
"=",
"[",
"join",
"(",
"dpath",
",",
"fname",
")",
"for",
"dpath",
",",
"fname",
"in",
"itertools",
".",
"product",
"(",
"candidate_path_list",
",",
"candidate_name_list",
")",
"]",
"else",
":",
"candidate_path_list_",
"=",
"candidate_path_list",
"if",
"priority_paths",
"is",
"not",
"None",
":",
"candidate_path_list_",
"=",
"priority_paths",
"+",
"candidate_path_list_",
"return_path",
"=",
"None",
"for",
"path",
"in",
"candidate_path_list_",
":",
"if",
"path",
"is",
"not",
"None",
"and",
"exists",
"(",
"path",
")",
":",
"if",
"verbose",
">=",
"2",
":",
"print",
"(",
"'[search_candidate_paths] Found candidate directory %r'",
"%",
"(",
"path",
",",
")",
")",
"print",
"(",
"'[search_candidate_paths] ... checking for approprate structure'",
")",
"# tomcat directory exists. Make sure it also contains a webapps dir",
"subpath_list",
"=",
"[",
"join",
"(",
"path",
",",
"subpath",
")",
"for",
"subpath",
"in",
"required_subpaths",
"]",
"if",
"all",
"(",
"ut",
".",
"checkpath",
"(",
"path_",
",",
"verbose",
"=",
"verbose",
")",
"for",
"path_",
"in",
"subpath_list",
")",
":",
"return_path",
"=",
"path",
"if",
"verbose",
">=",
"2",
":",
"print",
"(",
"'[search_candidate_paths] Found acceptable path'",
")",
"return",
"return_path",
"break",
"if",
"verbose",
">=",
"1",
":",
"print",
"(",
"'[search_candidate_paths] Failed to find acceptable path'",
")",
"return",
"return_path"
] | searches for existing paths that meed a requirement
Args:
candidate_path_list (list): list of paths to check. If
candidate_name_list is specified this is the dpath list instead
candidate_name_list (list): specifies several names to check
(default = None)
priority_paths (None): specifies paths to check first.
Ignore candidate_name_list (default = None)
required_subpaths (list): specified required directory structure
(default = [])
verbose (bool): verbosity flag(default = True)
Returns:
str: return_path
CommandLine:
python -m utool.util_path --test-search_candidate_paths
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> candidate_path_list = [ut.truepath('~/RPI/code/utool'),
>>> ut.truepath('~/code/utool')]
>>> candidate_name_list = None
>>> required_subpaths = []
>>> verbose = True
>>> priority_paths = None
>>> return_path = search_candidate_paths(candidate_path_list,
>>> candidate_name_list,
>>> priority_paths, required_subpaths,
>>> verbose)
>>> result = ('return_path = %s' % (str(return_path),))
>>> print(result) | [
"searches",
"for",
"existing",
"paths",
"that",
"meed",
"a",
"requirement"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2453-L2525 | train |
Erotemic/utool | utool/util_path.py | symlink | def symlink(real_path, link_path, overwrite=False, on_error='raise',
verbose=2):
"""
Attempt to create a symbolic link.
TODO:
Can this be fixed on windows?
Args:
path (str): path to real file or directory
link_path (str): path to desired location for symlink
overwrite (bool): overwrite existing symlinks (default = False)
on_error (str): strategy for dealing with errors.
raise or ignore
verbose (int): verbosity level (default=2)
Returns:
str: link path
CommandLine:
python -m utool.util_path symlink
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.get_app_resource_dir('utool')
>>> real_path = join(dpath, 'real_file.txt')
>>> link_path = join(dpath, 'link_file.txt')
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
>>> ut.writeto(real_path, 'foo')
>>> result = symlink(real_path, link_path)
>>> assert ut.readfrom(result) == 'foo'
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> real_dpath = ut.get_app_resource_dir('utool', 'real_dpath')
>>> link_dpath = ut.augpath(real_dpath, newfname='link_dpath')
>>> real_path = join(real_dpath, 'afile.txt')
>>> link_path = join(link_dpath, 'afile.txt')
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
>>> ut.ensuredir(real_dpath)
>>> ut.writeto(real_path, 'foo')
>>> result = symlink(real_dpath, link_dpath)
>>> assert ut.readfrom(link_path) == 'foo'
>>> ut.delete(link_dpath, verbose=0)
>>> assert ut.checkpath(real_path)
>>> ut.delete(real_dpath, verbose=0)
>>> assert not ut.checkpath(real_path)
"""
path = normpath(real_path)
link = normpath(link_path)
if verbose:
print('[util_path] Creating symlink: path={} link={}'.format(path, link))
if os.path.islink(link):
if verbose:
print('[util_path] symlink already exists')
os_readlink = getattr(os, "readlink", None)
if callable(os_readlink):
if os_readlink(link) == path:
if verbose > 1:
print('[path] ... and points to the right place')
return link
else:
print('[util_path] Warning, symlinks are not implemented on windows')
if verbose > 1:
print('[util_path] ... but it points somewhere else')
if overwrite:
delete(link, verbose > 1)
elif on_error == 'ignore':
return False
try:
os_symlink = getattr(os, "symlink", None)
if callable(os_symlink):
os_symlink(path, link)
else:
win_shortcut(path, link)
except Exception as ex:
import utool as ut
checkpath(link, verbose=True)
checkpath(path, verbose=True)
do_raise = (on_error == 'raise')
ut.printex(ex, '[util_path] error making symlink',
iswarning=not do_raise)
if do_raise:
raise
return link | python | def symlink(real_path, link_path, overwrite=False, on_error='raise',
verbose=2):
"""
Attempt to create a symbolic link.
TODO:
Can this be fixed on windows?
Args:
path (str): path to real file or directory
link_path (str): path to desired location for symlink
overwrite (bool): overwrite existing symlinks (default = False)
on_error (str): strategy for dealing with errors.
raise or ignore
verbose (int): verbosity level (default=2)
Returns:
str: link path
CommandLine:
python -m utool.util_path symlink
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.get_app_resource_dir('utool')
>>> real_path = join(dpath, 'real_file.txt')
>>> link_path = join(dpath, 'link_file.txt')
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
>>> ut.writeto(real_path, 'foo')
>>> result = symlink(real_path, link_path)
>>> assert ut.readfrom(result) == 'foo'
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> real_dpath = ut.get_app_resource_dir('utool', 'real_dpath')
>>> link_dpath = ut.augpath(real_dpath, newfname='link_dpath')
>>> real_path = join(real_dpath, 'afile.txt')
>>> link_path = join(link_dpath, 'afile.txt')
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
>>> ut.ensuredir(real_dpath)
>>> ut.writeto(real_path, 'foo')
>>> result = symlink(real_dpath, link_dpath)
>>> assert ut.readfrom(link_path) == 'foo'
>>> ut.delete(link_dpath, verbose=0)
>>> assert ut.checkpath(real_path)
>>> ut.delete(real_dpath, verbose=0)
>>> assert not ut.checkpath(real_path)
"""
path = normpath(real_path)
link = normpath(link_path)
if verbose:
print('[util_path] Creating symlink: path={} link={}'.format(path, link))
if os.path.islink(link):
if verbose:
print('[util_path] symlink already exists')
os_readlink = getattr(os, "readlink", None)
if callable(os_readlink):
if os_readlink(link) == path:
if verbose > 1:
print('[path] ... and points to the right place')
return link
else:
print('[util_path] Warning, symlinks are not implemented on windows')
if verbose > 1:
print('[util_path] ... but it points somewhere else')
if overwrite:
delete(link, verbose > 1)
elif on_error == 'ignore':
return False
try:
os_symlink = getattr(os, "symlink", None)
if callable(os_symlink):
os_symlink(path, link)
else:
win_shortcut(path, link)
except Exception as ex:
import utool as ut
checkpath(link, verbose=True)
checkpath(path, verbose=True)
do_raise = (on_error == 'raise')
ut.printex(ex, '[util_path] error making symlink',
iswarning=not do_raise)
if do_raise:
raise
return link | [
"def",
"symlink",
"(",
"real_path",
",",
"link_path",
",",
"overwrite",
"=",
"False",
",",
"on_error",
"=",
"'raise'",
",",
"verbose",
"=",
"2",
")",
":",
"path",
"=",
"normpath",
"(",
"real_path",
")",
"link",
"=",
"normpath",
"(",
"link_path",
")",
"if",
"verbose",
":",
"print",
"(",
"'[util_path] Creating symlink: path={} link={}'",
".",
"format",
"(",
"path",
",",
"link",
")",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"link",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'[util_path] symlink already exists'",
")",
"os_readlink",
"=",
"getattr",
"(",
"os",
",",
"\"readlink\"",
",",
"None",
")",
"if",
"callable",
"(",
"os_readlink",
")",
":",
"if",
"os_readlink",
"(",
"link",
")",
"==",
"path",
":",
"if",
"verbose",
">",
"1",
":",
"print",
"(",
"'[path] ... and points to the right place'",
")",
"return",
"link",
"else",
":",
"print",
"(",
"'[util_path] Warning, symlinks are not implemented on windows'",
")",
"if",
"verbose",
">",
"1",
":",
"print",
"(",
"'[util_path] ... but it points somewhere else'",
")",
"if",
"overwrite",
":",
"delete",
"(",
"link",
",",
"verbose",
">",
"1",
")",
"elif",
"on_error",
"==",
"'ignore'",
":",
"return",
"False",
"try",
":",
"os_symlink",
"=",
"getattr",
"(",
"os",
",",
"\"symlink\"",
",",
"None",
")",
"if",
"callable",
"(",
"os_symlink",
")",
":",
"os_symlink",
"(",
"path",
",",
"link",
")",
"else",
":",
"win_shortcut",
"(",
"path",
",",
"link",
")",
"except",
"Exception",
"as",
"ex",
":",
"import",
"utool",
"as",
"ut",
"checkpath",
"(",
"link",
",",
"verbose",
"=",
"True",
")",
"checkpath",
"(",
"path",
",",
"verbose",
"=",
"True",
")",
"do_raise",
"=",
"(",
"on_error",
"==",
"'raise'",
")",
"ut",
".",
"printex",
"(",
"ex",
",",
"'[util_path] error making symlink'",
",",
"iswarning",
"=",
"not",
"do_raise",
")",
"if",
"do_raise",
":",
"raise",
"return",
"link"
] | Attempt to create a symbolic link.
TODO:
Can this be fixed on windows?
Args:
path (str): path to real file or directory
link_path (str): path to desired location for symlink
overwrite (bool): overwrite existing symlinks (default = False)
on_error (str): strategy for dealing with errors.
raise or ignore
verbose (int): verbosity level (default=2)
Returns:
str: link path
CommandLine:
python -m utool.util_path symlink
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.get_app_resource_dir('utool')
>>> real_path = join(dpath, 'real_file.txt')
>>> link_path = join(dpath, 'link_file.txt')
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
>>> ut.writeto(real_path, 'foo')
>>> result = symlink(real_path, link_path)
>>> assert ut.readfrom(result) == 'foo'
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> real_dpath = ut.get_app_resource_dir('utool', 'real_dpath')
>>> link_dpath = ut.augpath(real_dpath, newfname='link_dpath')
>>> real_path = join(real_dpath, 'afile.txt')
>>> link_path = join(link_dpath, 'afile.txt')
>>> ut.emap(ut.delete, [real_path, link_path], verbose=0)
>>> ut.ensuredir(real_dpath)
>>> ut.writeto(real_path, 'foo')
>>> result = symlink(real_dpath, link_dpath)
>>> assert ut.readfrom(link_path) == 'foo'
>>> ut.delete(link_dpath, verbose=0)
>>> assert ut.checkpath(real_path)
>>> ut.delete(real_dpath, verbose=0)
>>> assert not ut.checkpath(real_path) | [
"Attempt",
"to",
"create",
"a",
"symbolic",
"link",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2567-L2656 | train |
Erotemic/utool | utool/util_path.py | remove_broken_links | def remove_broken_links(dpath, verbose=True):
"""
Removes all broken links in a directory
Args:
dpath (str): directory path
Returns:
int: num removed
References:
http://stackoverflow.com/questions/20794/find-broken-symlinks-with-python
CommandLine:
python -m utool remove_broken_links:0
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_path import * # NOQA
>>> remove_broken_links('.')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_resource_dir('utool', 'path_tests')
>>> ut.delete(dpath)
>>> test_dpath = ut.ensuredir(join(dpath, 'testdpath'))
>>> test_fpath = ut.ensurefile(join(dpath, 'testfpath.txt'))
>>> flink1 = ut.symlink(test_fpath, join(dpath, 'flink1'))
>>> dlink1 = ut.symlink(test_fpath, join(dpath, 'dlink1'))
>>> assert len(ut.ls(dpath)) == 4
>>> ut.delete(test_fpath)
>>> assert len(ut.ls(dpath)) == 3
>>> remove_broken_links(dpath)
>>> ut.delete(test_dpath)
>>> remove_broken_links(dpath)
>>> assert len(ut.ls(dpath)) == 0
"""
fname_list = [join(dpath, fname) for fname in os.listdir(dpath)]
broken_links = list(filterfalse(exists, filter(islink, fname_list)))
num_broken = len(broken_links)
if verbose:
if verbose > 1 or num_broken > 0:
print('[util_path] Removing %d broken links in %r' % (num_broken, dpath,))
for link in broken_links:
os.unlink(link)
return num_broken | python | def remove_broken_links(dpath, verbose=True):
"""
Removes all broken links in a directory
Args:
dpath (str): directory path
Returns:
int: num removed
References:
http://stackoverflow.com/questions/20794/find-broken-symlinks-with-python
CommandLine:
python -m utool remove_broken_links:0
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_path import * # NOQA
>>> remove_broken_links('.')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_resource_dir('utool', 'path_tests')
>>> ut.delete(dpath)
>>> test_dpath = ut.ensuredir(join(dpath, 'testdpath'))
>>> test_fpath = ut.ensurefile(join(dpath, 'testfpath.txt'))
>>> flink1 = ut.symlink(test_fpath, join(dpath, 'flink1'))
>>> dlink1 = ut.symlink(test_fpath, join(dpath, 'dlink1'))
>>> assert len(ut.ls(dpath)) == 4
>>> ut.delete(test_fpath)
>>> assert len(ut.ls(dpath)) == 3
>>> remove_broken_links(dpath)
>>> ut.delete(test_dpath)
>>> remove_broken_links(dpath)
>>> assert len(ut.ls(dpath)) == 0
"""
fname_list = [join(dpath, fname) for fname in os.listdir(dpath)]
broken_links = list(filterfalse(exists, filter(islink, fname_list)))
num_broken = len(broken_links)
if verbose:
if verbose > 1 or num_broken > 0:
print('[util_path] Removing %d broken links in %r' % (num_broken, dpath,))
for link in broken_links:
os.unlink(link)
return num_broken | [
"def",
"remove_broken_links",
"(",
"dpath",
",",
"verbose",
"=",
"True",
")",
":",
"fname_list",
"=",
"[",
"join",
"(",
"dpath",
",",
"fname",
")",
"for",
"fname",
"in",
"os",
".",
"listdir",
"(",
"dpath",
")",
"]",
"broken_links",
"=",
"list",
"(",
"filterfalse",
"(",
"exists",
",",
"filter",
"(",
"islink",
",",
"fname_list",
")",
")",
")",
"num_broken",
"=",
"len",
"(",
"broken_links",
")",
"if",
"verbose",
":",
"if",
"verbose",
">",
"1",
"or",
"num_broken",
">",
"0",
":",
"print",
"(",
"'[util_path] Removing %d broken links in %r'",
"%",
"(",
"num_broken",
",",
"dpath",
",",
")",
")",
"for",
"link",
"in",
"broken_links",
":",
"os",
".",
"unlink",
"(",
"link",
")",
"return",
"num_broken"
] | Removes all broken links in a directory
Args:
dpath (str): directory path
Returns:
int: num removed
References:
http://stackoverflow.com/questions/20794/find-broken-symlinks-with-python
CommandLine:
python -m utool remove_broken_links:0
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_path import * # NOQA
>>> remove_broken_links('.')
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> dpath = ut.ensure_app_resource_dir('utool', 'path_tests')
>>> ut.delete(dpath)
>>> test_dpath = ut.ensuredir(join(dpath, 'testdpath'))
>>> test_fpath = ut.ensurefile(join(dpath, 'testfpath.txt'))
>>> flink1 = ut.symlink(test_fpath, join(dpath, 'flink1'))
>>> dlink1 = ut.symlink(test_fpath, join(dpath, 'dlink1'))
>>> assert len(ut.ls(dpath)) == 4
>>> ut.delete(test_fpath)
>>> assert len(ut.ls(dpath)) == 3
>>> remove_broken_links(dpath)
>>> ut.delete(test_dpath)
>>> remove_broken_links(dpath)
>>> assert len(ut.ls(dpath)) == 0 | [
"Removes",
"all",
"broken",
"links",
"in",
"a",
"directory"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2659-L2707 | train |
Erotemic/utool | utool/util_path.py | non_existing_path | def non_existing_path(path_, dpath=None, offset=0, suffix=None,
force_fmt=False):
r"""
Searches for and finds a path garuenteed to not exist.
Args:
path_ (str): path string. If may include a "%" formatstr.
dpath (str): directory path(default = None)
offset (int): (default = 0)
suffix (None): (default = None)
Returns:
str: path_ - path string
CommandLine:
python -m utool.util_path non_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_resource_dir('utool', 'tmp')
>>> ut.touch(base + '/tmp.txt')
>>> ut.touch(base + '/tmp0.txt')
>>> ut.delete(base + '/tmp1.txt')
>>> path_ = base + '/tmp.txt'
>>> newpath = ut.non_existing_path(path_)
>>> assert basename(newpath) == 'tmp1.txt'
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_resource_dir('utool', 'tmp')
>>> ut.ensurepath(base + '/dir_old')
>>> ut.ensurepath(base + '/dir_old0')
>>> ut.ensurepath(base + '/dir_old1')
>>> ut.delete(base + '/dir_old2')
>>> path_ = base + '/dir'
>>> suffix = '_old'
>>> newpath = ut.non_existing_path(path_, suffix=suffix)
>>> ut.assert_eq(basename(newpath), 'dir_old2')
"""
import utool as ut
from os.path import basename, dirname
if dpath is None:
dpath = dirname(path_)
base_fmtstr = basename(path_)
if suffix is not None:
base_fmtstr = ut.augpath(base_fmtstr, suffix)
if '%' not in base_fmtstr:
if not force_fmt:
# If we have don't have to format,
# then try to use the first choice
first_choice = join(dpath, base_fmtstr)
if not exists(first_choice):
return first_choice
# otherwise we ensure we can format and we continue
base_fmtstr = ut.augpath(base_fmtstr, '%d')
dname_list = ut.glob(dpath, pattern='*', recursive=False, with_files=True,
with_dirs=True)
conflict_set = set(basename(dname) for dname in dname_list)
newname = ut.get_nonconflicting_string(base_fmtstr, conflict_set,
offset=offset)
newpath = join(dpath, newname)
return newpath | python | def non_existing_path(path_, dpath=None, offset=0, suffix=None,
force_fmt=False):
r"""
Searches for and finds a path garuenteed to not exist.
Args:
path_ (str): path string. If may include a "%" formatstr.
dpath (str): directory path(default = None)
offset (int): (default = 0)
suffix (None): (default = None)
Returns:
str: path_ - path string
CommandLine:
python -m utool.util_path non_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_resource_dir('utool', 'tmp')
>>> ut.touch(base + '/tmp.txt')
>>> ut.touch(base + '/tmp0.txt')
>>> ut.delete(base + '/tmp1.txt')
>>> path_ = base + '/tmp.txt'
>>> newpath = ut.non_existing_path(path_)
>>> assert basename(newpath) == 'tmp1.txt'
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_resource_dir('utool', 'tmp')
>>> ut.ensurepath(base + '/dir_old')
>>> ut.ensurepath(base + '/dir_old0')
>>> ut.ensurepath(base + '/dir_old1')
>>> ut.delete(base + '/dir_old2')
>>> path_ = base + '/dir'
>>> suffix = '_old'
>>> newpath = ut.non_existing_path(path_, suffix=suffix)
>>> ut.assert_eq(basename(newpath), 'dir_old2')
"""
import utool as ut
from os.path import basename, dirname
if dpath is None:
dpath = dirname(path_)
base_fmtstr = basename(path_)
if suffix is not None:
base_fmtstr = ut.augpath(base_fmtstr, suffix)
if '%' not in base_fmtstr:
if not force_fmt:
# If we have don't have to format,
# then try to use the first choice
first_choice = join(dpath, base_fmtstr)
if not exists(first_choice):
return first_choice
# otherwise we ensure we can format and we continue
base_fmtstr = ut.augpath(base_fmtstr, '%d')
dname_list = ut.glob(dpath, pattern='*', recursive=False, with_files=True,
with_dirs=True)
conflict_set = set(basename(dname) for dname in dname_list)
newname = ut.get_nonconflicting_string(base_fmtstr, conflict_set,
offset=offset)
newpath = join(dpath, newname)
return newpath | [
"def",
"non_existing_path",
"(",
"path_",
",",
"dpath",
"=",
"None",
",",
"offset",
"=",
"0",
",",
"suffix",
"=",
"None",
",",
"force_fmt",
"=",
"False",
")",
":",
"import",
"utool",
"as",
"ut",
"from",
"os",
".",
"path",
"import",
"basename",
",",
"dirname",
"if",
"dpath",
"is",
"None",
":",
"dpath",
"=",
"dirname",
"(",
"path_",
")",
"base_fmtstr",
"=",
"basename",
"(",
"path_",
")",
"if",
"suffix",
"is",
"not",
"None",
":",
"base_fmtstr",
"=",
"ut",
".",
"augpath",
"(",
"base_fmtstr",
",",
"suffix",
")",
"if",
"'%'",
"not",
"in",
"base_fmtstr",
":",
"if",
"not",
"force_fmt",
":",
"# If we have don't have to format,",
"# then try to use the first choice",
"first_choice",
"=",
"join",
"(",
"dpath",
",",
"base_fmtstr",
")",
"if",
"not",
"exists",
"(",
"first_choice",
")",
":",
"return",
"first_choice",
"# otherwise we ensure we can format and we continue",
"base_fmtstr",
"=",
"ut",
".",
"augpath",
"(",
"base_fmtstr",
",",
"'%d'",
")",
"dname_list",
"=",
"ut",
".",
"glob",
"(",
"dpath",
",",
"pattern",
"=",
"'*'",
",",
"recursive",
"=",
"False",
",",
"with_files",
"=",
"True",
",",
"with_dirs",
"=",
"True",
")",
"conflict_set",
"=",
"set",
"(",
"basename",
"(",
"dname",
")",
"for",
"dname",
"in",
"dname_list",
")",
"newname",
"=",
"ut",
".",
"get_nonconflicting_string",
"(",
"base_fmtstr",
",",
"conflict_set",
",",
"offset",
"=",
"offset",
")",
"newpath",
"=",
"join",
"(",
"dpath",
",",
"newname",
")",
"return",
"newpath"
] | r"""
Searches for and finds a path garuenteed to not exist.
Args:
path_ (str): path string. If may include a "%" formatstr.
dpath (str): directory path(default = None)
offset (int): (default = 0)
suffix (None): (default = None)
Returns:
str: path_ - path string
CommandLine:
python -m utool.util_path non_existing_path
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_resource_dir('utool', 'tmp')
>>> ut.touch(base + '/tmp.txt')
>>> ut.touch(base + '/tmp0.txt')
>>> ut.delete(base + '/tmp1.txt')
>>> path_ = base + '/tmp.txt'
>>> newpath = ut.non_existing_path(path_)
>>> assert basename(newpath) == 'tmp1.txt'
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_resource_dir('utool', 'tmp')
>>> ut.ensurepath(base + '/dir_old')
>>> ut.ensurepath(base + '/dir_old0')
>>> ut.ensurepath(base + '/dir_old1')
>>> ut.delete(base + '/dir_old2')
>>> path_ = base + '/dir'
>>> suffix = '_old'
>>> newpath = ut.non_existing_path(path_, suffix=suffix)
>>> ut.assert_eq(basename(newpath), 'dir_old2') | [
"r",
"Searches",
"for",
"and",
"finds",
"a",
"path",
"garuenteed",
"to",
"not",
"exist",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_path.py#L2710-L2779 | train |
glormph/msstitch | src/app/actions/mslookup/quant.py | create_isobaric_quant_lookup | def create_isobaric_quant_lookup(quantdb, specfn_consensus_els, channelmap):
"""Creates an sqlite lookup table of scannrs with quant data.
spectra - an iterable of tupled (filename, spectra)
consensus_els - a iterable with consensusElements"""
# store quantchannels in lookup and generate a db_id vs channel map
channels_store = ((name,) for name, c_id
in sorted(channelmap.items(), key=lambda x: x[1]))
quantdb.store_channelmap(channels_store)
channelmap_dbid = {channelmap[ch_name]: ch_id for ch_id, ch_name in
quantdb.get_channelmap()}
quants = []
mzmlmap = quantdb.get_mzmlfile_map()
for specfn, consensus_el in specfn_consensus_els:
rt = openmsreader.get_consxml_rt(consensus_el)
rt = round(float(Decimal(rt) / 60), 12)
qdata = get_quant_data(consensus_el)
spectra_id = quantdb.get_spectra_id(mzmlmap[specfn],
retention_time=rt)
for channel_no in sorted(qdata.keys()):
quants.append((spectra_id, channelmap_dbid[channel_no],
qdata[channel_no]))
if len(quants) == DB_STORE_CHUNK:
quantdb.store_isobaric_quants(quants)
quantdb.store_isobaric_quants(quants)
quantdb.index_isobaric_quants() | python | def create_isobaric_quant_lookup(quantdb, specfn_consensus_els, channelmap):
"""Creates an sqlite lookup table of scannrs with quant data.
spectra - an iterable of tupled (filename, spectra)
consensus_els - a iterable with consensusElements"""
# store quantchannels in lookup and generate a db_id vs channel map
channels_store = ((name,) for name, c_id
in sorted(channelmap.items(), key=lambda x: x[1]))
quantdb.store_channelmap(channels_store)
channelmap_dbid = {channelmap[ch_name]: ch_id for ch_id, ch_name in
quantdb.get_channelmap()}
quants = []
mzmlmap = quantdb.get_mzmlfile_map()
for specfn, consensus_el in specfn_consensus_els:
rt = openmsreader.get_consxml_rt(consensus_el)
rt = round(float(Decimal(rt) / 60), 12)
qdata = get_quant_data(consensus_el)
spectra_id = quantdb.get_spectra_id(mzmlmap[specfn],
retention_time=rt)
for channel_no in sorted(qdata.keys()):
quants.append((spectra_id, channelmap_dbid[channel_no],
qdata[channel_no]))
if len(quants) == DB_STORE_CHUNK:
quantdb.store_isobaric_quants(quants)
quantdb.store_isobaric_quants(quants)
quantdb.index_isobaric_quants() | [
"def",
"create_isobaric_quant_lookup",
"(",
"quantdb",
",",
"specfn_consensus_els",
",",
"channelmap",
")",
":",
"# store quantchannels in lookup and generate a db_id vs channel map",
"channels_store",
"=",
"(",
"(",
"name",
",",
")",
"for",
"name",
",",
"c_id",
"in",
"sorted",
"(",
"channelmap",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
")",
")",
"quantdb",
".",
"store_channelmap",
"(",
"channels_store",
")",
"channelmap_dbid",
"=",
"{",
"channelmap",
"[",
"ch_name",
"]",
":",
"ch_id",
"for",
"ch_id",
",",
"ch_name",
"in",
"quantdb",
".",
"get_channelmap",
"(",
")",
"}",
"quants",
"=",
"[",
"]",
"mzmlmap",
"=",
"quantdb",
".",
"get_mzmlfile_map",
"(",
")",
"for",
"specfn",
",",
"consensus_el",
"in",
"specfn_consensus_els",
":",
"rt",
"=",
"openmsreader",
".",
"get_consxml_rt",
"(",
"consensus_el",
")",
"rt",
"=",
"round",
"(",
"float",
"(",
"Decimal",
"(",
"rt",
")",
"/",
"60",
")",
",",
"12",
")",
"qdata",
"=",
"get_quant_data",
"(",
"consensus_el",
")",
"spectra_id",
"=",
"quantdb",
".",
"get_spectra_id",
"(",
"mzmlmap",
"[",
"specfn",
"]",
",",
"retention_time",
"=",
"rt",
")",
"for",
"channel_no",
"in",
"sorted",
"(",
"qdata",
".",
"keys",
"(",
")",
")",
":",
"quants",
".",
"append",
"(",
"(",
"spectra_id",
",",
"channelmap_dbid",
"[",
"channel_no",
"]",
",",
"qdata",
"[",
"channel_no",
"]",
")",
")",
"if",
"len",
"(",
"quants",
")",
"==",
"DB_STORE_CHUNK",
":",
"quantdb",
".",
"store_isobaric_quants",
"(",
"quants",
")",
"quantdb",
".",
"store_isobaric_quants",
"(",
"quants",
")",
"quantdb",
".",
"index_isobaric_quants",
"(",
")"
] | Creates an sqlite lookup table of scannrs with quant data.
spectra - an iterable of tupled (filename, spectra)
consensus_els - a iterable with consensusElements | [
"Creates",
"an",
"sqlite",
"lookup",
"table",
"of",
"scannrs",
"with",
"quant",
"data",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/quant.py#L9-L34 | train |
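The channel bookkeeping in `create_isobaric_quant_lookup` is easier to follow with a toy channel map; this sketch leaves the database out entirely and only mirrors how `channels_store` is ordered and how `channelmap_dbid` maps consensusXML map numbers to database channel ids (the stored id values are assumed here, since the real ones come back from the lookup):

channelmap = {'tmt10_126': 0, 'tmt10_127N': 1, 'tmt10_127C': 2}

# rows handed to quantdb.store_channelmap, ordered by channel number
channels_store = [(name,) for name, c_id
                  in sorted(channelmap.items(), key=lambda x: x[1])]
# [('tmt10_126',), ('tmt10_127N',), ('tmt10_127C',)]

# pretend the lookup assigned ids 1..3 on insert; quantdb.get_channelmap()
# would then yield (ch_id, ch_name) pairs like these:
stored_rows = [(1, 'tmt10_126'), (2, 'tmt10_127N'), (3, 'tmt10_127C')]
channelmap_dbid = {channelmap[ch_name]: ch_id for ch_id, ch_name in stored_rows}
# {0: 1, 1: 2, 2: 3}   -> consensusXML map number to database channel id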
glormph/msstitch | src/app/actions/mslookup/quant.py | get_precursors_from_window | def get_precursors_from_window(quantdb, minmz):
"""Returns a dict of a specified amount of features from the
ms1 quant database, and the highest mz of those features"""
featmap = {}
mz = False
features = quantdb.get_precursor_quant_window(FEATURE_ALIGN_WINDOW_AMOUNT,
minmz)
for feat_id, fn_id, charge, mz, rt in features:
try:
featmap[fn_id][charge].append((mz, rt, feat_id))
except KeyError:
try:
featmap[fn_id][charge] = [(mz, rt, feat_id)]
except KeyError:
featmap[fn_id] = {charge: [(mz, rt, feat_id)]}
return featmap, mz | python | def get_precursors_from_window(quantdb, minmz):
"""Returns a dict of a specified amount of features from the
ms1 quant database, and the highest mz of those features"""
featmap = {}
mz = False
features = quantdb.get_precursor_quant_window(FEATURE_ALIGN_WINDOW_AMOUNT,
minmz)
for feat_id, fn_id, charge, mz, rt in features:
try:
featmap[fn_id][charge].append((mz, rt, feat_id))
except KeyError:
try:
featmap[fn_id][charge] = [(mz, rt, feat_id)]
except KeyError:
featmap[fn_id] = {charge: [(mz, rt, feat_id)]}
return featmap, mz | [
"def",
"get_precursors_from_window",
"(",
"quantdb",
",",
"minmz",
")",
":",
"featmap",
"=",
"{",
"}",
"mz",
"=",
"False",
"features",
"=",
"quantdb",
".",
"get_precursor_quant_window",
"(",
"FEATURE_ALIGN_WINDOW_AMOUNT",
",",
"minmz",
")",
"for",
"feat_id",
",",
"fn_id",
",",
"charge",
",",
"mz",
",",
"rt",
"in",
"features",
":",
"try",
":",
"featmap",
"[",
"fn_id",
"]",
"[",
"charge",
"]",
".",
"append",
"(",
"(",
"mz",
",",
"rt",
",",
"feat_id",
")",
")",
"except",
"KeyError",
":",
"try",
":",
"featmap",
"[",
"fn_id",
"]",
"[",
"charge",
"]",
"=",
"[",
"(",
"mz",
",",
"rt",
",",
"feat_id",
")",
"]",
"except",
"KeyError",
":",
"featmap",
"[",
"fn_id",
"]",
"=",
"{",
"charge",
":",
"[",
"(",
"mz",
",",
"rt",
",",
"feat_id",
")",
"]",
"}",
"return",
"featmap",
",",
"mz"
] | Returns a dict of a specified amount of features from the
ms1 quant database, and the highest mz of those features | [
"Returns",
"a",
"dict",
"of",
"a",
"specified",
"amount",
"of",
"features",
"from",
"the",
"ms1",
"quant",
"database",
"and",
"the",
"highest",
"mz",
"of",
"those",
"features"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/quant.py#L105-L120 | train |
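For reference, the `featmap` built by `get_precursors_from_window` is a two-level dict keyed by mzML file id and charge; the values below are invented and only show the shape:

featmap = {
    1: {                            # fn_id (mzML file id)
        2: [(421.76, 12.3, 101),    # charge 2: (mz, rt, feat_id) tuples
            (422.27, 14.1, 102)],
        3: [(300.51,  9.8, 103)],   # charge 3
    },
}
# the second return value is the m/z of the last feature fetched, which the
# caller passes back in as `minmz` to request the next window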
glormph/msstitch | src/app/actions/mslookup/quant.py | get_quant_data | def get_quant_data(cons_el):
"""Gets quant data from consensusXML element"""
quant_out = {}
for reporter in cons_el.findall('.//element'):
quant_out[reporter.attrib['map']] = reporter.attrib['it']
return quant_out | python | def get_quant_data(cons_el):
"""Gets quant data from consensusXML element"""
quant_out = {}
for reporter in cons_el.findall('.//element'):
quant_out[reporter.attrib['map']] = reporter.attrib['it']
return quant_out | [
"def",
"get_quant_data",
"(",
"cons_el",
")",
":",
"quant_out",
"=",
"{",
"}",
"for",
"reporter",
"in",
"cons_el",
".",
"findall",
"(",
"'.//element'",
")",
":",
"quant_out",
"[",
"reporter",
".",
"attrib",
"[",
"'map'",
"]",
"]",
"=",
"reporter",
".",
"attrib",
"[",
"'it'",
"]",
"return",
"quant_out"
] | Gets quant data from consensusXML element | [
"Gets",
"quant",
"data",
"from",
"consensusXML",
"element"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/quant.py#L139-L144 | train |
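A self-contained sketch of the same extraction using a hand-written consensusXML fragment (the element layout is simplified; only the `map` and `it` attributes matter for this function):

import xml.etree.ElementTree as ET

cons_el = ET.fromstring(
    '<consensusElement>'
    '<groupedElementList>'
    '<element map="0" it="1052.7" rt="745.2" mz="126.13"/>'
    '<element map="1" it="998.4" rt="745.2" mz="127.13"/>'
    '</groupedElementList>'
    '</consensusElement>')

quant_out = {rep.attrib['map']: rep.attrib['it']
             for rep in cons_el.findall('.//element')}
# {'0': '1052.7', '1': '998.4'}   (map number -> intensity, both still strings)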
Erotemic/utool | utool/util_cplat.py | get_plat_specifier | def get_plat_specifier():
"""
Standard platform specifier used by distutils
"""
import setuptools # NOQA
import distutils
plat_name = distutils.util.get_platform()
plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
return plat_specifier | python | def get_plat_specifier():
"""
Standard platform specifier used by distutils
"""
import setuptools # NOQA
import distutils
plat_name = distutils.util.get_platform()
plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3])
if hasattr(sys, 'gettotalrefcount'):
plat_specifier += '-pydebug'
return plat_specifier | [
"def",
"get_plat_specifier",
"(",
")",
":",
"import",
"setuptools",
"# NOQA",
"import",
"distutils",
"plat_name",
"=",
"distutils",
".",
"util",
".",
"get_platform",
"(",
")",
"plat_specifier",
"=",
"\".%s-%s\"",
"%",
"(",
"plat_name",
",",
"sys",
".",
"version",
"[",
"0",
":",
"3",
"]",
")",
"if",
"hasattr",
"(",
"sys",
",",
"'gettotalrefcount'",
")",
":",
"plat_specifier",
"+=",
"'-pydebug'",
"return",
"plat_specifier"
] | Standard platform specifier used by distutils | [
"Standard",
"platform",
"specifier",
"used",
"by",
"distutils"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L66-L76 | train |
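For a rough idea of the strings involved (the exact values depend on the machine and interpreter), the specifier is assembled as below; note that `sys.version[0:3]` only captures single-digit minor versions, so for example Python 3.10 would come out as '3.1':

import distutils.util
import sys

plat_name = distutils.util.get_platform()                  # e.g. 'linux-x86_64'
plat_specifier = ".%s-%s" % (plat_name, sys.version[0:3])  # e.g. '.linux-x86_64-2.7'
# a '-pydebug' suffix is appended on debug builds of the interpreter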
Erotemic/utool | utool/util_cplat.py | get_system_python_library | def get_system_python_library():
"""
FIXME; hacky way of finding python library. Not cross platform yet.
"""
import os
import utool as ut
from os.path import basename, realpath
pyname = basename(realpath(sys.executable))
ld_library_path = os.environ['LD_LIBRARY_PATH']
libdirs = [x for x in ld_library_path.split(os.pathsep) if x] + ['/usr/lib']
libfiles = ut.flatten([ut.glob(d, '*' + ut.get_lib_ext(), recursive=True) for d in libdirs])
python_libs = [realpath(f) for f in libfiles if 'lib' + pyname in basename(f)]
python_libs = ut.unique_ordered(python_libs)
assert len(python_libs) == 1, str(python_libs)
return python_libs[0] | python | def get_system_python_library():
"""
FIXME; hacky way of finding python library. Not cross platform yet.
"""
import os
import utool as ut
from os.path import basename, realpath
pyname = basename(realpath(sys.executable))
ld_library_path = os.environ['LD_LIBRARY_PATH']
libdirs = [x for x in ld_library_path.split(os.pathsep) if x] + ['/usr/lib']
libfiles = ut.flatten([ut.glob(d, '*' + ut.get_lib_ext(), recursive=True) for d in libdirs])
python_libs = [realpath(f) for f in libfiles if 'lib' + pyname in basename(f)]
python_libs = ut.unique_ordered(python_libs)
assert len(python_libs) == 1, str(python_libs)
return python_libs[0] | [
"def",
"get_system_python_library",
"(",
")",
":",
"import",
"os",
"import",
"utool",
"as",
"ut",
"from",
"os",
".",
"path",
"import",
"basename",
",",
"realpath",
"pyname",
"=",
"basename",
"(",
"realpath",
"(",
"sys",
".",
"executable",
")",
")",
"ld_library_path",
"=",
"os",
".",
"environ",
"[",
"'LD_LIBRARY_PATH'",
"]",
"libdirs",
"=",
"[",
"x",
"for",
"x",
"in",
"ld_library_path",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"if",
"x",
"]",
"+",
"[",
"'/usr/lib'",
"]",
"libfiles",
"=",
"ut",
".",
"flatten",
"(",
"[",
"ut",
".",
"glob",
"(",
"d",
",",
"'*'",
"+",
"ut",
".",
"get_lib_ext",
"(",
")",
",",
"recursive",
"=",
"True",
")",
"for",
"d",
"in",
"libdirs",
"]",
")",
"python_libs",
"=",
"[",
"realpath",
"(",
"f",
")",
"for",
"f",
"in",
"libfiles",
"if",
"'lib'",
"+",
"pyname",
"in",
"basename",
"(",
"f",
")",
"]",
"python_libs",
"=",
"ut",
".",
"unique_ordered",
"(",
"python_libs",
")",
"assert",
"len",
"(",
"python_libs",
")",
"==",
"1",
",",
"str",
"(",
"python_libs",
")",
"return",
"python_libs",
"[",
"0",
"]"
] | FIXME; hacky way of finding python library. Not cross platform yet. | [
"FIXME",
";",
"hacky",
"way",
"of",
"finding",
"python",
"library",
".",
"Not",
"cross",
"platform",
"yet",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L88-L102 | train |
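Note that the function reads `os.environ['LD_LIBRARY_PATH']` directly, so it raises a KeyError when that variable is unset; a rough standalone equivalent of the search (Linux-only, standard library only, with the '.so' suffix assumed) might look like:

import os
import sys
from os.path import basename, realpath, join

pyname = basename(realpath(sys.executable))    # e.g. 'python2.7'
libdirs = [d for d in os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) if d]
libdirs.append('/usr/lib')

candidates = set()
for dpath in libdirs:
    for root, _, files in os.walk(dpath):
        for fname in files:
            if fname.endswith('.so') and ('lib' + pyname) in fname:
                candidates.add(realpath(join(root, fname)))
# the original then asserts that exactly one such library was found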
Erotemic/utool | utool/util_cplat.py | get_dynlib_dependencies | def get_dynlib_dependencies(lib_path):
"""
Executes tools for inspecting dynamic library dependencies depending on the
current platform.
"""
if LINUX:
ldd_fpath = '/usr/bin/ldd'
depend_out, depend_err, ret = cmd(ldd_fpath, lib_path, verbose=False)
elif DARWIN:
otool_fpath = '/opt/local/bin/otool'
depend_out, depend_err, ret = cmd(otool_fpath, '-L', lib_path, verbose=False)
elif WIN32:
depend_out, depend_err, ret = cmd('objdump', '-p', lib_path, verbose=False)
#fnmatch.filter(depend_out.split('\n'), '*DLL*')
relevant_lines = [line for line in depend_out.splitlines() if 'DLL Name:' in line]
depend_out = '\n'.join(relevant_lines)
assert ret == 0, 'bad dependency check'
return depend_out | python | def get_dynlib_dependencies(lib_path):
"""
Executes tools for inspecting dynamic library dependencies depending on the
current platform.
"""
if LINUX:
ldd_fpath = '/usr/bin/ldd'
depend_out, depend_err, ret = cmd(ldd_fpath, lib_path, verbose=False)
elif DARWIN:
otool_fpath = '/opt/local/bin/otool'
depend_out, depend_err, ret = cmd(otool_fpath, '-L', lib_path, verbose=False)
elif WIN32:
depend_out, depend_err, ret = cmd('objdump', '-p', lib_path, verbose=False)
#fnmatch.filter(depend_out.split('\n'), '*DLL*')
relevant_lines = [line for line in depend_out.splitlines() if 'DLL Name:' in line]
depend_out = '\n'.join(relevant_lines)
assert ret == 0, 'bad dependency check'
return depend_out | [
"def",
"get_dynlib_dependencies",
"(",
"lib_path",
")",
":",
"if",
"LINUX",
":",
"ldd_fpath",
"=",
"'/usr/bin/ldd'",
"depend_out",
",",
"depend_err",
",",
"ret",
"=",
"cmd",
"(",
"ldd_fpath",
",",
"lib_path",
",",
"verbose",
"=",
"False",
")",
"elif",
"DARWIN",
":",
"otool_fpath",
"=",
"'/opt/local/bin/otool'",
"depend_out",
",",
"depend_err",
",",
"ret",
"=",
"cmd",
"(",
"otool_fpath",
",",
"'-L'",
",",
"lib_path",
",",
"verbose",
"=",
"False",
")",
"elif",
"WIN32",
":",
"depend_out",
",",
"depend_err",
",",
"ret",
"=",
"cmd",
"(",
"'objdump'",
",",
"'-p'",
",",
"lib_path",
",",
"verbose",
"=",
"False",
")",
"#fnmatch.filter(depend_out.split('\\n'), '*DLL*')",
"relevant_lines",
"=",
"[",
"line",
"for",
"line",
"in",
"depend_out",
".",
"splitlines",
"(",
")",
"if",
"'DLL Name:'",
"in",
"line",
"]",
"depend_out",
"=",
"'\\n'",
".",
"join",
"(",
"relevant_lines",
")",
"assert",
"ret",
"==",
"0",
",",
"'bad dependency check'",
"return",
"depend_out"
] | Executes tools for inspecting dynamic library dependencies depending on the
current platform. | [
"Executes",
"tools",
"for",
"inspecting",
"dynamic",
"library",
"dependencies",
"depending",
"on",
"the",
"current",
"platform",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L329-L346 | train |
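The same checks can be reproduced with `subprocess` directly; the tools are assumed to be on PATH here, whereas the function above hard-codes `/usr/bin/ldd` and `/opt/local/bin/otool`, and the library path below is only an example:

import subprocess
import sys

lib_path = '/usr/lib/x86_64-linux-gnu/libsqlite3.so.0'   # any shared library

if sys.platform.startswith('linux'):
    out = subprocess.check_output(['ldd', lib_path]).decode()
elif sys.platform == 'darwin':
    out = subprocess.check_output(['otool', '-L', lib_path]).decode()
else:  # windows: needs binutils' objdump on PATH
    raw = subprocess.check_output(['objdump', '-p', lib_path]).decode()
    out = '\n'.join(line for line in raw.splitlines() if 'DLL Name:' in line)

print(out)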
Erotemic/utool | utool/util_cplat.py | startfile | def startfile(fpath, detatch=True, quote=False, verbose=False, quiet=True):
""" Uses default program defined by the system to open a file.
References:
http://stackoverflow.com/questions/2692873/quote-posix-shell-special-characters-in-python-output
"""
print('[cplat] startfile(%r)' % fpath)
fpath = normpath(fpath)
# print('[cplat] fpath=%s' % fpath)
if not exists(fpath):
raise Exception('Cannot start nonexistant file: %r' % fpath)
#if quote:
# fpath = '"%s"' % (fpath,)
if not WIN32:
fpath = pipes.quote(fpath)
if LINUX:
#out, err, ret = cmd(['xdg-open', fpath], detatch=True)
outtup = cmd(('xdg-open', fpath), detatch=detatch, verbose=verbose, quiet=quiet)
#outtup = cmd('xdg-open', fpath, detatch=detatch)
elif DARWIN:
outtup = cmd(('open', fpath), detatch=detatch, verbose=verbose, quiet=quiet)
elif WIN32:
os.startfile(fpath)
else:
raise RuntimeError('Unknown Platform')
if outtup is not None:
out, err, ret = outtup
if not ret:
raise Exception(out + ' -- ' + err)
pass | python | def startfile(fpath, detatch=True, quote=False, verbose=False, quiet=True):
""" Uses default program defined by the system to open a file.
References:
http://stackoverflow.com/questions/2692873/quote-posix-shell-special-characters-in-python-output
"""
print('[cplat] startfile(%r)' % fpath)
fpath = normpath(fpath)
# print('[cplat] fpath=%s' % fpath)
if not exists(fpath):
raise Exception('Cannot start nonexistant file: %r' % fpath)
#if quote:
# fpath = '"%s"' % (fpath,)
if not WIN32:
fpath = pipes.quote(fpath)
if LINUX:
#out, err, ret = cmd(['xdg-open', fpath], detatch=True)
outtup = cmd(('xdg-open', fpath), detatch=detatch, verbose=verbose, quiet=quiet)
#outtup = cmd('xdg-open', fpath, detatch=detatch)
elif DARWIN:
outtup = cmd(('open', fpath), detatch=detatch, verbose=verbose, quiet=quiet)
elif WIN32:
os.startfile(fpath)
else:
raise RuntimeError('Unknown Platform')
if outtup is not None:
out, err, ret = outtup
if not ret:
raise Exception(out + ' -- ' + err)
pass | [
"def",
"startfile",
"(",
"fpath",
",",
"detatch",
"=",
"True",
",",
"quote",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"quiet",
"=",
"True",
")",
":",
"print",
"(",
"'[cplat] startfile(%r)'",
"%",
"fpath",
")",
"fpath",
"=",
"normpath",
"(",
"fpath",
")",
"# print('[cplat] fpath=%s' % fpath)",
"if",
"not",
"exists",
"(",
"fpath",
")",
":",
"raise",
"Exception",
"(",
"'Cannot start nonexistant file: %r'",
"%",
"fpath",
")",
"#if quote:",
"# fpath = '\"%s\"' % (fpath,)",
"if",
"not",
"WIN32",
":",
"fpath",
"=",
"pipes",
".",
"quote",
"(",
"fpath",
")",
"if",
"LINUX",
":",
"#out, err, ret = cmd(['xdg-open', fpath], detatch=True)",
"outtup",
"=",
"cmd",
"(",
"(",
"'xdg-open'",
",",
"fpath",
")",
",",
"detatch",
"=",
"detatch",
",",
"verbose",
"=",
"verbose",
",",
"quiet",
"=",
"quiet",
")",
"#outtup = cmd('xdg-open', fpath, detatch=detatch)",
"elif",
"DARWIN",
":",
"outtup",
"=",
"cmd",
"(",
"(",
"'open'",
",",
"fpath",
")",
",",
"detatch",
"=",
"detatch",
",",
"verbose",
"=",
"verbose",
",",
"quiet",
"=",
"quiet",
")",
"elif",
"WIN32",
":",
"os",
".",
"startfile",
"(",
"fpath",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"'Unknown Platform'",
")",
"if",
"outtup",
"is",
"not",
"None",
":",
"out",
",",
"err",
",",
"ret",
"=",
"outtup",
"if",
"not",
"ret",
":",
"raise",
"Exception",
"(",
"out",
"+",
"' -- '",
"+",
"err",
")",
"pass"
] | Uses default program defined by the system to open a file.
References:
http://stackoverflow.com/questions/2692873/quote-posix-shell-special-characters-in-python-output | [
"Uses",
"default",
"program",
"defined",
"by",
"the",
"system",
"to",
"open",
"a",
"file",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L465-L495 | train |
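The platform dispatch in `startfile` boils down to the following stripped-down sketch (no quoting, existence check, or detach handling; the file path is hypothetical):

import os
import subprocess
import sys

fpath = '/tmp/report.pdf'    # hypothetical existing file

if sys.platform.startswith('linux'):
    subprocess.Popen(('xdg-open', fpath))
elif sys.platform == 'darwin':
    subprocess.Popen(('open', fpath))
elif sys.platform == 'win32':
    os.startfile(fpath)      # only available on Windows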
Erotemic/utool | utool/util_cplat.py | view_directory | def view_directory(dname=None, fname=None, verbose=True):
"""
View a directory in the operating system file browser. Currently supports
windows explorer, mac open, and linux nautlius.
Args:
dname (str): directory name
fname (str): a filename to select in the directory (nautlius only)
verbose (bool):
CommandLine:
python -m utool.util_cplat --test-view_directory
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dname = ut.truepath('~')
>>> verbose = True
>>> view_directory(dname, verbose)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_cache_dir('utool', 'test_vd')
>>> dirs = [
>>> '',
>>> 'dir1',
>>> 'has space',
>>> 'space at end ',
>>> ' space at start ',
>>> '"quotes and spaces"',
>>> "'single quotes and spaces'",
>>> 'Frogram Piles (y2K)',
>>> ]
>>> dirs_ = [ut.ensuredir(join(base, d)) for d in dirs]
>>> for dname in dirs_:
>>> ut.view_directory(dname, verbose=False)
>>> fpath = join(base, 'afile.txt')
>>> ut.touch(fpath)
>>> ut.view_directory(base, fpath, verbose=False)
"""
from utool.util_arg import STRICT
from utool.util_path import checkpath
# from utool.util_str import SINGLE_QUOTE, DOUBLE_QUOTE
if HAVE_PATHLIB and isinstance(dname, pathlib.Path):
dname = str(dname)
if verbose:
print('[cplat] view_directory(%r) ' % dname)
dname = os.getcwd() if dname is None else dname
open_prog = {
'win32': 'explorer.exe',
'linux': 'nautilus',
'darwin': 'open'
}[OS_TYPE]
dname = normpath(dname)
if STRICT:
assert checkpath(dname, verbose=verbose), 'directory doesnt exit'
if fname is not None and OS_TYPE == 'linux':
arg = join(dname, fname)
else:
arg = dname
# if ' ' in dname and not dname.startswith((SINGLE_QUOTE, DOUBLE_QUOTE)):
# # Ensure quotations
# dname = '"%s"' % dname
# if not WIN32:
# arg = dname
# # arg = subprocess.list2cmdline([dname])
# # arg = pipes.quote(dname)
# else:
# arg = dname
# spawn and detatch process
args = (open_prog, arg)
print(subprocess.list2cmdline(args))
subprocess.Popen(args) | python | def view_directory(dname=None, fname=None, verbose=True):
"""
View a directory in the operating system file browser. Currently supports
windows explorer, mac open, and linux nautlius.
Args:
dname (str): directory name
fname (str): a filename to select in the directory (nautlius only)
verbose (bool):
CommandLine:
python -m utool.util_cplat --test-view_directory
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dname = ut.truepath('~')
>>> verbose = True
>>> view_directory(dname, verbose)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_cache_dir('utool', 'test_vd')
>>> dirs = [
>>> '',
>>> 'dir1',
>>> 'has space',
>>> 'space at end ',
>>> ' space at start ',
>>> '"quotes and spaces"',
>>> "'single quotes and spaces'",
>>> 'Frogram Piles (y2K)',
>>> ]
>>> dirs_ = [ut.ensuredir(join(base, d)) for d in dirs]
>>> for dname in dirs_:
>>> ut.view_directory(dname, verbose=False)
>>> fpath = join(base, 'afile.txt')
>>> ut.touch(fpath)
>>> ut.view_directory(base, fpath, verbose=False)
"""
from utool.util_arg import STRICT
from utool.util_path import checkpath
# from utool.util_str import SINGLE_QUOTE, DOUBLE_QUOTE
if HAVE_PATHLIB and isinstance(dname, pathlib.Path):
dname = str(dname)
if verbose:
print('[cplat] view_directory(%r) ' % dname)
dname = os.getcwd() if dname is None else dname
open_prog = {
'win32': 'explorer.exe',
'linux': 'nautilus',
'darwin': 'open'
}[OS_TYPE]
dname = normpath(dname)
if STRICT:
assert checkpath(dname, verbose=verbose), 'directory doesnt exit'
if fname is not None and OS_TYPE == 'linux':
arg = join(dname, fname)
else:
arg = dname
# if ' ' in dname and not dname.startswith((SINGLE_QUOTE, DOUBLE_QUOTE)):
# # Ensure quotations
# dname = '"%s"' % dname
# if not WIN32:
# arg = dname
# # arg = subprocess.list2cmdline([dname])
# # arg = pipes.quote(dname)
# else:
# arg = dname
# spawn and detatch process
args = (open_prog, arg)
print(subprocess.list2cmdline(args))
subprocess.Popen(args) | [
"def",
"view_directory",
"(",
"dname",
"=",
"None",
",",
"fname",
"=",
"None",
",",
"verbose",
"=",
"True",
")",
":",
"from",
"utool",
".",
"util_arg",
"import",
"STRICT",
"from",
"utool",
".",
"util_path",
"import",
"checkpath",
"# from utool.util_str import SINGLE_QUOTE, DOUBLE_QUOTE",
"if",
"HAVE_PATHLIB",
"and",
"isinstance",
"(",
"dname",
",",
"pathlib",
".",
"Path",
")",
":",
"dname",
"=",
"str",
"(",
"dname",
")",
"if",
"verbose",
":",
"print",
"(",
"'[cplat] view_directory(%r) '",
"%",
"dname",
")",
"dname",
"=",
"os",
".",
"getcwd",
"(",
")",
"if",
"dname",
"is",
"None",
"else",
"dname",
"open_prog",
"=",
"{",
"'win32'",
":",
"'explorer.exe'",
",",
"'linux'",
":",
"'nautilus'",
",",
"'darwin'",
":",
"'open'",
"}",
"[",
"OS_TYPE",
"]",
"dname",
"=",
"normpath",
"(",
"dname",
")",
"if",
"STRICT",
":",
"assert",
"checkpath",
"(",
"dname",
",",
"verbose",
"=",
"verbose",
")",
",",
"'directory doesnt exit'",
"if",
"fname",
"is",
"not",
"None",
"and",
"OS_TYPE",
"==",
"'linux'",
":",
"arg",
"=",
"join",
"(",
"dname",
",",
"fname",
")",
"else",
":",
"arg",
"=",
"dname",
"# if ' ' in dname and not dname.startswith((SINGLE_QUOTE, DOUBLE_QUOTE)):",
"# # Ensure quotations",
"# dname = '\"%s\"' % dname",
"# if not WIN32:",
"# arg = dname",
"# # arg = subprocess.list2cmdline([dname])",
"# # arg = pipes.quote(dname)",
"# else:",
"# arg = dname",
"# spawn and detatch process",
"args",
"=",
"(",
"open_prog",
",",
"arg",
")",
"print",
"(",
"subprocess",
".",
"list2cmdline",
"(",
"args",
")",
")",
"subprocess",
".",
"Popen",
"(",
"args",
")"
] | View a directory in the operating system file browser. Currently supports
windows explorer, mac open, and linux nautlius.
Args:
dname (str): directory name
fname (str): a filename to select in the directory (nautlius only)
verbose (bool):
CommandLine:
python -m utool.util_cplat --test-view_directory
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> dname = ut.truepath('~')
>>> verbose = True
>>> view_directory(dname, verbose)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> base = ut.ensure_app_cache_dir('utool', 'test_vd')
>>> dirs = [
>>> '',
>>> 'dir1',
>>> 'has space',
>>> 'space at end ',
>>> ' space at start ',
>>> '"quotes and spaces"',
>>> "'single quotes and spaces'",
>>> 'Frogram Piles (y2K)',
>>> ]
>>> dirs_ = [ut.ensuredir(join(base, d)) for d in dirs]
>>> for dname in dirs_:
>>> ut.view_directory(dname, verbose=False)
>>> fpath = join(base, 'afile.txt')
>>> ut.touch(fpath)
>>> ut.view_directory(base, fpath, verbose=False) | [
"View",
"a",
"directory",
"in",
"the",
"operating",
"system",
"file",
"browser",
".",
"Currently",
"supports",
"windows",
"explorer",
"mac",
"open",
"and",
"linux",
"nautlius",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L544-L622 | train |
Erotemic/utool | utool/util_cplat.py | platform_cache_dir | def platform_cache_dir():
"""
Returns a directory which should be writable for any application
This should be used for temporary deletable data.
"""
if WIN32: # nocover
dpath_ = '~/AppData/Local'
elif LINUX: # nocover
dpath_ = '~/.cache'
elif DARWIN: # nocover
dpath_ = '~/Library/Caches'
else: # nocover
raise NotImplementedError('Unknown Platform %r' % (sys.platform,))
dpath = normpath(expanduser(dpath_))
return dpath | python | def platform_cache_dir():
"""
Returns a directory which should be writable for any application
This should be used for temporary deletable data.
"""
if WIN32: # nocover
dpath_ = '~/AppData/Local'
elif LINUX: # nocover
dpath_ = '~/.cache'
elif DARWIN: # nocover
dpath_ = '~/Library/Caches'
else: # nocover
raise NotImplementedError('Unknown Platform %r' % (sys.platform,))
dpath = normpath(expanduser(dpath_))
return dpath | [
"def",
"platform_cache_dir",
"(",
")",
":",
"if",
"WIN32",
":",
"# nocover",
"dpath_",
"=",
"'~/AppData/Local'",
"elif",
"LINUX",
":",
"# nocover",
"dpath_",
"=",
"'~/.cache'",
"elif",
"DARWIN",
":",
"# nocover",
"dpath_",
"=",
"'~/Library/Caches'",
"else",
":",
"# nocover",
"raise",
"NotImplementedError",
"(",
"'Unknown Platform %r'",
"%",
"(",
"sys",
".",
"platform",
",",
")",
")",
"dpath",
"=",
"normpath",
"(",
"expanduser",
"(",
"dpath_",
")",
")",
"return",
"dpath"
] | Returns a directory which should be writable for any application
This should be used for temporary deletable data. | [
"Returns",
"a",
"directory",
"which",
"should",
"be",
"writable",
"for",
"any",
"application",
"This",
"should",
"be",
"used",
"for",
"temporary",
"deletable",
"data",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L633-L647 | train |
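A short usage sketch for the function above; platform_cache_dir takes no arguments and returns the expanded per-OS cache root.

from utool.util_cplat import platform_cache_dir

dpath = platform_cache_dir()
# Linux   -> <home>/.cache
# macOS   -> <home>/Library/Caches
# Windows -> <home>/AppData/Local
print(dpath)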
Erotemic/utool | utool/util_cplat.py | __parse_cmd_args | def __parse_cmd_args(args, sudo, shell):
"""
When shell is True, Popen will only accept strings. No tuples
Shell really should not be true.
Returns:
args suitable for subprocess.Popen
I'm not quite sure what those are yet. Plain old string seem to work
well? But I remember needing shlex at some point.
CommandLine:
python -m utool.util_cplat --test-__parse_cmd_args
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> # build test data
>>> args = 'echo "hello world"'
>>> sudo = False
>>> shell = False
>>> # execute function
>>> args = __parse_cmd_args(args, sudo, shell)
>>> # verify results
>>> result = str(args)
>>> print(result)
"""
# Case where tuple is passed in as only argument
if isinstance(args, tuple) and len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if shell:
# When shell is True, ensure args is a string
if isinstance(args, six.string_types):
pass
elif isinstance(args, (list, tuple)) and len(args) > 1:
args = ' '.join(args)
elif isinstance(args, (list, tuple)) and len(args) == 1:
if isinstance(args[0], (tuple, list)):
args = ' '.join(args)
elif isinstance(args[0], six.string_types):
args = args[0]
else:
# When shell is False, ensure args is a tuple
if isinstance(args, six.string_types):
args = shlex.split(args, posix=not WIN32)
elif isinstance(args, (list, tuple)):
if len(args) > 1:
args = tuple(args)
elif len(args) == 1:
if isinstance(args[0], (tuple, list)):
args = tuple(args[0])
elif isinstance(args[0], six.string_types):
args = shlex.split(args[0], posix=not WIN32)
if sudo is True:
if not WIN32:
if shell:
args = 'sudo ' + args
else:
args = tuple(['sudo']) + tuple(args)
#if isinstance(args, six.string_types):
# args = shlex.split(args)
#args = ['sudo'] + args
## using sudo means we need to use a single string I believe
#args = ' '.join(args)
else:
# TODO: strip out sudos
pass
# HACK FOR WINDOWS AGAIN
# makes this command work:
# python -c "import utool as ut; ut.cmd('build\\hesaffexe.exe ' + ut.grab_test_imgpath('star.png'))"
# and this should still work
# python -c "import utool as ut; ut.cmd('build\\hesaffexe.exe', ut.grab_test_imgpath('star.png'))"
if WIN32:
if len(args) == 1 and isinstance(args[0], six.string_types):
args = shlex.split(args[0], posix=not WIN32)
return args | python | def __parse_cmd_args(args, sudo, shell):
"""
When shell is True, Popen will only accept strings. No tuples
Shell really should not be true.
Returns:
args suitable for subprocess.Popen
I'm not quite sure what those are yet. Plain old string seem to work
well? But I remember needing shlex at some point.
CommandLine:
python -m utool.util_cplat --test-__parse_cmd_args
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> # build test data
>>> args = 'echo "hello world"'
>>> sudo = False
>>> shell = False
>>> # execute function
>>> args = __parse_cmd_args(args, sudo, shell)
>>> # verify results
>>> result = str(args)
>>> print(result)
"""
# Case where tuple is passed in as only argument
if isinstance(args, tuple) and len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if shell:
# When shell is True, ensure args is a string
if isinstance(args, six.string_types):
pass
elif isinstance(args, (list, tuple)) and len(args) > 1:
args = ' '.join(args)
elif isinstance(args, (list, tuple)) and len(args) == 1:
if isinstance(args[0], (tuple, list)):
args = ' '.join(args)
elif isinstance(args[0], six.string_types):
args = args[0]
else:
# When shell is False, ensure args is a tuple
if isinstance(args, six.string_types):
args = shlex.split(args, posix=not WIN32)
elif isinstance(args, (list, tuple)):
if len(args) > 1:
args = tuple(args)
elif len(args) == 1:
if isinstance(args[0], (tuple, list)):
args = tuple(args[0])
elif isinstance(args[0], six.string_types):
args = shlex.split(args[0], posix=not WIN32)
if sudo is True:
if not WIN32:
if shell:
args = 'sudo ' + args
else:
args = tuple(['sudo']) + tuple(args)
#if isinstance(args, six.string_types):
# args = shlex.split(args)
#args = ['sudo'] + args
## using sudo means we need to use a single string I believe
#args = ' '.join(args)
else:
# TODO: strip out sudos
pass
# HACK FOR WINDOWS AGAIN
# makes this command work:
# python -c "import utool as ut; ut.cmd('build\\hesaffexe.exe ' + ut.grab_test_imgpath('star.png'))"
# and this should still work
# python -c "import utool as ut; ut.cmd('build\\hesaffexe.exe', ut.grab_test_imgpath('star.png'))"
if WIN32:
if len(args) == 1 and isinstance(args[0], six.string_types):
args = shlex.split(args[0], posix=not WIN32)
return args | [
"def",
"__parse_cmd_args",
"(",
"args",
",",
"sudo",
",",
"shell",
")",
":",
"# Case where tuple is passed in as only argument",
"if",
"isinstance",
"(",
"args",
",",
"tuple",
")",
"and",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"tuple",
")",
":",
"args",
"=",
"args",
"[",
"0",
"]",
"if",
"shell",
":",
"# When shell is True, ensure args is a string",
"if",
"isinstance",
"(",
"args",
",",
"six",
".",
"string_types",
")",
":",
"pass",
"elif",
"isinstance",
"(",
"args",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"len",
"(",
"args",
")",
">",
"1",
":",
"args",
"=",
"' '",
".",
"join",
"(",
"args",
")",
"elif",
"isinstance",
"(",
"args",
",",
"(",
"list",
",",
"tuple",
")",
")",
"and",
"len",
"(",
"args",
")",
"==",
"1",
":",
"if",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"args",
"=",
"' '",
".",
"join",
"(",
"args",
")",
"elif",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"args",
"=",
"args",
"[",
"0",
"]",
"else",
":",
"# When shell is False, ensure args is a tuple",
"if",
"isinstance",
"(",
"args",
",",
"six",
".",
"string_types",
")",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"args",
",",
"posix",
"=",
"not",
"WIN32",
")",
"elif",
"isinstance",
"(",
"args",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"if",
"len",
"(",
"args",
")",
">",
"1",
":",
"args",
"=",
"tuple",
"(",
"args",
")",
"elif",
"len",
"(",
"args",
")",
"==",
"1",
":",
"if",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"args",
"=",
"tuple",
"(",
"args",
"[",
"0",
"]",
")",
"elif",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"args",
"[",
"0",
"]",
",",
"posix",
"=",
"not",
"WIN32",
")",
"if",
"sudo",
"is",
"True",
":",
"if",
"not",
"WIN32",
":",
"if",
"shell",
":",
"args",
"=",
"'sudo '",
"+",
"args",
"else",
":",
"args",
"=",
"tuple",
"(",
"[",
"'sudo'",
"]",
")",
"+",
"tuple",
"(",
"args",
")",
"#if isinstance(args, six.string_types):",
"# args = shlex.split(args)",
"#args = ['sudo'] + args",
"## using sudo means we need to use a single string I believe",
"#args = ' '.join(args)",
"else",
":",
"# TODO: strip out sudos",
"pass",
"# HACK FOR WINDOWS AGAIN",
"# makes this command work:",
"# python -c \"import utool as ut; ut.cmd('build\\\\hesaffexe.exe ' + ut.grab_test_imgpath('star.png'))\"",
"# and this should still work",
"# python -c \"import utool as ut; ut.cmd('build\\\\hesaffexe.exe', ut.grab_test_imgpath('star.png'))\"",
"if",
"WIN32",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
"and",
"isinstance",
"(",
"args",
"[",
"0",
"]",
",",
"six",
".",
"string_types",
")",
":",
"args",
"=",
"shlex",
".",
"split",
"(",
"args",
"[",
"0",
"]",
",",
"posix",
"=",
"not",
"WIN32",
")",
"return",
"args"
] | When shell is True, Popen will only accept strings. No tuples
Shell really should not be true.
Returns:
args suitable for subprocess.Popen
I'm not quite sure what those are yet. Plain old string seem to work
well? But I remember needing shlex at some point.
CommandLine:
python -m utool.util_cplat --test-__parse_cmd_args
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> # build test data
>>> args = 'echo "hello world"'
>>> sudo = False
>>> shell = False
>>> # execute function
>>> args = __parse_cmd_args(args, sudo, shell)
>>> # verify results
>>> result = str(args)
>>> print(result) | [
"When",
"shell",
"is",
"True",
"Popen",
"will",
"only",
"accept",
"strings",
".",
"No",
"tuples",
"Shell",
"really",
"should",
"not",
"be",
"true",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L714-L790 | train |
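The function above is private, so rather than a call example, here is a condensed sketch (an assumed simplification, not the full branching) of the normalization it performs: shell=True wants a single string, shell=False wants a shlex-split sequence, and sudo is prepended on non-Windows platforms.

import shlex

def normalize_cmd_args(args, sudo=False, shell=False, win32=False):
    # condensed sketch of the two main branches in __parse_cmd_args
    if shell:
        if not isinstance(args, str):
            args = ' '.join(args)                      # shell=True: one string
    else:
        if isinstance(args, str):
            args = shlex.split(args, posix=not win32)  # shell=False: a sequence
    if sudo and not win32:
        args = ('sudo ' + args) if shell else (['sudo'] + list(args))
    return args

# normalize_cmd_args('echo "hello world"')              # ['echo', 'hello world']
# normalize_cmd_args('echo "hello world"', shell=True)  # 'echo "hello world"'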
Erotemic/utool | utool/util_cplat.py | cmd2 | def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):
"""
Trying to clean up cmd
Args:
command (str): string command
shell (bool): if True, process is run in shell
detatch (bool): if True, process is run in background
verbose (int): verbosity mode
verbout (bool): if True, `command` writes to stdout in realtime.
defaults to True iff verbose > 0
Returns:
dict: info - information about command status
"""
import shlex
if isinstance(command, (list, tuple)):
raise ValueError('command tuple not supported yet')
args = shlex.split(command, posix=not WIN32)
if verbose is True:
verbose = 2
if verbout is None:
verbout = verbose >= 1
if verbose >= 2:
print('+=== START CMD2 ===')
print('Command:')
print(command)
if verbout:
print('----')
print('Stdout:')
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=shell,
universal_newlines=True)
if detatch:
info = {'proc': proc}
else:
write_fn = sys.stdout.write
flush_fn = sys.stdout.flush
logged_out = []
for line in _run_process(proc):
#line_ = line if six.PY2 else line.decode('utf-8')
line_ = line if six.PY2 else line
if len(line_) > 0:
if verbout:
write_fn(line_)
flush_fn()
logged_out.append(line)
try:
from utool import util_str # NOQA
# out = '\n'.join(logged_out)
out = ''.join(logged_out)
except UnicodeDecodeError:
from utool import util_str # NOQA
logged_out = util_str.ensure_unicode_strlist(logged_out)
# out = '\n'.join(logged_out)
out = ''.join(logged_out)
# print('logged_out = %r' % (logged_out,))
# raise
(out_, err) = proc.communicate()
ret = proc.wait()
info = {
'out': out,
'err': err,
'ret': ret,
}
if verbose >= 2:
print('L___ END CMD2 ___')
return info | python | def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):
"""
Trying to clean up cmd
Args:
command (str): string command
shell (bool): if True, process is run in shell
detatch (bool): if True, process is run in background
verbose (int): verbosity mode
verbout (bool): if True, `command` writes to stdout in realtime.
defaults to True iff verbose > 0
Returns:
dict: info - information about command status
"""
import shlex
if isinstance(command, (list, tuple)):
raise ValueError('command tuple not supported yet')
args = shlex.split(command, posix=not WIN32)
if verbose is True:
verbose = 2
if verbout is None:
verbout = verbose >= 1
if verbose >= 2:
print('+=== START CMD2 ===')
print('Command:')
print(command)
if verbout:
print('----')
print('Stdout:')
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=shell,
universal_newlines=True)
if detatch:
info = {'proc': proc}
else:
write_fn = sys.stdout.write
flush_fn = sys.stdout.flush
logged_out = []
for line in _run_process(proc):
#line_ = line if six.PY2 else line.decode('utf-8')
line_ = line if six.PY2 else line
if len(line_) > 0:
if verbout:
write_fn(line_)
flush_fn()
logged_out.append(line)
try:
from utool import util_str # NOQA
# out = '\n'.join(logged_out)
out = ''.join(logged_out)
except UnicodeDecodeError:
from utool import util_str # NOQA
logged_out = util_str.ensure_unicode_strlist(logged_out)
# out = '\n'.join(logged_out)
out = ''.join(logged_out)
# print('logged_out = %r' % (logged_out,))
# raise
(out_, err) = proc.communicate()
ret = proc.wait()
info = {
'out': out,
'err': err,
'ret': ret,
}
if verbose >= 2:
print('L___ END CMD2 ___')
return info | [
"def",
"cmd2",
"(",
"command",
",",
"shell",
"=",
"False",
",",
"detatch",
"=",
"False",
",",
"verbose",
"=",
"False",
",",
"verbout",
"=",
"None",
")",
":",
"import",
"shlex",
"if",
"isinstance",
"(",
"command",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"ValueError",
"(",
"'command tuple not supported yet'",
")",
"args",
"=",
"shlex",
".",
"split",
"(",
"command",
",",
"posix",
"=",
"not",
"WIN32",
")",
"if",
"verbose",
"is",
"True",
":",
"verbose",
"=",
"2",
"if",
"verbout",
"is",
"None",
":",
"verbout",
"=",
"verbose",
">=",
"1",
"if",
"verbose",
">=",
"2",
":",
"print",
"(",
"'+=== START CMD2 ==='",
")",
"print",
"(",
"'Command:'",
")",
"print",
"(",
"command",
")",
"if",
"verbout",
":",
"print",
"(",
"'----'",
")",
"print",
"(",
"'Stdout:'",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
",",
"shell",
"=",
"shell",
",",
"universal_newlines",
"=",
"True",
")",
"if",
"detatch",
":",
"info",
"=",
"{",
"'proc'",
":",
"proc",
"}",
"else",
":",
"write_fn",
"=",
"sys",
".",
"stdout",
".",
"write",
"flush_fn",
"=",
"sys",
".",
"stdout",
".",
"flush",
"logged_out",
"=",
"[",
"]",
"for",
"line",
"in",
"_run_process",
"(",
"proc",
")",
":",
"#line_ = line if six.PY2 else line.decode('utf-8')",
"line_",
"=",
"line",
"if",
"six",
".",
"PY2",
"else",
"line",
"if",
"len",
"(",
"line_",
")",
">",
"0",
":",
"if",
"verbout",
":",
"write_fn",
"(",
"line_",
")",
"flush_fn",
"(",
")",
"logged_out",
".",
"append",
"(",
"line",
")",
"try",
":",
"from",
"utool",
"import",
"util_str",
"# NOQA",
"# out = '\\n'.join(logged_out)",
"out",
"=",
"''",
".",
"join",
"(",
"logged_out",
")",
"except",
"UnicodeDecodeError",
":",
"from",
"utool",
"import",
"util_str",
"# NOQA",
"logged_out",
"=",
"util_str",
".",
"ensure_unicode_strlist",
"(",
"logged_out",
")",
"# out = '\\n'.join(logged_out)",
"out",
"=",
"''",
".",
"join",
"(",
"logged_out",
")",
"# print('logged_out = %r' % (logged_out,))",
"# raise",
"(",
"out_",
",",
"err",
")",
"=",
"proc",
".",
"communicate",
"(",
")",
"ret",
"=",
"proc",
".",
"wait",
"(",
")",
"info",
"=",
"{",
"'out'",
":",
"out",
",",
"'err'",
":",
"err",
",",
"'ret'",
":",
"ret",
",",
"}",
"if",
"verbose",
">=",
"2",
":",
"print",
"(",
"'L___ END CMD2 ___'",
")",
"return",
"info"
] | Trying to clean up cmd
Args:
command (str): string command
shell (bool): if True, process is run in shell
detatch (bool): if True, process is run in background
verbose (int): verbosity mode
verbout (bool): if True, `command` writes to stdout in realtime.
defaults to True iff verbose > 0
Returns:
dict: info - information about command status | [
"Trying",
"to",
"clean",
"up",
"cmd"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L1005-L1072 | train |
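A usage sketch based on the documented return value; it assumes a Unix-like command, and stderr is folded into stdout because the call uses stderr=subprocess.STDOUT.

from utool.util_cplat import cmd2

info = cmd2('echo hello', verbose=True)
print(info['ret'])   # process return code, 0 on success
print(info['out'])   # captured stdout (stderr is merged into it)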
Erotemic/utool | utool/util_cplat.py | search_env_paths | def search_env_paths(fname, key_list=None, verbose=None):
r"""
Searches your PATH to see if fname exists
Args:
fname (str): file name to search for (can be glob pattern)
CommandLine:
python -m utool search_env_paths --fname msvcr*.dll
python -m utool search_env_paths --fname '*flann*'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> fname = 'opencv2/highgui/libopencv_highgui.so'
>>> fname = ut.get_argval('--fname', default='*')
>>> print('fname = %r' % (fname,))
>>> key_list = None # ['PATH']
>>> found = search_env_paths(fname, key_list)
>>> print(ut.repr4(found, nl=True, strvals=True))
Ignore:
OpenCV_DIR:PATH={share_opencv}
OpenCV_CONFIG_PATH:FILEPATH={share_opencv}
"""
import utool as ut
# from os.path import join
if key_list is None:
key_list = [key for key in os.environ if key.find('PATH') > -1]
print('key_list = %r' % (key_list,))
found = ut.ddict(list)
for key in key_list:
dpath_list = os.environ[key].split(os.pathsep)
for dpath in dpath_list:
#if verbose:
# print('dpath = %r' % (dpath,))
# testname = join(dpath, fname)
matches = ut.glob(dpath, fname)
found[key].extend(matches)
#import fnmatch
#import utool
#utool.embed()
#if ut.checkpath(testname, verbose=False):
# if verbose:
# print('Found in key=%r' % (key,))
# ut.checkpath(testname, verbose=True, info=True)
# found += [testname]
return dict(found) | python | def search_env_paths(fname, key_list=None, verbose=None):
r"""
Searches your PATH to see if fname exists
Args:
fname (str): file name to search for (can be glob pattern)
CommandLine:
python -m utool search_env_paths --fname msvcr*.dll
python -m utool search_env_paths --fname '*flann*'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> fname = 'opencv2/highgui/libopencv_highgui.so'
>>> fname = ut.get_argval('--fname', default='*')
>>> print('fname = %r' % (fname,))
>>> key_list = None # ['PATH']
>>> found = search_env_paths(fname, key_list)
>>> print(ut.repr4(found, nl=True, strvals=True))
Ignore:
OpenCV_DIR:PATH={share_opencv}
OpenCV_CONFIG_PATH:FILEPATH={share_opencv}
"""
import utool as ut
# from os.path import join
if key_list is None:
key_list = [key for key in os.environ if key.find('PATH') > -1]
print('key_list = %r' % (key_list,))
found = ut.ddict(list)
for key in key_list:
dpath_list = os.environ[key].split(os.pathsep)
for dpath in dpath_list:
#if verbose:
# print('dpath = %r' % (dpath,))
# testname = join(dpath, fname)
matches = ut.glob(dpath, fname)
found[key].extend(matches)
#import fnmatch
#import utool
#utool.embed()
#if ut.checkpath(testname, verbose=False):
# if verbose:
# print('Found in key=%r' % (key,))
# ut.checkpath(testname, verbose=True, info=True)
# found += [testname]
return dict(found) | [
"def",
"search_env_paths",
"(",
"fname",
",",
"key_list",
"=",
"None",
",",
"verbose",
"=",
"None",
")",
":",
"import",
"utool",
"as",
"ut",
"# from os.path import join",
"if",
"key_list",
"is",
"None",
":",
"key_list",
"=",
"[",
"key",
"for",
"key",
"in",
"os",
".",
"environ",
"if",
"key",
".",
"find",
"(",
"'PATH'",
")",
">",
"-",
"1",
"]",
"print",
"(",
"'key_list = %r'",
"%",
"(",
"key_list",
",",
")",
")",
"found",
"=",
"ut",
".",
"ddict",
"(",
"list",
")",
"for",
"key",
"in",
"key_list",
":",
"dpath_list",
"=",
"os",
".",
"environ",
"[",
"key",
"]",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"for",
"dpath",
"in",
"dpath_list",
":",
"#if verbose:",
"# print('dpath = %r' % (dpath,))",
"# testname = join(dpath, fname)",
"matches",
"=",
"ut",
".",
"glob",
"(",
"dpath",
",",
"fname",
")",
"found",
"[",
"key",
"]",
".",
"extend",
"(",
"matches",
")",
"#import fnmatch",
"#import utool",
"#utool.embed()",
"#if ut.checkpath(testname, verbose=False):",
"# if verbose:",
"# print('Found in key=%r' % (key,))",
"# ut.checkpath(testname, verbose=True, info=True)",
"# found += [testname]",
"return",
"dict",
"(",
"found",
")"
] | r"""
Searches your PATH to see if fname exists
Args:
fname (str): file name to search for (can be glob pattern)
CommandLine:
python -m utool search_env_paths --fname msvcr*.dll
python -m utool search_env_paths --fname '*flann*'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> import utool as ut
>>> fname = 'opencv2/highgui/libopencv_highgui.so'
>>> fname = ut.get_argval('--fname', default='*')
>>> print('fname = %r' % (fname,))
>>> key_list = None # ['PATH']
>>> found = search_env_paths(fname, key_list)
>>> print(ut.repr4(found, nl=True, strvals=True))
Ignore:
OpenCV_DIR:PATH={share_opencv}
OpenCV_CONFIG_PATH:FILEPATH={share_opencv} | [
"r",
"Searches",
"your",
"PATH",
"to",
"see",
"if",
"fname",
"exists"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L1187-L1238 | train |
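A usage sketch mirroring the doctest and CommandLine entries above; the glob pattern is only an example.

from utool.util_cplat import search_env_paths

# searches every environment variable whose name contains 'PATH'
found = search_env_paths('*flann*')
for key, matches in found.items():
    print(key, matches)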
Erotemic/utool | utool/util_cplat.py | change_term_title | def change_term_title(title):
"""
only works on unix systems only tested on Ubuntu GNOME changes text on
terminal title for identifying debugging tasks.
The title will remain until python exists
Args:
title (str):
References:
http://stackoverflow.com/questions/5343265/setting-title-for-tabs-in-terminator-console-application-in-ubuntu/8850484#8850484
CommandLine:
python -m utool change_term_title
echo -en "\033]0;newtitle\a"
printf "\e]2;newtitle\a";
echo -en "\033]0;DocTest /home/joncrall/code/ibeis/ibeis.algo.graph.core.py --test-AnnotInference._make_state_delta\a"
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> title = 'change title test'
>>> result = change_term_title(title)
>>> print(result)
"""
if True:
# Disabled
return
if not WIN32:
#print("CHANGE TERM TITLE to %r" % (title,))
if title:
#os.environ['PS1'] = os.environ['PS1'] + '''"\e]2;\"''' + title + '''\"\a"'''
cmd_str = r'''echo -en "\033]0;''' + title + '''\a"'''
os.system(cmd_str) | python | def change_term_title(title):
"""
only works on unix systems only tested on Ubuntu GNOME changes text on
terminal title for identifying debugging tasks.
The title will remain until python exists
Args:
title (str):
References:
http://stackoverflow.com/questions/5343265/setting-title-for-tabs-in-terminator-console-application-in-ubuntu/8850484#8850484
CommandLine:
python -m utool change_term_title
echo -en "\033]0;newtitle\a"
printf "\e]2;newtitle\a";
echo -en "\033]0;DocTest /home/joncrall/code/ibeis/ibeis.algo.graph.core.py --test-AnnotInference._make_state_delta\a"
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> title = 'change title test'
>>> result = change_term_title(title)
>>> print(result)
"""
if True:
# Disabled
return
if not WIN32:
#print("CHANGE TERM TITLE to %r" % (title,))
if title:
#os.environ['PS1'] = os.environ['PS1'] + '''"\e]2;\"''' + title + '''\"\a"'''
cmd_str = r'''echo -en "\033]0;''' + title + '''\a"'''
os.system(cmd_str) | [
"def",
"change_term_title",
"(",
"title",
")",
":",
"if",
"True",
":",
"# Disabled",
"return",
"if",
"not",
"WIN32",
":",
"#print(\"CHANGE TERM TITLE to %r\" % (title,))",
"if",
"title",
":",
"#os.environ['PS1'] = os.environ['PS1'] + '''\"\\e]2;\\\"''' + title + '''\\\"\\a\"'''",
"cmd_str",
"=",
"r'''echo -en \"\\033]0;'''",
"+",
"title",
"+",
"'''\\a\"'''",
"os",
".",
"system",
"(",
"cmd_str",
")"
] | only works on unix systems only tested on Ubuntu GNOME changes text on
terminal title for identifying debugging tasks.
The title will remain until python exists
Args:
title (str):
References:
http://stackoverflow.com/questions/5343265/setting-title-for-tabs-in-terminator-console-application-in-ubuntu/8850484#8850484
CommandLine:
python -m utool change_term_title
echo -en "\033]0;newtitle\a"
printf "\e]2;newtitle\a";
echo -en "\033]0;DocTest /home/joncrall/code/ibeis/ibeis.algo.graph.core.py --test-AnnotInference._make_state_delta\a"
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> title = 'change title test'
>>> result = change_term_title(title)
>>> print(result) | [
"only",
"works",
"on",
"unix",
"systems",
"only",
"tested",
"on",
"Ubuntu",
"GNOME",
"changes",
"text",
"on",
"terminal",
"title",
"for",
"identifying",
"debugging",
"tasks",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L1264-L1300 | train |
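The recorded function is currently short-circuited by its `if True: return` guard, so the useful part is the escape sequence it would emit; a minimal stand-alone sketch of that sequence follows.

import sys

def set_term_title(title):
    # xterm-style title escape: ESC ] 0 ; <title> BEL
    sys.stdout.write('\033]0;%s\a' % title)
    sys.stdout.flush()

# set_term_title('DocTest my_module')  # only meaningful in unix terminals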
Erotemic/utool | utool/util_cplat.py | unload_module | def unload_module(modname):
"""
WARNING POTENTIALLY DANGEROUS AND MAY NOT WORK
References:
http://stackoverflow.com/questions/437589/how-do-i-unload-reload-a-python-module
CommandLine:
python -m utool.util_cplat --test-unload_module
Example:
>>> # DISABLE_DOCTEST
>>> import sys, gc # NOQA
>>> import pyhesaff
>>> import utool as ut
>>> modname = 'pyhesaff'
>>> print('%s refcount=%r' % (modname, sys.getrefcount(pyhesaff),))
>>> #referrer_list = gc.get_referrers(sys.modules[modname])
>>> #print('referrer_list = %s' % (ut.repr4(referrer_list),))
>>> ut.unload_module(modname)
>>> assert pyhesaff is None
"""
import sys
import gc
if modname in sys.modules:
referrer_list = gc.get_referrers(sys.modules[modname])
#module = sys.modules[modname]
for referer in referrer_list:
if referer is not sys.modules:
referer[modname] = None
#del referer[modname]
#sys.modules[modname] = module
#del module
refcount = sys.getrefcount(sys.modules[modname])
print('%s refcount=%r' % (modname, refcount))
del sys.modules[modname] | python | def unload_module(modname):
"""
WARNING POTENTIALLY DANGEROUS AND MAY NOT WORK
References:
http://stackoverflow.com/questions/437589/how-do-i-unload-reload-a-python-module
CommandLine:
python -m utool.util_cplat --test-unload_module
Example:
>>> # DISABLE_DOCTEST
>>> import sys, gc # NOQA
>>> import pyhesaff
>>> import utool as ut
>>> modname = 'pyhesaff'
>>> print('%s refcount=%r' % (modname, sys.getrefcount(pyhesaff),))
>>> #referrer_list = gc.get_referrers(sys.modules[modname])
>>> #print('referrer_list = %s' % (ut.repr4(referrer_list),))
>>> ut.unload_module(modname)
>>> assert pyhesaff is None
"""
import sys
import gc
if modname in sys.modules:
referrer_list = gc.get_referrers(sys.modules[modname])
#module = sys.modules[modname]
for referer in referrer_list:
if referer is not sys.modules:
referer[modname] = None
#del referer[modname]
#sys.modules[modname] = module
#del module
refcount = sys.getrefcount(sys.modules[modname])
print('%s refcount=%r' % (modname, refcount))
del sys.modules[modname] | [
"def",
"unload_module",
"(",
"modname",
")",
":",
"import",
"sys",
"import",
"gc",
"if",
"modname",
"in",
"sys",
".",
"modules",
":",
"referrer_list",
"=",
"gc",
".",
"get_referrers",
"(",
"sys",
".",
"modules",
"[",
"modname",
"]",
")",
"#module = sys.modules[modname]",
"for",
"referer",
"in",
"referrer_list",
":",
"if",
"referer",
"is",
"not",
"sys",
".",
"modules",
":",
"referer",
"[",
"modname",
"]",
"=",
"None",
"#del referer[modname]",
"#sys.modules[modname] = module",
"#del module",
"refcount",
"=",
"sys",
".",
"getrefcount",
"(",
"sys",
".",
"modules",
"[",
"modname",
"]",
")",
"print",
"(",
"'%s refcount=%r'",
"%",
"(",
"modname",
",",
"refcount",
")",
")",
"del",
"sys",
".",
"modules",
"[",
"modname",
"]"
] | WARNING POTENTIALLY DANGEROUS AND MAY NOT WORK
References:
http://stackoverflow.com/questions/437589/how-do-i-unload-reload-a-python-module
CommandLine:
python -m utool.util_cplat --test-unload_module
Example:
>>> # DISABLE_DOCTEST
>>> import sys, gc # NOQA
>>> import pyhesaff
>>> import utool as ut
>>> modname = 'pyhesaff'
>>> print('%s refcount=%r' % (modname, sys.getrefcount(pyhesaff),))
>>> #referrer_list = gc.get_referrers(sys.modules[modname])
>>> #print('referrer_list = %s' % (ut.repr4(referrer_list),))
>>> ut.unload_module(modname)
>>> assert pyhesaff is None | [
"WARNING",
"POTENTIALLY",
"DANGEROUS",
"AND",
"MAY",
"NOT",
"WORK"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L1450-L1486 | train |
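A condensed sketch of the same referrer-clearing recipe; as the docstring warns, this is fragile and may not fully release the module, so treat it as illustrative only.

import gc
import sys

def unload_module_sketch(modname):
    module = sys.modules.get(modname)
    if module is None:
        return
    for referrer in gc.get_referrers(module):
        # only touch plain namespace dicts, and never sys.modules itself
        if isinstance(referrer, dict) and referrer is not sys.modules:
            referrer[modname] = None
    del sys.modules[modname]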
glormph/msstitch | src/app/actions/shared/pepprot_isoquant.py | base_add_isoquant_data | def base_add_isoquant_data(features, quantfeatures, acc_col, quantacc_col,
quantfields):
"""Generic function that takes a peptide or protein table and adds
quant data from ANOTHER such table."""
quant_map = get_quantmap(quantfeatures, quantacc_col, quantfields)
for feature in features:
feat_acc = feature[acc_col]
outfeat = {k: v for k, v in feature.items()}
try:
outfeat.update(quant_map[feat_acc])
except KeyError:
outfeat.update({field: 'NA' for field in quantfields})
yield outfeat | python | def base_add_isoquant_data(features, quantfeatures, acc_col, quantacc_col,
quantfields):
"""Generic function that takes a peptide or protein table and adds
quant data from ANOTHER such table."""
quant_map = get_quantmap(quantfeatures, quantacc_col, quantfields)
for feature in features:
feat_acc = feature[acc_col]
outfeat = {k: v for k, v in feature.items()}
try:
outfeat.update(quant_map[feat_acc])
except KeyError:
outfeat.update({field: 'NA' for field in quantfields})
yield outfeat | [
"def",
"base_add_isoquant_data",
"(",
"features",
",",
"quantfeatures",
",",
"acc_col",
",",
"quantacc_col",
",",
"quantfields",
")",
":",
"quant_map",
"=",
"get_quantmap",
"(",
"quantfeatures",
",",
"quantacc_col",
",",
"quantfields",
")",
"for",
"feature",
"in",
"features",
":",
"feat_acc",
"=",
"feature",
"[",
"acc_col",
"]",
"outfeat",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"feature",
".",
"items",
"(",
")",
"}",
"try",
":",
"outfeat",
".",
"update",
"(",
"quant_map",
"[",
"feat_acc",
"]",
")",
"except",
"KeyError",
":",
"outfeat",
".",
"update",
"(",
"{",
"field",
":",
"'NA'",
"for",
"field",
"in",
"quantfields",
"}",
")",
"yield",
"outfeat"
] | Generic function that takes a peptide or protein table and adds
quant data from ANOTHER such table. | [
"Generic",
"function",
"that",
"takes",
"a",
"peptide",
"or",
"protein",
"table",
"and",
"adds",
"quant",
"data",
"from",
"ANOTHER",
"such",
"table",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/shared/pepprot_isoquant.py#L4-L16 | train |
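A toy usage sketch; the column names ('Protein', 'tmt_126', 'tmt_127') are hypothetical placeholders for whatever accession and channel headers the tables actually use, and the import path is inferred from the recorded file path.

from app.actions.shared.pepprot_isoquant import base_add_isoquant_data

proteins = [{'Protein': 'P1', 'peptides': 4},
            {'Protein': 'P2', 'peptides': 2}]
quanted = [{'Protein': 'P1', 'tmt_126': '1.2', 'tmt_127': '0.8'}]

merged = list(base_add_isoquant_data(proteins, quanted,
                                     'Protein', 'Protein',
                                     ['tmt_126', 'tmt_127']))
# P1 picks up both channel values; P2 falls back to 'NA' for each quant field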
glormph/msstitch | src/app/actions/shared/pepprot_isoquant.py | get_quantmap | def get_quantmap(features, acc_col, quantfields):
"""Runs through proteins that are in a quanted protein table, extracts
and maps their information based on the quantfields list input.
Map is a dict with protein_accessions as keys."""
qmap = {}
for feature in features:
feat_acc = feature.pop(acc_col)
qmap[feat_acc] = {qf: feature[qf] for qf in quantfields}
return qmap | python | def get_quantmap(features, acc_col, quantfields):
"""Runs through proteins that are in a quanted protein table, extracts
and maps their information based on the quantfields list input.
Map is a dict with protein_accessions as keys."""
qmap = {}
for feature in features:
feat_acc = feature.pop(acc_col)
qmap[feat_acc] = {qf: feature[qf] for qf in quantfields}
return qmap | [
"def",
"get_quantmap",
"(",
"features",
",",
"acc_col",
",",
"quantfields",
")",
":",
"qmap",
"=",
"{",
"}",
"for",
"feature",
"in",
"features",
":",
"feat_acc",
"=",
"feature",
".",
"pop",
"(",
"acc_col",
")",
"qmap",
"[",
"feat_acc",
"]",
"=",
"{",
"qf",
":",
"feature",
"[",
"qf",
"]",
"for",
"qf",
"in",
"quantfields",
"}",
"return",
"qmap"
] | Runs through proteins that are in a quanted protein table, extracts
and maps their information based on the quantfields list input.
Map is a dict with protein_accessions as keys. | [
"Runs",
"through",
"proteins",
"that",
"are",
"in",
"a",
"quanted",
"protein",
"table",
"extracts",
"and",
"maps",
"their",
"information",
"based",
"on",
"the",
"quantfields",
"list",
"input",
".",
"Map",
"is",
"a",
"dict",
"with",
"protein_accessions",
"as",
"keys",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/shared/pepprot_isoquant.py#L19-L27 | train |
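And the mapping helper on its own, with toy values again; note that it pops the accession key out of each input dict, so it mutates its input rows.

rows = [{'Protein': 'P1', 'tmt_126': '1.2'},
        {'Protein': 'P2', 'tmt_126': '0.9'}]
qmap = get_quantmap(rows, 'Protein', ['tmt_126'])
# qmap == {'P1': {'tmt_126': '1.2'}, 'P2': {'tmt_126': '0.9'}}
# rows no longer contain the 'Protein' key after the call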
Erotemic/utool | utool/util_gridsearch.py | partition_varied_cfg_list | def partition_varied_cfg_list(cfg_list, default_cfg=None, recursive=False):
r"""
Separates varied from non-varied parameters in a list of configs
TODO: partition nested configs
CommandLine:
python -m utool.util_gridsearch --exec-partition_varied_cfg_list:0
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg_list = [{'f': 1, 'b': 1}, {'f': 2, 'b': 1}, {'f': 3, 'b': 1, 'z': 4}]
>>> nonvaried_cfg, varied_cfg_list = partition_varied_cfg_list(cfg_list)
>>> result = ut.repr4({'nonvaried_cfg': nonvaried_cfg,
>>> 'varied_cfg_list': varied_cfg_list}, explicit=1, nobr=True, nl=1)
>>> print(result)
nonvaried_cfg={'b': 1},
varied_cfg_list=[{'f': 1}, {'f': 2}, {'f': 3, 'z': 4}],
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg_list = [{'q1': 1, 'f1': {'a2': {'x3': 1, 'y3': 2}, 'b2': 1}}, {'q1': 1, 'f1': {'a2': {'x3': 1, 'y3': 1}, 'b2': 1}, 'e1': 1}]
>>> print(ut.repr4(cfg_list, nl=1))
>>> nonvaried_cfg, varied_cfg_list = partition_varied_cfg_list(cfg_list, recursive=True)
>>> result = ut.repr4({'nonvaried_cfg': nonvaried_cfg,
>>> 'varied_cfg_list': varied_cfg_list}, explicit=1, nobr=True, nl=1)
>>> print(result)
nonvaried_cfg={'f1': {'a2': {'x3': 1}, 'b2': 1}, 'q1': 1},
varied_cfg_list=[{'f1': {'a2': {'y3': 2}}}, {'e1': 1, 'f1': {'a2': {'y3': 1}}}],
"""
import utool as ut
if default_cfg is None:
nonvaried_cfg = reduce(ut.dict_intersection, cfg_list)
else:
nonvaried_cfg = reduce(ut.dict_intersection, [default_cfg] + cfg_list)
nonvaried_keys = list(nonvaried_cfg.keys())
varied_cfg_list = [
ut.delete_dict_keys(cfg.copy(), nonvaried_keys)
for cfg in cfg_list]
if recursive:
# Find which varied keys have dict values
varied_keys = list(set([key for cfg in varied_cfg_list for key in cfg]))
varied_vals_list = [[cfg[key] for cfg in varied_cfg_list if key in cfg] for key in varied_keys]
for key, varied_vals in zip(varied_keys, varied_vals_list):
if len(varied_vals) == len(cfg_list):
if all([isinstance(val, dict) for val in varied_vals]):
nonvaried_subdict, varied_subdicts = partition_varied_cfg_list(varied_vals, recursive=recursive)
nonvaried_cfg[key] = nonvaried_subdict
for cfg, subdict in zip(varied_cfg_list, varied_subdicts):
cfg[key] = subdict
return nonvaried_cfg, varied_cfg_list | python | def partition_varied_cfg_list(cfg_list, default_cfg=None, recursive=False):
r"""
Separates varied from non-varied parameters in a list of configs
TODO: partition nested configs
CommandLine:
python -m utool.util_gridsearch --exec-partition_varied_cfg_list:0
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg_list = [{'f': 1, 'b': 1}, {'f': 2, 'b': 1}, {'f': 3, 'b': 1, 'z': 4}]
>>> nonvaried_cfg, varied_cfg_list = partition_varied_cfg_list(cfg_list)
>>> result = ut.repr4({'nonvaried_cfg': nonvaried_cfg,
>>> 'varied_cfg_list': varied_cfg_list}, explicit=1, nobr=True, nl=1)
>>> print(result)
nonvaried_cfg={'b': 1},
varied_cfg_list=[{'f': 1}, {'f': 2}, {'f': 3, 'z': 4}],
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg_list = [{'q1': 1, 'f1': {'a2': {'x3': 1, 'y3': 2}, 'b2': 1}}, {'q1': 1, 'f1': {'a2': {'x3': 1, 'y3': 1}, 'b2': 1}, 'e1': 1}]
>>> print(ut.repr4(cfg_list, nl=1))
>>> nonvaried_cfg, varied_cfg_list = partition_varied_cfg_list(cfg_list, recursive=True)
>>> result = ut.repr4({'nonvaried_cfg': nonvaried_cfg,
>>> 'varied_cfg_list': varied_cfg_list}, explicit=1, nobr=True, nl=1)
>>> print(result)
nonvaried_cfg={'f1': {'a2': {'x3': 1}, 'b2': 1}, 'q1': 1},
varied_cfg_list=[{'f1': {'a2': {'y3': 2}}}, {'e1': 1, 'f1': {'a2': {'y3': 1}}}],
"""
import utool as ut
if default_cfg is None:
nonvaried_cfg = reduce(ut.dict_intersection, cfg_list)
else:
nonvaried_cfg = reduce(ut.dict_intersection, [default_cfg] + cfg_list)
nonvaried_keys = list(nonvaried_cfg.keys())
varied_cfg_list = [
ut.delete_dict_keys(cfg.copy(), nonvaried_keys)
for cfg in cfg_list]
if recursive:
# Find which varied keys have dict values
varied_keys = list(set([key for cfg in varied_cfg_list for key in cfg]))
varied_vals_list = [[cfg[key] for cfg in varied_cfg_list if key in cfg] for key in varied_keys]
for key, varied_vals in zip(varied_keys, varied_vals_list):
if len(varied_vals) == len(cfg_list):
if all([isinstance(val, dict) for val in varied_vals]):
nonvaried_subdict, varied_subdicts = partition_varied_cfg_list(varied_vals, recursive=recursive)
nonvaried_cfg[key] = nonvaried_subdict
for cfg, subdict in zip(varied_cfg_list, varied_subdicts):
cfg[key] = subdict
return nonvaried_cfg, varied_cfg_list | [
"def",
"partition_varied_cfg_list",
"(",
"cfg_list",
",",
"default_cfg",
"=",
"None",
",",
"recursive",
"=",
"False",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"default_cfg",
"is",
"None",
":",
"nonvaried_cfg",
"=",
"reduce",
"(",
"ut",
".",
"dict_intersection",
",",
"cfg_list",
")",
"else",
":",
"nonvaried_cfg",
"=",
"reduce",
"(",
"ut",
".",
"dict_intersection",
",",
"[",
"default_cfg",
"]",
"+",
"cfg_list",
")",
"nonvaried_keys",
"=",
"list",
"(",
"nonvaried_cfg",
".",
"keys",
"(",
")",
")",
"varied_cfg_list",
"=",
"[",
"ut",
".",
"delete_dict_keys",
"(",
"cfg",
".",
"copy",
"(",
")",
",",
"nonvaried_keys",
")",
"for",
"cfg",
"in",
"cfg_list",
"]",
"if",
"recursive",
":",
"# Find which varied keys have dict values",
"varied_keys",
"=",
"list",
"(",
"set",
"(",
"[",
"key",
"for",
"cfg",
"in",
"varied_cfg_list",
"for",
"key",
"in",
"cfg",
"]",
")",
")",
"varied_vals_list",
"=",
"[",
"[",
"cfg",
"[",
"key",
"]",
"for",
"cfg",
"in",
"varied_cfg_list",
"if",
"key",
"in",
"cfg",
"]",
"for",
"key",
"in",
"varied_keys",
"]",
"for",
"key",
",",
"varied_vals",
"in",
"zip",
"(",
"varied_keys",
",",
"varied_vals_list",
")",
":",
"if",
"len",
"(",
"varied_vals",
")",
"==",
"len",
"(",
"cfg_list",
")",
":",
"if",
"all",
"(",
"[",
"isinstance",
"(",
"val",
",",
"dict",
")",
"for",
"val",
"in",
"varied_vals",
"]",
")",
":",
"nonvaried_subdict",
",",
"varied_subdicts",
"=",
"partition_varied_cfg_list",
"(",
"varied_vals",
",",
"recursive",
"=",
"recursive",
")",
"nonvaried_cfg",
"[",
"key",
"]",
"=",
"nonvaried_subdict",
"for",
"cfg",
",",
"subdict",
"in",
"zip",
"(",
"varied_cfg_list",
",",
"varied_subdicts",
")",
":",
"cfg",
"[",
"key",
"]",
"=",
"subdict",
"return",
"nonvaried_cfg",
",",
"varied_cfg_list"
] | r"""
Separates varied from non-varied parameters in a list of configs
TODO: partition nested configs
CommandLine:
python -m utool.util_gridsearch --exec-partition_varied_cfg_list:0
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg_list = [{'f': 1, 'b': 1}, {'f': 2, 'b': 1}, {'f': 3, 'b': 1, 'z': 4}]
>>> nonvaried_cfg, varied_cfg_list = partition_varied_cfg_list(cfg_list)
>>> result = ut.repr4({'nonvaried_cfg': nonvaried_cfg,
>>> 'varied_cfg_list': varied_cfg_list}, explicit=1, nobr=True, nl=1)
>>> print(result)
nonvaried_cfg={'b': 1},
varied_cfg_list=[{'f': 1}, {'f': 2}, {'f': 3, 'z': 4}],
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg_list = [{'q1': 1, 'f1': {'a2': {'x3': 1, 'y3': 2}, 'b2': 1}}, {'q1': 1, 'f1': {'a2': {'x3': 1, 'y3': 1}, 'b2': 1}, 'e1': 1}]
>>> print(ut.repr4(cfg_list, nl=1))
>>> nonvaried_cfg, varied_cfg_list = partition_varied_cfg_list(cfg_list, recursive=True)
>>> result = ut.repr4({'nonvaried_cfg': nonvaried_cfg,
>>> 'varied_cfg_list': varied_cfg_list}, explicit=1, nobr=True, nl=1)
>>> print(result)
nonvaried_cfg={'f1': {'a2': {'x3': 1}, 'b2': 1}, 'q1': 1},
varied_cfg_list=[{'f1': {'a2': {'y3': 2}}}, {'e1': 1, 'f1': {'a2': {'y3': 1}}}], | [
"r",
"Separates",
"varied",
"from",
"non",
"-",
"varied",
"parameters",
"in",
"a",
"list",
"of",
"configs"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L210-L264 | train |
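A usage sketch taken straight from the first doctest above.

import utool as ut

cfg_list = [{'f': 1, 'b': 1}, {'f': 2, 'b': 1}, {'f': 3, 'b': 1, 'z': 4}]
nonvaried_cfg, varied_cfg_list = ut.partition_varied_cfg_list(cfg_list)
# nonvaried_cfg   == {'b': 1}
# varied_cfg_list == [{'f': 1}, {'f': 2}, {'f': 3, 'z': 4}]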
Erotemic/utool | utool/util_gridsearch.py | get_cfg_lbl | def get_cfg_lbl(cfg, name=None, nonlbl_keys=INTERNAL_CFGKEYS, key_order=None,
with_name=True, default_cfg=None, sep=''):
r"""
Formats a flat configuration dict into a short string label. This is useful
for re-creating command line strings.
Args:
cfg (dict):
name (str): (default = None)
nonlbl_keys (list): (default = INTERNAL_CFGKEYS)
Returns:
str: cfg_lbl
CommandLine:
python -m utool.util_gridsearch get_cfg_lbl
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'_cfgname': 'test', 'var1': 'val1', 'var2': 'val2'}
>>> name = None
>>> nonlbl_keys = ['_cfgstr', '_cfgname', '_cfgtype', '_cfgindex']
>>> cfg_lbl = get_cfg_lbl(cfg, name, nonlbl_keys)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = test:var1=val1,var2=val2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'var1': 'val1', 'var2': 'val2'}
>>> default_cfg = {'var2': 'val1', 'var1': 'val1'}
>>> name = None
>>> cfg_lbl = get_cfg_lbl(cfg, name, default_cfg=default_cfg)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = :var2=val2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'_cfgname': 'test:K=[1,2,3]', 'K': '1'}
>>> name = None
>>> nonlbl_keys = ['_cfgstr', '_cfgname', '_cfgtype', '_cfgindex']
>>> cfg_lbl = get_cfg_lbl(cfg, name, nonlbl_keys)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = test:K=1
"""
import utool as ut
if name is None:
name = cfg.get('_cfgname', '')
if default_cfg is not None:
# Remove defaulted labels
cfg = ut.partition_varied_cfg_list([cfg], default_cfg)[1][0]
# remove keys that should not belong to the label
_clean_cfg = ut.delete_keys(cfg.copy(), nonlbl_keys)
_lbl = ut.repr4(_clean_cfg, explicit=True, nl=False, strvals=True,
key_order=key_order, itemsep=sep)
# _search = ['dict(', ')', ' ']
_search = ['dict(', ')']
_repl = [''] * len(_search)
_lbl = ut.multi_replace(_lbl, _search, _repl).rstrip(',')
if not with_name:
return _lbl
if NAMEVARSEP in name:
# hack for when name contains a little bit of the _lbl
# VERY HACKY TO PARSE OUT PARTS OF THE GIVEN NAME.
hacked_name, _cfgstr, _ = parse_cfgstr_name_options(name)
_cfgstr_options_list = re.split(
r',\s*' + ut.negative_lookahead(r'[^\[\]]*\]'), _cfgstr)
#cfgstr_options_list = cfgopt_strs.split(',')
_cfg_options = ut.parse_cfgstr_list(
_cfgstr_options_list, smartcast=False, oldmode=False)
#
ut.delete_keys(_cfg_options, cfg.keys())
_preflbl = ut.repr4(_cfg_options, explicit=True, nl=False, strvals=True)
_preflbl = ut.multi_replace(_preflbl, _search, _repl).rstrip(',')
hacked_name += NAMEVARSEP + _preflbl
###
cfg_lbl = hacked_name + _lbl
else:
cfg_lbl = name + NAMEVARSEP + _lbl
return cfg_lbl | python | def get_cfg_lbl(cfg, name=None, nonlbl_keys=INTERNAL_CFGKEYS, key_order=None,
with_name=True, default_cfg=None, sep=''):
r"""
Formats a flat configuration dict into a short string label. This is useful
for re-creating command line strings.
Args:
cfg (dict):
name (str): (default = None)
nonlbl_keys (list): (default = INTERNAL_CFGKEYS)
Returns:
str: cfg_lbl
CommandLine:
python -m utool.util_gridsearch get_cfg_lbl
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'_cfgname': 'test', 'var1': 'val1', 'var2': 'val2'}
>>> name = None
>>> nonlbl_keys = ['_cfgstr', '_cfgname', '_cfgtype', '_cfgindex']
>>> cfg_lbl = get_cfg_lbl(cfg, name, nonlbl_keys)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = test:var1=val1,var2=val2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'var1': 'val1', 'var2': 'val2'}
>>> default_cfg = {'var2': 'val1', 'var1': 'val1'}
>>> name = None
>>> cfg_lbl = get_cfg_lbl(cfg, name, default_cfg=default_cfg)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = :var2=val2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'_cfgname': 'test:K=[1,2,3]', 'K': '1'}
>>> name = None
>>> nonlbl_keys = ['_cfgstr', '_cfgname', '_cfgtype', '_cfgindex']
>>> cfg_lbl = get_cfg_lbl(cfg, name, nonlbl_keys)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = test:K=1
"""
import utool as ut
if name is None:
name = cfg.get('_cfgname', '')
if default_cfg is not None:
# Remove defaulted labels
cfg = ut.partition_varied_cfg_list([cfg], default_cfg)[1][0]
# remove keys that should not belong to the label
_clean_cfg = ut.delete_keys(cfg.copy(), nonlbl_keys)
_lbl = ut.repr4(_clean_cfg, explicit=True, nl=False, strvals=True,
key_order=key_order, itemsep=sep)
# _search = ['dict(', ')', ' ']
_search = ['dict(', ')']
_repl = [''] * len(_search)
_lbl = ut.multi_replace(_lbl, _search, _repl).rstrip(',')
if not with_name:
return _lbl
if NAMEVARSEP in name:
# hack for when name contains a little bit of the _lbl
# VERY HACKY TO PARSE OUT PARTS OF THE GIVEN NAME.
hacked_name, _cfgstr, _ = parse_cfgstr_name_options(name)
_cfgstr_options_list = re.split(
r',\s*' + ut.negative_lookahead(r'[^\[\]]*\]'), _cfgstr)
#cfgstr_options_list = cfgopt_strs.split(',')
_cfg_options = ut.parse_cfgstr_list(
_cfgstr_options_list, smartcast=False, oldmode=False)
#
ut.delete_keys(_cfg_options, cfg.keys())
_preflbl = ut.repr4(_cfg_options, explicit=True, nl=False, strvals=True)
_preflbl = ut.multi_replace(_preflbl, _search, _repl).rstrip(',')
hacked_name += NAMEVARSEP + _preflbl
###
cfg_lbl = hacked_name + _lbl
else:
cfg_lbl = name + NAMEVARSEP + _lbl
return cfg_lbl | [
"def",
"get_cfg_lbl",
"(",
"cfg",
",",
"name",
"=",
"None",
",",
"nonlbl_keys",
"=",
"INTERNAL_CFGKEYS",
",",
"key_order",
"=",
"None",
",",
"with_name",
"=",
"True",
",",
"default_cfg",
"=",
"None",
",",
"sep",
"=",
"''",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"cfg",
".",
"get",
"(",
"'_cfgname'",
",",
"''",
")",
"if",
"default_cfg",
"is",
"not",
"None",
":",
"# Remove defaulted labels",
"cfg",
"=",
"ut",
".",
"partition_varied_cfg_list",
"(",
"[",
"cfg",
"]",
",",
"default_cfg",
")",
"[",
"1",
"]",
"[",
"0",
"]",
"# remove keys that should not belong to the label",
"_clean_cfg",
"=",
"ut",
".",
"delete_keys",
"(",
"cfg",
".",
"copy",
"(",
")",
",",
"nonlbl_keys",
")",
"_lbl",
"=",
"ut",
".",
"repr4",
"(",
"_clean_cfg",
",",
"explicit",
"=",
"True",
",",
"nl",
"=",
"False",
",",
"strvals",
"=",
"True",
",",
"key_order",
"=",
"key_order",
",",
"itemsep",
"=",
"sep",
")",
"# _search = ['dict(', ')', ' ']",
"_search",
"=",
"[",
"'dict('",
",",
"')'",
"]",
"_repl",
"=",
"[",
"''",
"]",
"*",
"len",
"(",
"_search",
")",
"_lbl",
"=",
"ut",
".",
"multi_replace",
"(",
"_lbl",
",",
"_search",
",",
"_repl",
")",
".",
"rstrip",
"(",
"','",
")",
"if",
"not",
"with_name",
":",
"return",
"_lbl",
"if",
"NAMEVARSEP",
"in",
"name",
":",
"# hack for when name contains a little bit of the _lbl",
"# VERY HACKY TO PARSE OUT PARTS OF THE GIVEN NAME.",
"hacked_name",
",",
"_cfgstr",
",",
"_",
"=",
"parse_cfgstr_name_options",
"(",
"name",
")",
"_cfgstr_options_list",
"=",
"re",
".",
"split",
"(",
"r',\\s*'",
"+",
"ut",
".",
"negative_lookahead",
"(",
"r'[^\\[\\]]*\\]'",
")",
",",
"_cfgstr",
")",
"#cfgstr_options_list = cfgopt_strs.split(',')",
"_cfg_options",
"=",
"ut",
".",
"parse_cfgstr_list",
"(",
"_cfgstr_options_list",
",",
"smartcast",
"=",
"False",
",",
"oldmode",
"=",
"False",
")",
"#",
"ut",
".",
"delete_keys",
"(",
"_cfg_options",
",",
"cfg",
".",
"keys",
"(",
")",
")",
"_preflbl",
"=",
"ut",
".",
"repr4",
"(",
"_cfg_options",
",",
"explicit",
"=",
"True",
",",
"nl",
"=",
"False",
",",
"strvals",
"=",
"True",
")",
"_preflbl",
"=",
"ut",
".",
"multi_replace",
"(",
"_preflbl",
",",
"_search",
",",
"_repl",
")",
".",
"rstrip",
"(",
"','",
")",
"hacked_name",
"+=",
"NAMEVARSEP",
"+",
"_preflbl",
"###",
"cfg_lbl",
"=",
"hacked_name",
"+",
"_lbl",
"else",
":",
"cfg_lbl",
"=",
"name",
"+",
"NAMEVARSEP",
"+",
"_lbl",
"return",
"cfg_lbl"
] | r"""
Formats a flat configuration dict into a short string label. This is useful
for re-creating command line strings.
Args:
cfg (dict):
name (str): (default = None)
nonlbl_keys (list): (default = INTERNAL_CFGKEYS)
Returns:
str: cfg_lbl
CommandLine:
python -m utool.util_gridsearch get_cfg_lbl
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'_cfgname': 'test', 'var1': 'val1', 'var2': 'val2'}
>>> name = None
>>> nonlbl_keys = ['_cfgstr', '_cfgname', '_cfgtype', '_cfgindex']
>>> cfg_lbl = get_cfg_lbl(cfg, name, nonlbl_keys)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = test:var1=val1,var2=val2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'var1': 'val1', 'var2': 'val2'}
>>> default_cfg = {'var2': 'val1', 'var1': 'val1'}
>>> name = None
>>> cfg_lbl = get_cfg_lbl(cfg, name, default_cfg=default_cfg)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = :var2=val2
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg = {'_cfgname': 'test:K=[1,2,3]', 'K': '1'}
>>> name = None
>>> nonlbl_keys = ['_cfgstr', '_cfgname', '_cfgtype', '_cfgindex']
>>> cfg_lbl = get_cfg_lbl(cfg, name, nonlbl_keys)
>>> result = ('cfg_lbl = %s' % (six.text_type(cfg_lbl),))
>>> print(result)
cfg_lbl = test:K=1 | [
"r",
"Formats",
"a",
"flat",
"configuration",
"dict",
"into",
"a",
"short",
"string",
"label",
".",
"This",
"is",
"useful",
"for",
"re",
"-",
"creating",
"command",
"line",
"strings",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L267-L356 | train |
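A usage sketch matching the first doctest above.

from utool.util_gridsearch import get_cfg_lbl

cfg = {'_cfgname': 'test', 'var1': 'val1', 'var2': 'val2'}
print(get_cfg_lbl(cfg))  # -> test:var1=val1,var2=val2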
Erotemic/utool | utool/util_gridsearch.py | parse_cfgstr_list2 | def parse_cfgstr_list2(cfgstr_list, named_defaults_dict=None, cfgtype=None,
alias_keys=None, valid_keys=None, expand_nested=True,
strict=True, special_join_dict=None, is_nestedcfgtype=False,
metadata=None):
r"""
Parses config strings. By looking up name in a dict of configs
Args:
cfgstr_list (list):
named_defaults_dict (dict): (default = None)
cfgtype (None): (default = None)
alias_keys (None): (default = None)
valid_keys (None): (default = None)
expand_nested (bool): (default = True)
strict (bool): (default = True)
is_nestedcfgtype - used for annot configs so special joins arent geometrically combined
Note:
Normal Case:
--flag name
Custom Arugment Cases:
--flag name:custom_key1=custom_val1,custom_key2=custom_val2
Multiple Config Case:
--flag name1:custom_args1 name2:custom_args2
Multiple Config (special join) Case:
(here name2 and name3 have some special interaction)
--flag name1:custom_args1 name2:custom_args2::name3:custom_args3
Varied Argument Case:
--flag name:key1=[val1,val2]
Returns:
list: cfg_combos_list
CommandLine:
python -m utool.util_gridsearch --test-parse_cfgstr_list2
python -m utool.util_gridsearch --test-parse_cfgstr_list2:0
python -m utool.util_gridsearch --test-parse_cfgstr_list2:1
python -m utool.util_gridsearch --test-parse_cfgstr_list2:2
Setup:
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
>>> cfgstr_list = ['name', 'name:f=1', 'name:b=[1,2]', 'name1:f=1::name2:f=1,b=2']
>>> #cfgstr_list = ['name', 'name1:f=1::name2:f=1,b=2']
>>> special_join_dict = {'joined': True}
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('b' in cfg_combos_list[2][0])
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> assert 'b' in cfg_combos_list[2][0], 'second cfg[2] should vary b'
>>> assert 'b' in cfg_combos_list[2][1], 'second cfg[2] should vary b'
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
['name:', 'name:f=1', 'name:b=1', 'name:b=2', 'name1:f=1,joined=True', 'name2:b=2,f=1,joined=True']
Example1:
>>> # ENABLE_DOCTEST
>>> # Allow for definition of a named default on the fly
>>> cfgstr_list = ['base=:f=2,c=[1,2]', 'base:f=1', 'base:b=[1,2]']
>>> special_join_dict = None
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
['base:c=1,f=1', 'base:c=2,f=1', 'base:b=1,c=1,f=2', 'base:b=1,c=2,f=2', 'base:b=2,c=1,f=2', 'base:b=2,c=2,f=2']
Example2:
>>> # ENABLE_DOCTEST
>>> cfgstr_list = ['base:f=2,c=[(1,2),(3,4)]']
>>> special_join_dict = None
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
Example3:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
>>> # test simplest case
>>> cfgstr_list = ['name:b=[1,2]']
>>> special_join_dict = {'joined': True}
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('b' in cfg_combos_list[0][0])
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> assert 'b' in cfg_combos_list[0][0], 'second cfg[2] should vary b'
>>> assert 'b' in cfg_combos_list[0][1], 'second cfg[2] should vary b'
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
"""
import utool as ut
#with ut.Indenter(' '):
cfg_combos_list = []
cfgstr_list_ = []
# special named defaults assignment
dyndef_named_defaults = {}
for cfgstr in cfgstr_list:
if cfgstr.find('=:') > -1:
cfgname, cfgopt_strs, subx = parse_cfgstr_name_options(cfgstr)
assert cfgname.endswith('=')
cfgname = cfgname[:-1]
base_cfg_list = lookup_base_cfg_list(cfgname,
named_defaults_dict,
metadata=metadata)
cfg_options = noexpand_parse_cfgstrs(cfgopt_strs)
dyndef_named_defaults[cfgname] = cfg_options
else:
cfgstr_list_.append(cfgstr)
if len(dyndef_named_defaults) > 0 and named_defaults_dict is None:
named_defaults_dict = dyndef_named_defaults
for cfgstr in cfgstr_list_:
cfg_combos = []
# Parse special joined cfg case
if cfgstr.find('::') > -1:
special_cfgstr_list = cfgstr.split('::')
# Recursive call
special_combo_list = parse_cfgstr_list2(
special_cfgstr_list,
named_defaults_dict=named_defaults_dict, cfgtype=cfgtype,
alias_keys=alias_keys, valid_keys=valid_keys,
strict=strict, expand_nested=expand_nested,
is_nestedcfgtype=False, metadata=metadata)
if special_join_dict is not None:
for special_combo in special_combo_list:
for cfg in special_combo:
cfg.update(special_join_dict)
if is_nestedcfgtype:
cfg_combo = tuple([combo for combo in special_combo_list])
else:
# not sure if this is right
cfg_combo = special_combo_list
# FIXME DUPLICATE CODE
if expand_nested:
cfg_combos.extend(cfg_combo)
else:
#print('Appending: ' + str(ut.depth_profile(cfg_combo)))
#if ut.depth_profile(cfg_combo) == [1, 9]:
# ut.embed()
cfg_combos_list.append(cfg_combo)
else:
# Normal Case
cfgname, cfgopt_strs, subx = parse_cfgstr_name_options(cfgstr)
# --
# Lookup named default settings
try:
base_cfg_list = lookup_base_cfg_list(cfgname,
named_defaults_dict,
metadata=metadata)
except Exception as ex:
ut.printex(ex, keys=['cfgstr_list', 'cfgstr_list_'])
raise
# --
for base_cfg in base_cfg_list:
print('cfgname = %r' % (cfgname,))
print('cfgopt_strs = %r' % (cfgopt_strs,))
print('base_cfg = %r' % (base_cfg,))
print('alias_keys = %r' % (alias_keys,))
print('cfgtype = %r' % (cfgtype,))
print('offset = %r' % (len(cfg_combos),))
print('valid_keys = %r' % (valid_keys,))
print('strict = %r' % (strict,))
cfg_combo = customize_base_cfg(
cfgname, cfgopt_strs, base_cfg, cfgtype, alias_keys,
valid_keys, strict=strict, offset=len(cfg_combos))
if is_nestedcfgtype:
cfg_combo = [cfg_combo]
if expand_nested:
cfg_combos.extend(cfg_combo)
else:
cfg_combos_list.append(cfg_combo)
# SUBX Cannot work here because of acfg hackiness
#if subx is not None:
# cfg_combo = ut.take(cfg_combo, subx)
if expand_nested:
cfg_combos_list.append(cfg_combos)
# print('Updated to: ' + str(ut.depth_profile(cfg_combos_list)))
#print('Returning len(cfg_combos_list) = %r' % (len(cfg_combos_list),))
return cfg_combos_list | python | def parse_cfgstr_list2(cfgstr_list, named_defaults_dict=None, cfgtype=None,
alias_keys=None, valid_keys=None, expand_nested=True,
strict=True, special_join_dict=None, is_nestedcfgtype=False,
metadata=None):
r"""
Parses config strings. By looking up name in a dict of configs
Args:
cfgstr_list (list):
named_defaults_dict (dict): (default = None)
cfgtype (None): (default = None)
alias_keys (None): (default = None)
valid_keys (None): (default = None)
expand_nested (bool): (default = True)
strict (bool): (default = True)
is_nestedcfgtype - used for annot configs so special joins aren't geometrically combined
Note:
Normal Case:
--flag name
Custom Argument Cases:
--flag name:custom_key1=custom_val1,custom_key2=custom_val2
Multiple Config Case:
--flag name1:custom_args1 name2:custom_args2
Multiple Config (special join) Case:
(here name2 and name3 have some special interaction)
--flag name1:custom_args1 name2:custom_args2::name3:custom_args3
Varied Argument Case:
--flag name:key1=[val1,val2]
Returns:
list: cfg_combos_list
CommandLine:
python -m utool.util_gridsearch --test-parse_cfgstr_list2
python -m utool.util_gridsearch --test-parse_cfgstr_list2:0
python -m utool.util_gridsearch --test-parse_cfgstr_list2:1
python -m utool.util_gridsearch --test-parse_cfgstr_list2:2
Setup:
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
>>> cfgstr_list = ['name', 'name:f=1', 'name:b=[1,2]', 'name1:f=1::name2:f=1,b=2']
>>> #cfgstr_list = ['name', 'name1:f=1::name2:f=1,b=2']
>>> special_join_dict = {'joined': True}
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('b' in cfg_combos_list[2][0])
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> assert 'b' in cfg_combos_list[2][0], 'second cfg[2] should vary b'
>>> assert 'b' in cfg_combos_list[2][1], 'second cfg[2] should vary b'
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
['name:', 'name:f=1', 'name:b=1', 'name:b=2', 'name1:f=1,joined=True', 'name2:b=2,f=1,joined=True']
Example1:
>>> # ENABLE_DOCTEST
>>> # Allow for definition of a named default on the fly
>>> cfgstr_list = ['base=:f=2,c=[1,2]', 'base:f=1', 'base:b=[1,2]']
>>> special_join_dict = None
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
['base:c=1,f=1', 'base:c=2,f=1', 'base:b=1,c=1,f=2', 'base:b=1,c=2,f=2', 'base:b=2,c=1,f=2', 'base:b=2,c=2,f=2']
Example2:
>>> # ENABLE_DOCTEST
>>> cfgstr_list = ['base:f=2,c=[(1,2),(3,4)]']
>>> special_join_dict = None
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
Example3:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
>>> # test simplest case
>>> cfgstr_list = ['name:b=[1,2]']
>>> special_join_dict = {'joined': True}
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('b' in cfg_combos_list[0][0])
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> assert 'b' in cfg_combos_list[0][0], 'second cfg[2] should vary b'
>>> assert 'b' in cfg_combos_list[0][1], 'second cfg[2] should vary b'
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
"""
import utool as ut
#with ut.Indenter(' '):
cfg_combos_list = []
cfgstr_list_ = []
# special named defaults assignment
dyndef_named_defaults = {}
for cfgstr in cfgstr_list:
if cfgstr.find('=:') > -1:
cfgname, cfgopt_strs, subx = parse_cfgstr_name_options(cfgstr)
assert cfgname.endswith('=')
cfgname = cfgname[:-1]
base_cfg_list = lookup_base_cfg_list(cfgname,
named_defaults_dict,
metadata=metadata)
cfg_options = noexpand_parse_cfgstrs(cfgopt_strs)
dyndef_named_defaults[cfgname] = cfg_options
else:
cfgstr_list_.append(cfgstr)
if len(dyndef_named_defaults) > 0 and named_defaults_dict is None:
named_defaults_dict = dyndef_named_defaults
for cfgstr in cfgstr_list_:
cfg_combos = []
# Parse special joined cfg case
if cfgstr.find('::') > -1:
special_cfgstr_list = cfgstr.split('::')
# Recursive call
special_combo_list = parse_cfgstr_list2(
special_cfgstr_list,
named_defaults_dict=named_defaults_dict, cfgtype=cfgtype,
alias_keys=alias_keys, valid_keys=valid_keys,
strict=strict, expand_nested=expand_nested,
is_nestedcfgtype=False, metadata=metadata)
if special_join_dict is not None:
for special_combo in special_combo_list:
for cfg in special_combo:
cfg.update(special_join_dict)
if is_nestedcfgtype:
cfg_combo = tuple([combo for combo in special_combo_list])
else:
# not sure if this is right
cfg_combo = special_combo_list
# FIXME DUPLICATE CODE
if expand_nested:
cfg_combos.extend(cfg_combo)
else:
#print('Appending: ' + str(ut.depth_profile(cfg_combo)))
#if ut.depth_profile(cfg_combo) == [1, 9]:
# ut.embed()
cfg_combos_list.append(cfg_combo)
else:
# Normal Case
cfgname, cfgopt_strs, subx = parse_cfgstr_name_options(cfgstr)
# --
# Lookup named default settings
try:
base_cfg_list = lookup_base_cfg_list(cfgname,
named_defaults_dict,
metadata=metadata)
except Exception as ex:
ut.printex(ex, keys=['cfgstr_list', 'cfgstr_list_'])
raise
# --
for base_cfg in base_cfg_list:
print('cfgname = %r' % (cfgname,))
print('cfgopt_strs = %r' % (cfgopt_strs,))
print('base_cfg = %r' % (base_cfg,))
print('alias_keys = %r' % (alias_keys,))
print('cfgtype = %r' % (cfgtype,))
print('offset = %r' % (len(cfg_combos),))
print('valid_keys = %r' % (valid_keys,))
print('strict = %r' % (strict,))
cfg_combo = customize_base_cfg(
cfgname, cfgopt_strs, base_cfg, cfgtype, alias_keys,
valid_keys, strict=strict, offset=len(cfg_combos))
if is_nestedcfgtype:
cfg_combo = [cfg_combo]
if expand_nested:
cfg_combos.extend(cfg_combo)
else:
cfg_combos_list.append(cfg_combo)
# SUBX Cannot work here because of acfg hackiness
#if subx is not None:
# cfg_combo = ut.take(cfg_combo, subx)
if expand_nested:
cfg_combos_list.append(cfg_combos)
# print('Updated to: ' + str(ut.depth_profile(cfg_combos_list)))
#print('Returning len(cfg_combos_list) = %r' % (len(cfg_combos_list),))
return cfg_combos_list | [
"def",
"parse_cfgstr_list2",
"(",
"cfgstr_list",
",",
"named_defaults_dict",
"=",
"None",
",",
"cfgtype",
"=",
"None",
",",
"alias_keys",
"=",
"None",
",",
"valid_keys",
"=",
"None",
",",
"expand_nested",
"=",
"True",
",",
"strict",
"=",
"True",
",",
"special_join_dict",
"=",
"None",
",",
"is_nestedcfgtype",
"=",
"False",
",",
"metadata",
"=",
"None",
")",
":",
"import",
"utool",
"as",
"ut",
"#with ut.Indenter(' '):",
"cfg_combos_list",
"=",
"[",
"]",
"cfgstr_list_",
"=",
"[",
"]",
"# special named defaults assignment",
"dyndef_named_defaults",
"=",
"{",
"}",
"for",
"cfgstr",
"in",
"cfgstr_list",
":",
"if",
"cfgstr",
".",
"find",
"(",
"'=:'",
")",
">",
"-",
"1",
":",
"cfgname",
",",
"cfgopt_strs",
",",
"subx",
"=",
"parse_cfgstr_name_options",
"(",
"cfgstr",
")",
"assert",
"cfgname",
".",
"endswith",
"(",
"'='",
")",
"cfgname",
"=",
"cfgname",
"[",
":",
"-",
"1",
"]",
"base_cfg_list",
"=",
"lookup_base_cfg_list",
"(",
"cfgname",
",",
"named_defaults_dict",
",",
"metadata",
"=",
"metadata",
")",
"cfg_options",
"=",
"noexpand_parse_cfgstrs",
"(",
"cfgopt_strs",
")",
"dyndef_named_defaults",
"[",
"cfgname",
"]",
"=",
"cfg_options",
"else",
":",
"cfgstr_list_",
".",
"append",
"(",
"cfgstr",
")",
"if",
"len",
"(",
"dyndef_named_defaults",
")",
">",
"0",
"and",
"named_defaults_dict",
"is",
"None",
":",
"named_defaults_dict",
"=",
"dyndef_named_defaults",
"for",
"cfgstr",
"in",
"cfgstr_list_",
":",
"cfg_combos",
"=",
"[",
"]",
"# Parse special joined cfg case",
"if",
"cfgstr",
".",
"find",
"(",
"'::'",
")",
">",
"-",
"1",
":",
"special_cfgstr_list",
"=",
"cfgstr",
".",
"split",
"(",
"'::'",
")",
"# Recursive call",
"special_combo_list",
"=",
"parse_cfgstr_list2",
"(",
"special_cfgstr_list",
",",
"named_defaults_dict",
"=",
"named_defaults_dict",
",",
"cfgtype",
"=",
"cfgtype",
",",
"alias_keys",
"=",
"alias_keys",
",",
"valid_keys",
"=",
"valid_keys",
",",
"strict",
"=",
"strict",
",",
"expand_nested",
"=",
"expand_nested",
",",
"is_nestedcfgtype",
"=",
"False",
",",
"metadata",
"=",
"metadata",
")",
"if",
"special_join_dict",
"is",
"not",
"None",
":",
"for",
"special_combo",
"in",
"special_combo_list",
":",
"for",
"cfg",
"in",
"special_combo",
":",
"cfg",
".",
"update",
"(",
"special_join_dict",
")",
"if",
"is_nestedcfgtype",
":",
"cfg_combo",
"=",
"tuple",
"(",
"[",
"combo",
"for",
"combo",
"in",
"special_combo_list",
"]",
")",
"else",
":",
"# not sure if this is right",
"cfg_combo",
"=",
"special_combo_list",
"# FIXME DUPLICATE CODE",
"if",
"expand_nested",
":",
"cfg_combos",
".",
"extend",
"(",
"cfg_combo",
")",
"else",
":",
"#print('Appending: ' + str(ut.depth_profile(cfg_combo)))",
"#if ut.depth_profile(cfg_combo) == [1, 9]:",
"# ut.embed()",
"cfg_combos_list",
".",
"append",
"(",
"cfg_combo",
")",
"else",
":",
"# Normal Case",
"cfgname",
",",
"cfgopt_strs",
",",
"subx",
"=",
"parse_cfgstr_name_options",
"(",
"cfgstr",
")",
"# --",
"# Lookup named default settings",
"try",
":",
"base_cfg_list",
"=",
"lookup_base_cfg_list",
"(",
"cfgname",
",",
"named_defaults_dict",
",",
"metadata",
"=",
"metadata",
")",
"except",
"Exception",
"as",
"ex",
":",
"ut",
".",
"printex",
"(",
"ex",
",",
"keys",
"=",
"[",
"'cfgstr_list'",
",",
"'cfgstr_list_'",
"]",
")",
"raise",
"# --",
"for",
"base_cfg",
"in",
"base_cfg_list",
":",
"print",
"(",
"'cfgname = %r'",
"%",
"(",
"cfgname",
",",
")",
")",
"print",
"(",
"'cfgopt_strs = %r'",
"%",
"(",
"cfgopt_strs",
",",
")",
")",
"print",
"(",
"'base_cfg = %r'",
"%",
"(",
"base_cfg",
",",
")",
")",
"print",
"(",
"'alias_keys = %r'",
"%",
"(",
"alias_keys",
",",
")",
")",
"print",
"(",
"'cfgtype = %r'",
"%",
"(",
"cfgtype",
",",
")",
")",
"print",
"(",
"'offset = %r'",
"%",
"(",
"len",
"(",
"cfg_combos",
")",
",",
")",
")",
"print",
"(",
"'valid_keys = %r'",
"%",
"(",
"valid_keys",
",",
")",
")",
"print",
"(",
"'strict = %r'",
"%",
"(",
"strict",
",",
")",
")",
"cfg_combo",
"=",
"customize_base_cfg",
"(",
"cfgname",
",",
"cfgopt_strs",
",",
"base_cfg",
",",
"cfgtype",
",",
"alias_keys",
",",
"valid_keys",
",",
"strict",
"=",
"strict",
",",
"offset",
"=",
"len",
"(",
"cfg_combos",
")",
")",
"if",
"is_nestedcfgtype",
":",
"cfg_combo",
"=",
"[",
"cfg_combo",
"]",
"if",
"expand_nested",
":",
"cfg_combos",
".",
"extend",
"(",
"cfg_combo",
")",
"else",
":",
"cfg_combos_list",
".",
"append",
"(",
"cfg_combo",
")",
"# SUBX Cannot work here because of acfg hackiness",
"#if subx is not None:",
"# cfg_combo = ut.take(cfg_combo, subx)",
"if",
"expand_nested",
":",
"cfg_combos_list",
".",
"append",
"(",
"cfg_combos",
")",
"# print('Updated to: ' + str(ut.depth_profile(cfg_combos_list)))",
"#print('Returning len(cfg_combos_list) = %r' % (len(cfg_combos_list),))",
"return",
"cfg_combos_list"
] | r"""
Parses config strings. By looking up name in a dict of configs
Args:
cfgstr_list (list):
named_defaults_dict (dict): (default = None)
cfgtype (None): (default = None)
alias_keys (None): (default = None)
valid_keys (None): (default = None)
expand_nested (bool): (default = True)
strict (bool): (default = True)
is_nestedcfgtype - used for annot configs so special joins aren't geometrically combined
Note:
Normal Case:
--flag name
Custom Argument Cases:
--flag name:custom_key1=custom_val1,custom_key2=custom_val2
Multiple Config Case:
--flag name1:custom_args1 name2:custom_args2
Multiple Config (special join) Case:
(here name2 and name3 have some special interaction)
--flag name1:custom_args1 name2:custom_args2::name3:custom_args3
Varied Argument Case:
--flag name:key1=[val1,val2]
Returns:
list: cfg_combos_list
CommandLine:
python -m utool.util_gridsearch --test-parse_cfgstr_list2
python -m utool.util_gridsearch --test-parse_cfgstr_list2:0
python -m utool.util_gridsearch --test-parse_cfgstr_list2:1
python -m utool.util_gridsearch --test-parse_cfgstr_list2:2
Setup:
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
>>> cfgstr_list = ['name', 'name:f=1', 'name:b=[1,2]', 'name1:f=1::name2:f=1,b=2']
>>> #cfgstr_list = ['name', 'name1:f=1::name2:f=1,b=2']
>>> special_join_dict = {'joined': True}
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('b' in cfg_combos_list[2][0])
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> assert 'b' in cfg_combos_list[2][0], 'second cfg[2] should vary b'
>>> assert 'b' in cfg_combos_list[2][1], 'second cfg[2] should vary b'
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
['name:', 'name:f=1', 'name:b=1', 'name:b=2', 'name1:f=1,joined=True', 'name2:b=2,f=1,joined=True']
Example1:
>>> # ENABLE_DOCTEST
>>> # Allow for definition of a named default on the fly
>>> cfgstr_list = ['base=:f=2,c=[1,2]', 'base:f=1', 'base:b=[1,2]']
>>> special_join_dict = None
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
['base:c=1,f=1', 'base:c=2,f=1', 'base:b=1,c=1,f=2', 'base:b=1,c=2,f=2', 'base:b=2,c=1,f=2', 'base:b=2,c=2,f=2']
Example2:
>>> # ENABLE_DOCTEST
>>> cfgstr_list = ['base:f=2,c=[(1,2),(3,4)]']
>>> special_join_dict = None
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result)
Example3:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> named_defaults_dict = None
>>> cfgtype, alias_keys, valid_keys, metadata = None, None, None, None
>>> expand_nested, is_nestedcfgtypel, strict = True, False, False
>>> # test simplest case
>>> cfgstr_list = ['name:b=[1,2]']
>>> special_join_dict = {'joined': True}
>>> cfg_combos_list = parse_cfgstr_list2(
>>> cfgstr_list, named_defaults_dict, cfgtype, alias_keys, valid_keys,
>>> expand_nested, strict, special_join_dict)
>>> print('b' in cfg_combos_list[0][0])
>>> print('cfg_combos_list = %s' % (ut.repr4(cfg_combos_list, nl=2),))
>>> assert 'b' in cfg_combos_list[0][0], 'second cfg[2] should vary b'
>>> assert 'b' in cfg_combos_list[0][1], 'second cfg[2] should vary b'
>>> print(ut.depth_profile(cfg_combos_list))
>>> cfg_list = ut.flatten(cfg_combos_list)
>>> cfg_list = ut.flatten([cfg if isinstance(cfg, list) else [cfg] for cfg in cfg_list])
>>> result = ut.repr2(ut.get_varied_cfg_lbls(cfg_list))
>>> print(result) | [
"r",
"Parses",
"config",
"strings",
".",
"By",
"looking",
"up",
"name",
"in",
"a",
"dict",
"of",
"configs"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L1078-L1294 | train |
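A minimal usage sketch for parse_cfgstr_list2, complementing the doctests above; the option names and the printed dicts are illustrative assumptions, not verbatim output.

from utool.util_gridsearch import parse_cfgstr_list2

# 'myalgo' is an arbitrary label here; K is expanded over both bracketed values.
combos = parse_cfgstr_list2(['myalgo:K=[2,4],ratio=0.8'], strict=False)
for cfg in combos[0]:      # one list of expanded config dicts per input string
    print(cfg)             # roughly {'K': 2, 'ratio': 0.8} then {'K': 4, 'ratio': 0.8}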
Erotemic/utool | utool/util_gridsearch.py | grid_search_generator | def grid_search_generator(grid_basis=[], *args, **kwargs):
r"""
Iteratively yields individual configuration points
inside a defined basis.
Args:
grid_basis (list): a list of 2-component tuple. The named tuple looks
like this:
CommandLine:
python -m utool.util_gridsearch --test-grid_search_generator
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> grid_basis = [
... DimensionBasis('dim1', [.1, .2, .3]),
... DimensionBasis('dim2', [.1, .4, .5]),
... ]
>>> args = tuple()
>>> kwargs = {}
>>> # execute function
>>> point_list = list(grid_search_generator(grid_basis))
>>> # verify results
>>> column_lbls = ut.get_list_column(grid_basis, 0)
>>> column_list = ut.get_list_column(grid_basis, 1)
>>> first_vals = ut.get_list_column(ut.get_list_column(grid_basis, 1), 0)
>>> column_types = list(map(type, first_vals))
>>> header = 'grid search'
>>> result = ut.make_csv_table(column_list, column_lbls, header, column_types)
>>> print(result)
grid search
# num_rows=3
# dim1, dim2
0.10, 0.10
0.20, 0.40
0.30, 0.50
"""
grid_basis_ = grid_basis + list(args) + list(kwargs.items())
grid_basis_dict = OrderedDict(grid_basis_)
grid_point_iter = util_dict.iter_all_dict_combinations_ordered(grid_basis_dict)
for grid_point in grid_point_iter:
yield grid_point | python | def grid_search_generator(grid_basis=[], *args, **kwargs):
r"""
Iteratively yields individual configuration points
inside a defined basis.
Args:
grid_basis (list): a list of 2-component tuple. The named tuple looks
like this:
CommandLine:
python -m utool.util_gridsearch --test-grid_search_generator
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> grid_basis = [
... DimensionBasis('dim1', [.1, .2, .3]),
... DimensionBasis('dim2', [.1, .4, .5]),
... ]
>>> args = tuple()
>>> kwargs = {}
>>> # execute function
>>> point_list = list(grid_search_generator(grid_basis))
>>> # verify results
>>> column_lbls = ut.get_list_column(grid_basis, 0)
>>> column_list = ut.get_list_column(grid_basis, 1)
>>> first_vals = ut.get_list_column(ut.get_list_column(grid_basis, 1), 0)
>>> column_types = list(map(type, first_vals))
>>> header = 'grid search'
>>> result = ut.make_csv_table(column_list, column_lbls, header, column_types)
>>> print(result)
grid search
# num_rows=3
# dim1, dim2
0.10, 0.10
0.20, 0.40
0.30, 0.50
"""
grid_basis_ = grid_basis + list(args) + list(kwargs.items())
grid_basis_dict = OrderedDict(grid_basis_)
grid_point_iter = util_dict.iter_all_dict_combinations_ordered(grid_basis_dict)
for grid_point in grid_point_iter:
yield grid_point | [
"def",
"grid_search_generator",
"(",
"grid_basis",
"=",
"[",
"]",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"grid_basis_",
"=",
"grid_basis",
"+",
"list",
"(",
"args",
")",
"+",
"list",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
"grid_basis_dict",
"=",
"OrderedDict",
"(",
"grid_basis_",
")",
"grid_point_iter",
"=",
"util_dict",
".",
"iter_all_dict_combinations_ordered",
"(",
"grid_basis_dict",
")",
"for",
"grid_point",
"in",
"grid_point_iter",
":",
"yield",
"grid_point"
] | r"""
Iteratively yields individual configuration points
inside a defined basis.
Args:
grid_basis (list): a list of 2-component tuple. The named tuple looks
like this:
CommandLine:
python -m utool.util_gridsearch --test-grid_search_generator
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> grid_basis = [
... DimensionBasis('dim1', [.1, .2, .3]),
... DimensionBasis('dim2', [.1, .4, .5]),
... ]
>>> args = tuple()
>>> kwargs = {}
>>> # execute function
>>> point_list = list(grid_search_generator(grid_basis))
>>> # verify results
>>> column_lbls = ut.get_list_column(grid_basis, 0)
>>> column_list = ut.get_list_column(grid_basis, 1)
>>> first_vals = ut.get_list_column(ut.get_list_column(grid_basis, 1), 0)
>>> column_types = list(map(type, first_vals))
>>> header = 'grid search'
>>> result = ut.make_csv_table(column_list, column_lbls, header, column_types)
>>> print(result)
grid search
# num_rows=3
# dim1, dim2
0.10, 0.10
0.20, 0.40
0.30, 0.50 | [
"r",
"Iteratively",
"yeilds",
"individual",
"configuration",
"points",
"inside",
"a",
"defined",
"basis",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L1837-L1882 | train |
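A small usage sketch mirroring the doctest above; DimensionBasis is the named tuple this module uses for grid dimensions, and the generator is expected to enumerate every combination of the basis values.

from utool.util_gridsearch import DimensionBasis, grid_search_generator

grid_basis = [
    DimensionBasis('K', [1, 3, 5]),
    DimensionBasis('ratio', [0.5, 0.9]),
]
for point in grid_search_generator(grid_basis):
    print(point)   # an ordered mapping such as {'K': 1, 'ratio': 0.5}; expected 6 points in total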
Erotemic/utool | utool/util_gridsearch.py | get_cfgdict_list_subset | def get_cfgdict_list_subset(cfgdict_list, keys):
r"""
returns list of unique dictionaries only with keys specified in keys
Args:
cfgdict_list (list):
keys (list):
Returns:
list: cfglbl_list
CommandLine:
python -m utool.util_gridsearch --test-get_cfgdict_list_subset
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> cfgdict_list = [
... {'K': 3, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.2},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}]
>>> keys = ['K', 'dcvs_clip_max']
>>> # execute function
>>> cfgdict_sublist = get_cfgdict_list_subset(cfgdict_list, keys)
>>> # verify results
>>> result = ut.repr4(cfgdict_sublist)
>>> print(result)
[
{'K': 3, 'dcvs_clip_max': 0.1},
{'K': 5, 'dcvs_clip_max': 0.1},
{'K': 3, 'dcvs_clip_max': 0.2},
{'K': 5, 'dcvs_clip_max': 0.2},
]
"""
import utool as ut
cfgdict_sublist_ = [ut.dict_subset(cfgdict, keys) for cfgdict in cfgdict_list]
cfgtups_sublist_ = [tuple(ut.dict_to_keyvals(cfgdict)) for cfgdict in cfgdict_sublist_]
cfgtups_sublist = ut.unique_ordered(cfgtups_sublist_)
cfgdict_sublist = list(map(dict, cfgtups_sublist))
return cfgdict_sublist | python | def get_cfgdict_list_subset(cfgdict_list, keys):
r"""
returns list of unique dictionaries only with keys specified in keys
Args:
cfgdict_list (list):
keys (list):
Returns:
list: cfglbl_list
CommandLine:
python -m utool.util_gridsearch --test-get_cfgdict_list_subset
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> cfgdict_list = [
... {'K': 3, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.2},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}]
>>> keys = ['K', 'dcvs_clip_max']
>>> # execute function
>>> cfgdict_sublist = get_cfgdict_list_subset(cfgdict_list, keys)
>>> # verify results
>>> result = ut.repr4(cfgdict_sublist)
>>> print(result)
[
{'K': 3, 'dcvs_clip_max': 0.1},
{'K': 5, 'dcvs_clip_max': 0.1},
{'K': 3, 'dcvs_clip_max': 0.2},
{'K': 5, 'dcvs_clip_max': 0.2},
]
"""
import utool as ut
cfgdict_sublist_ = [ut.dict_subset(cfgdict, keys) for cfgdict in cfgdict_list]
cfgtups_sublist_ = [tuple(ut.dict_to_keyvals(cfgdict)) for cfgdict in cfgdict_sublist_]
cfgtups_sublist = ut.unique_ordered(cfgtups_sublist_)
cfgdict_sublist = list(map(dict, cfgtups_sublist))
return cfgdict_sublist | [
"def",
"get_cfgdict_list_subset",
"(",
"cfgdict_list",
",",
"keys",
")",
":",
"import",
"utool",
"as",
"ut",
"cfgdict_sublist_",
"=",
"[",
"ut",
".",
"dict_subset",
"(",
"cfgdict",
",",
"keys",
")",
"for",
"cfgdict",
"in",
"cfgdict_list",
"]",
"cfgtups_sublist_",
"=",
"[",
"tuple",
"(",
"ut",
".",
"dict_to_keyvals",
"(",
"cfgdict",
")",
")",
"for",
"cfgdict",
"in",
"cfgdict_sublist_",
"]",
"cfgtups_sublist",
"=",
"ut",
".",
"unique_ordered",
"(",
"cfgtups_sublist_",
")",
"cfgdict_sublist",
"=",
"list",
"(",
"map",
"(",
"dict",
",",
"cfgtups_sublist",
")",
")",
"return",
"cfgdict_sublist"
] | r"""
returns list of unique dictionaries only with keys specified in keys
Args:
cfgdict_list (list):
keys (list):
Returns:
list: cfglbl_list
CommandLine:
python -m utool.util_gridsearch --test-get_cfgdict_list_subset
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> # build test data
>>> cfgdict_list = [
... {'K': 3, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.1, 'p': 0.2},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 5, 'dcvs_clip_max': 0.2, 'p': 0.1},
... {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}]
>>> keys = ['K', 'dcvs_clip_max']
>>> # execute function
>>> cfgdict_sublist = get_cfgdict_list_subset(cfgdict_list, keys)
>>> # verify results
>>> result = ut.repr4(cfgdict_sublist)
>>> print(result)
[
{'K': 3, 'dcvs_clip_max': 0.1},
{'K': 5, 'dcvs_clip_max': 0.1},
{'K': 3, 'dcvs_clip_max': 0.2},
{'K': 5, 'dcvs_clip_max': 0.2},
] | [
"r",
"returns",
"list",
"of",
"unique",
"dictionaries",
"only",
"with",
"keys",
"specified",
"in",
"keys"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L1944-L1988 | train |
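A compact sketch of the same behaviour as the doctest above, on smaller made-up data.

from utool.util_gridsearch import get_cfgdict_list_subset

cfgdict_list = [
    {'K': 3, 'clip': 0.1, 'p': 0.1},
    {'K': 3, 'clip': 0.1, 'p': 0.2},   # duplicate once 'p' is ignored
    {'K': 5, 'clip': 0.1, 'p': 0.1},
]
print(get_cfgdict_list_subset(cfgdict_list, ['K', 'clip']))
# [{'K': 3, 'clip': 0.1}, {'K': 5, 'clip': 0.1}]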
Erotemic/utool | utool/util_gridsearch.py | constrain_cfgdict_list | def constrain_cfgdict_list(cfgdict_list_, constraint_func):
""" constrains configurations and removes duplicates """
cfgdict_list = []
for cfg_ in cfgdict_list_:
cfg = cfg_.copy()
if constraint_func(cfg) is not False and len(cfg) > 0:
if cfg not in cfgdict_list:
cfgdict_list.append(cfg)
return cfgdict_list | python | def constrain_cfgdict_list(cfgdict_list_, constraint_func):
""" constrains configurations and removes duplicates """
cfgdict_list = []
for cfg_ in cfgdict_list_:
cfg = cfg_.copy()
if constraint_func(cfg) is not False and len(cfg) > 0:
if cfg not in cfgdict_list:
cfgdict_list.append(cfg)
return cfgdict_list | [
"def",
"constrain_cfgdict_list",
"(",
"cfgdict_list_",
",",
"constraint_func",
")",
":",
"cfgdict_list",
"=",
"[",
"]",
"for",
"cfg_",
"in",
"cfgdict_list_",
":",
"cfg",
"=",
"cfg_",
".",
"copy",
"(",
")",
"if",
"constraint_func",
"(",
"cfg",
")",
"is",
"not",
"False",
"and",
"len",
"(",
"cfg",
")",
">",
"0",
":",
"if",
"cfg",
"not",
"in",
"cfgdict_list",
":",
"cfgdict_list",
".",
"append",
"(",
"cfg",
")",
"return",
"cfgdict_list"
] | constrains configurations and removes duplicates | [
"constrains",
"configurations",
"and",
"removes",
"duplicates"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L1991-L1999 | train |
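A hedged usage sketch for constrain_cfgdict_list; the constraint function and its key names are made up for illustration.

from utool.util_gridsearch import constrain_cfgdict_list

def constraint(cfg):
    # Returning False rejects a combination; anything else keeps the copied cfg.
    if cfg['K'] > 4 and cfg['p'] > 0.1:
        return False

candidates = [{'K': 3, 'p': 0.1}, {'K': 5, 'p': 0.2}, {'K': 3, 'p': 0.1}]
print(constrain_cfgdict_list(candidates, constraint))
# [{'K': 3, 'p': 0.1}]  -- the rejected config and the duplicate are both dropped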
Erotemic/utool | utool/util_gridsearch.py | make_cfglbls | def make_cfglbls(cfgdict_list, varied_dict):
""" Show only the text in labels that mater from the cfgdict """
import textwrap
wrapper = textwrap.TextWrapper(width=50)
cfglbl_list = []
for cfgdict_ in cfgdict_list:
cfgdict = cfgdict_.copy()
for key in six.iterkeys(cfgdict_):
try:
vals = varied_dict[key]
# Dont print label if not varied
if len(vals) == 1:
del cfgdict[key]
else:
# Dont print label if it is None (irrelevant)
if cfgdict[key] is None:
del cfgdict[key]
except KeyError:
# Don't print keys not in varydict
del cfgdict[key]
cfglbl = six.text_type(cfgdict)
search_repl_list = [('\'', ''), ('}', ''),
('{', ''), (': ', '=')]
for search, repl in search_repl_list:
cfglbl = cfglbl.replace(search, repl)
#cfglbl = str(cfgdict).replace('\'', '').replace('}', '').replace('{', '').replace(': ', '=')
cfglbl = ('\n'.join(wrapper.wrap(cfglbl)))
cfglbl_list.append(cfglbl)
return cfglbl_list | python | def make_cfglbls(cfgdict_list, varied_dict):
""" Show only the text in labels that mater from the cfgdict """
import textwrap
wrapper = textwrap.TextWrapper(width=50)
cfglbl_list = []
for cfgdict_ in cfgdict_list:
cfgdict = cfgdict_.copy()
for key in six.iterkeys(cfgdict_):
try:
vals = varied_dict[key]
# Dont print label if not varied
if len(vals) == 1:
del cfgdict[key]
else:
# Dont print label if it is None (irrelevant)
if cfgdict[key] is None:
del cfgdict[key]
except KeyError:
# Don't print keys not in varydict
del cfgdict[key]
cfglbl = six.text_type(cfgdict)
search_repl_list = [('\'', ''), ('}', ''),
('{', ''), (': ', '=')]
for search, repl in search_repl_list:
cfglbl = cfglbl.replace(search, repl)
#cfglbl = str(cfgdict).replace('\'', '').replace('}', '').replace('{', '').replace(': ', '=')
cfglbl = ('\n'.join(wrapper.wrap(cfglbl)))
cfglbl_list.append(cfglbl)
return cfglbl_list | [
"def",
"make_cfglbls",
"(",
"cfgdict_list",
",",
"varied_dict",
")",
":",
"import",
"textwrap",
"wrapper",
"=",
"textwrap",
".",
"TextWrapper",
"(",
"width",
"=",
"50",
")",
"cfglbl_list",
"=",
"[",
"]",
"for",
"cfgdict_",
"in",
"cfgdict_list",
":",
"cfgdict",
"=",
"cfgdict_",
".",
"copy",
"(",
")",
"for",
"key",
"in",
"six",
".",
"iterkeys",
"(",
"cfgdict_",
")",
":",
"try",
":",
"vals",
"=",
"varied_dict",
"[",
"key",
"]",
"# Dont print label if not varied",
"if",
"len",
"(",
"vals",
")",
"==",
"1",
":",
"del",
"cfgdict",
"[",
"key",
"]",
"else",
":",
"# Dont print label if it is None (irrelevant)",
"if",
"cfgdict",
"[",
"key",
"]",
"is",
"None",
":",
"del",
"cfgdict",
"[",
"key",
"]",
"except",
"KeyError",
":",
"# Don't print keys not in varydict",
"del",
"cfgdict",
"[",
"key",
"]",
"cfglbl",
"=",
"six",
".",
"text_type",
"(",
"cfgdict",
")",
"search_repl_list",
"=",
"[",
"(",
"'\\''",
",",
"''",
")",
",",
"(",
"'}'",
",",
"''",
")",
",",
"(",
"'{'",
",",
"''",
")",
",",
"(",
"': '",
",",
"'='",
")",
"]",
"for",
"search",
",",
"repl",
"in",
"search_repl_list",
":",
"cfglbl",
"=",
"cfglbl",
".",
"replace",
"(",
"search",
",",
"repl",
")",
"#cfglbl = str(cfgdict).replace('\\'', '').replace('}', '').replace('{', '').replace(': ', '=')",
"cfglbl",
"=",
"(",
"'\\n'",
".",
"join",
"(",
"wrapper",
".",
"wrap",
"(",
"cfglbl",
")",
")",
")",
"cfglbl_list",
".",
"append",
"(",
"cfglbl",
")",
"return",
"cfglbl_list"
] | Show only the text in labels that matter from the cfgdict | [
"Show",
"only",
"the",
"text",
"in",
"labels",
"that",
"mater",
"from",
"the",
"cfgdict"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L2002-L2030 | train |
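A short sketch of how the label builder behaves; the dicts are made up and the expected output follows from the string replacements in the function body.

from utool.util_gridsearch import make_cfglbls

cfgdict_list = [{'K': 3, 'p': 0.1}, {'K': 5, 'p': 0.1}]
varied_dict = {'K': [3, 5], 'p': [0.1]}    # 'p' takes a single value, so it is hidden
print(make_cfglbls(cfgdict_list, varied_dict))
# expected: ['K=3', 'K=5']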
Erotemic/utool | utool/util_gridsearch.py | gridsearch_timer | def gridsearch_timer(func_list, args_list, niters=None, **searchkw):
"""
Times a series of functions on a series of inputs
args_list is a list that should vary the input sizes;
it can also be a func that takes a count param.
Items in args_list (or returned by the func) should be tuples so they can be
unpacked.
CommandLine:
python -m ibeis.annotmatch_funcs --exec-get_annotmatch_rowids_from_aid2 --show
python -m ibeis.annotmatch_funcs --exec-get_annotmatch_rowids_from_aid:1 --show
Args:
func_list (list):
args_list (list):
niters (None): (default = None)
Returns:
dict: time_result
CommandLine:
python -m utool.util_gridsearch --exec-gridsearch_timer --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> func_list = [ut.fibonacci_recursive, ut.fibonacci_iterative]
>>> args_list = list(range(1, 35))
>>> niters = None
>>> searchkw = {}
>>> time_result = gridsearch_timer(func_list, args_list, niters, **searchkw)
>>> result = ('time_result = %s' % (six.text_type(time_result),))
>>> print(result)
>>> time_result['plot_timings']()
>>> ut.show_if_requested()
"""
import utool as ut
timings = ut.ddict(list)
if niters is None:
niters = len(args_list)
if ut.is_funclike(args_list):
get_args = args_list
else:
get_args = args_list.__getitem__
#func_labels = searchkw.get('func_labels', list(range(len(func_list))))
func_labels = searchkw.get('func_labels', [ut.get_funcname(func) for func in func_list])
use_cache = searchkw.get('use_cache', not ut.get_argflag(('--nocache', '--nocache-time')))
assert_eq = searchkw.get('assert_eq', True)
count_list = list(range(niters))
xlabel_list = []
cache = ut.ShelfCacher('timeings.shelf', enabled=use_cache)
for count in ut.ProgressIter(count_list, lbl='Testing Timeings'):
args_ = get_args(count)
xlabel_list.append(args_)
if True:
# HACK
# There is an unhandled corner case that will fail if the function expects a tuple.
if not isinstance(args_, tuple):
args_ = (args_,)
assert isinstance(args_, tuple), 'args_ should be a tuple so it can be unpacked'
ret_list = []
for func_ in func_list:
try:
kwargs_ = {}
func_cachekey = ut.get_func_result_cachekey(func_, args_, kwargs_)
ellapsed = cache.load(func_cachekey)
except ut.CacheMissException:
with ut.Timer(verbose=False) as t:
ret = func_(*args_)
ret_list.append(ret)
ellapsed = t.ellapsed
cache.save(func_cachekey, ellapsed)
timings[func_].append(ellapsed)
if assert_eq:
# Hacky, not guarenteed to work if cache is one
ut.assert_all_eq(list(map(ut.cachestr_repr, ret_list)))
cache.close()
count_to_xtick = searchkw.get('count_to_xtick', lambda x, y: x)
xtick_list = [count_to_xtick(count, get_args(count)) for count in count_list]
def plot_timings():
import plottool as pt
ydata_list = ut.dict_take(timings, func_list)
xdata = xtick_list
ylabel = 'seconds'
xlabel = 'input size'
pt.multi_plot(
xdata, ydata_list, label_list=func_labels,
ylabel=ylabel, xlabel=xlabel,
**searchkw
)
time_result = {
'plot_timings': plot_timings,
'timings': timings,
}
return time_result | python | def gridsearch_timer(func_list, args_list, niters=None, **searchkw):
"""
Times a series of functions on a series of inputs
args_list is a list that should vary the input sizes;
it can also be a func that takes a count param.
Items in args_list (or returned by the func) should be tuples so they can be
unpacked.
CommandLine:
python -m ibeis.annotmatch_funcs --exec-get_annotmatch_rowids_from_aid2 --show
python -m ibeis.annotmatch_funcs --exec-get_annotmatch_rowids_from_aid:1 --show
Args:
func_list (list):
args_list (list):
niters (None): (default = None)
Returns:
dict: time_result
CommandLine:
python -m utool.util_gridsearch --exec-gridsearch_timer --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> func_list = [ut.fibonacci_recursive, ut.fibonacci_iterative]
>>> args_list = list(range(1, 35))
>>> niters = None
>>> searchkw = {}
>>> time_result = gridsearch_timer(func_list, args_list, niters, **searchkw)
>>> result = ('time_result = %s' % (six.text_type(time_result),))
>>> print(result)
>>> time_result['plot_timings']()
>>> ut.show_if_requested()
"""
import utool as ut
timings = ut.ddict(list)
if niters is None:
niters = len(args_list)
if ut.is_funclike(args_list):
get_args = args_list
else:
get_args = args_list.__getitem__
#func_labels = searchkw.get('func_labels', list(range(len(func_list))))
func_labels = searchkw.get('func_labels', [ut.get_funcname(func) for func in func_list])
use_cache = searchkw.get('use_cache', not ut.get_argflag(('--nocache', '--nocache-time')))
assert_eq = searchkw.get('assert_eq', True)
count_list = list(range(niters))
xlabel_list = []
cache = ut.ShelfCacher('timeings.shelf', enabled=use_cache)
for count in ut.ProgressIter(count_list, lbl='Testing Timeings'):
args_ = get_args(count)
xlabel_list.append(args_)
if True:
# HACK
# There is an unhandled corner case that will fail if the function expects a tuple.
if not isinstance(args_, tuple):
args_ = (args_,)
assert isinstance(args_, tuple), 'args_ should be a tuple so it can be unpacked'
ret_list = []
for func_ in func_list:
try:
kwargs_ = {}
func_cachekey = ut.get_func_result_cachekey(func_, args_, kwargs_)
ellapsed = cache.load(func_cachekey)
except ut.CacheMissException:
with ut.Timer(verbose=False) as t:
ret = func_(*args_)
ret_list.append(ret)
ellapsed = t.ellapsed
cache.save(func_cachekey, ellapsed)
timings[func_].append(ellapsed)
if assert_eq:
# Hacky, not guaranteed to work if the cache is on
ut.assert_all_eq(list(map(ut.cachestr_repr, ret_list)))
cache.close()
count_to_xtick = searchkw.get('count_to_xtick', lambda x, y: x)
xtick_list = [count_to_xtick(count, get_args(count)) for count in count_list]
def plot_timings():
import plottool as pt
ydata_list = ut.dict_take(timings, func_list)
xdata = xtick_list
ylabel = 'seconds'
xlabel = 'input size'
pt.multi_plot(
xdata, ydata_list, label_list=func_labels,
ylabel=ylabel, xlabel=xlabel,
**searchkw
)
time_result = {
'plot_timings': plot_timings,
'timings': timings,
}
return time_result | [
"def",
"gridsearch_timer",
"(",
"func_list",
",",
"args_list",
",",
"niters",
"=",
"None",
",",
"*",
"*",
"searchkw",
")",
":",
"import",
"utool",
"as",
"ut",
"timings",
"=",
"ut",
".",
"ddict",
"(",
"list",
")",
"if",
"niters",
"is",
"None",
":",
"niters",
"=",
"len",
"(",
"args_list",
")",
"if",
"ut",
".",
"is_funclike",
"(",
"args_list",
")",
":",
"get_args",
"=",
"args_list",
"else",
":",
"get_args",
"=",
"args_list",
".",
"__getitem__",
"#func_labels = searchkw.get('func_labels', list(range(len(func_list))))",
"func_labels",
"=",
"searchkw",
".",
"get",
"(",
"'func_labels'",
",",
"[",
"ut",
".",
"get_funcname",
"(",
"func",
")",
"for",
"func",
"in",
"func_list",
"]",
")",
"use_cache",
"=",
"searchkw",
".",
"get",
"(",
"'use_cache'",
",",
"not",
"ut",
".",
"get_argflag",
"(",
"(",
"'--nocache'",
",",
"'--nocache-time'",
")",
")",
")",
"assert_eq",
"=",
"searchkw",
".",
"get",
"(",
"'assert_eq'",
",",
"True",
")",
"count_list",
"=",
"list",
"(",
"range",
"(",
"niters",
")",
")",
"xlabel_list",
"=",
"[",
"]",
"cache",
"=",
"ut",
".",
"ShelfCacher",
"(",
"'timeings.shelf'",
",",
"enabled",
"=",
"use_cache",
")",
"for",
"count",
"in",
"ut",
".",
"ProgressIter",
"(",
"count_list",
",",
"lbl",
"=",
"'Testing Timeings'",
")",
":",
"args_",
"=",
"get_args",
"(",
"count",
")",
"xlabel_list",
".",
"append",
"(",
"args_",
")",
"if",
"True",
":",
"# HACK",
"# There is an unhandled corner case that will fail if the function expects a tuple.",
"if",
"not",
"isinstance",
"(",
"args_",
",",
"tuple",
")",
":",
"args_",
"=",
"(",
"args_",
",",
")",
"assert",
"isinstance",
"(",
"args_",
",",
"tuple",
")",
",",
"'args_ should be a tuple so it can be unpacked'",
"ret_list",
"=",
"[",
"]",
"for",
"func_",
"in",
"func_list",
":",
"try",
":",
"kwargs_",
"=",
"{",
"}",
"func_cachekey",
"=",
"ut",
".",
"get_func_result_cachekey",
"(",
"func_",
",",
"args_",
",",
"kwargs_",
")",
"ellapsed",
"=",
"cache",
".",
"load",
"(",
"func_cachekey",
")",
"except",
"ut",
".",
"CacheMissException",
":",
"with",
"ut",
".",
"Timer",
"(",
"verbose",
"=",
"False",
")",
"as",
"t",
":",
"ret",
"=",
"func_",
"(",
"*",
"args_",
")",
"ret_list",
".",
"append",
"(",
"ret",
")",
"ellapsed",
"=",
"t",
".",
"ellapsed",
"cache",
".",
"save",
"(",
"func_cachekey",
",",
"ellapsed",
")",
"timings",
"[",
"func_",
"]",
".",
"append",
"(",
"ellapsed",
")",
"if",
"assert_eq",
":",
"# Hacky, not guarenteed to work if cache is one",
"ut",
".",
"assert_all_eq",
"(",
"list",
"(",
"map",
"(",
"ut",
".",
"cachestr_repr",
",",
"ret_list",
")",
")",
")",
"cache",
".",
"close",
"(",
")",
"count_to_xtick",
"=",
"searchkw",
".",
"get",
"(",
"'count_to_xtick'",
",",
"lambda",
"x",
",",
"y",
":",
"x",
")",
"xtick_list",
"=",
"[",
"count_to_xtick",
"(",
"count",
",",
"get_args",
"(",
"count",
")",
")",
"for",
"count",
"in",
"count_list",
"]",
"def",
"plot_timings",
"(",
")",
":",
"import",
"plottool",
"as",
"pt",
"ydata_list",
"=",
"ut",
".",
"dict_take",
"(",
"timings",
",",
"func_list",
")",
"xdata",
"=",
"xtick_list",
"ylabel",
"=",
"'seconds'",
"xlabel",
"=",
"'input size'",
"pt",
".",
"multi_plot",
"(",
"xdata",
",",
"ydata_list",
",",
"label_list",
"=",
"func_labels",
",",
"ylabel",
"=",
"ylabel",
",",
"xlabel",
"=",
"xlabel",
",",
"*",
"*",
"searchkw",
")",
"time_result",
"=",
"{",
"'plot_timings'",
":",
"plot_timings",
",",
"'timings'",
":",
"timings",
",",
"}",
"return",
"time_result"
] | Times a series of functions on a series of inputs
args_list is a list that should vary the input sizes;
it can also be a func that takes a count param.
Items in args_list (or returned by the func) should be tuples so they can be
unpacked.
CommandLine:
python -m ibeis.annotmatch_funcs --exec-get_annotmatch_rowids_from_aid2 --show
python -m ibeis.annotmatch_funcs --exec-get_annotmatch_rowids_from_aid:1 --show
Args:
func_list (list):
args_list (list):
niters (None): (default = None)
Returns:
dict: time_result
CommandLine:
python -m utool.util_gridsearch --exec-gridsearch_timer --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> func_list = [ut.fibonacci_recursive, ut.fibonacci_iterative]
>>> args_list = list(range(1, 35))
>>> niters = None
>>> searchkw = {}
>>> time_result = gridsearch_timer(func_list, args_list, niters, **searchkw)
>>> result = ('time_result = %s' % (six.text_type(time_result),))
>>> print(result)
>>> time_result['plot_timings']()
>>> ut.show_if_requested() | [
"Times",
"a",
"series",
"of",
"functions",
"on",
"a",
"series",
"of",
"inputs"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_gridsearch.py#L2120-L2227 | train |
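A hedged usage sketch with arbitrary input sizes: timings are cached in a 'timeings.shelf' file in the working directory, and the returned plot_timings callable needs plottool installed.

import utool as ut
from utool.util_gridsearch import gridsearch_timer

func_list = [ut.fibonacci_recursive, ut.fibonacci_iterative]
time_result = gridsearch_timer(func_list, args_list=list(range(1, 20)))
print(time_result['timings'])   # maps each function to its list of measured seconds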
Thermondo/django-heroku-connect | heroku_connect/utils.py | get_mapping | def get_mapping(version=1, exported_at=None, app_name=None):
"""
Return Heroku Connect mapping for the entire project.
Args:
version (int): Version of the Heroku Connect mapping, default: ``1``.
exported_at (datetime.datetime): Time the export was created, default is ``now()``.
app_name (str): Name of the Heroku application associated with the Heroku Connect add-on.
Returns:
dict: Heroku Connect mapping.
Note:
The version does not need to be incremented. Exports from the Heroku Connect
website will always have the version number ``1``.
"""
if exported_at is None:
exported_at = timezone.now()
app_name = app_name or settings.HEROKU_CONNECT_APP_NAME
return {
'version': version,
'connection': {
'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID,
'app_name': app_name,
'exported_at': exported_at.isoformat(),
},
'mappings': [
model.get_heroku_connect_mapping()
for model in get_heroku_connect_models()
]
} | python | def get_mapping(version=1, exported_at=None, app_name=None):
"""
Return Heroku Connect mapping for the entire project.
Args:
version (int): Version of the Heroku Connect mapping, default: ``1``.
exported_at (datetime.datetime): Time the export was created, default is ``now()``.
app_name (str): Name of the Heroku application associated with the Heroku Connect add-on.
Returns:
dict: Heroku Connect mapping.
Note:
The version does not need to be incremented. Exports from the Heroku Connect
website will always have the version number ``1``.
"""
if exported_at is None:
exported_at = timezone.now()
app_name = app_name or settings.HEROKU_CONNECT_APP_NAME
return {
'version': version,
'connection': {
'organization_id': settings.HEROKU_CONNECT_ORGANIZATION_ID,
'app_name': app_name,
'exported_at': exported_at.isoformat(),
},
'mappings': [
model.get_heroku_connect_mapping()
for model in get_heroku_connect_models()
]
} | [
"def",
"get_mapping",
"(",
"version",
"=",
"1",
",",
"exported_at",
"=",
"None",
",",
"app_name",
"=",
"None",
")",
":",
"if",
"exported_at",
"is",
"None",
":",
"exported_at",
"=",
"timezone",
".",
"now",
"(",
")",
"app_name",
"=",
"app_name",
"or",
"settings",
".",
"HEROKU_CONNECT_APP_NAME",
"return",
"{",
"'version'",
":",
"version",
",",
"'connection'",
":",
"{",
"'organization_id'",
":",
"settings",
".",
"HEROKU_CONNECT_ORGANIZATION_ID",
",",
"'app_name'",
":",
"app_name",
",",
"'exported_at'",
":",
"exported_at",
".",
"isoformat",
"(",
")",
",",
"}",
",",
"'mappings'",
":",
"[",
"model",
".",
"get_heroku_connect_mapping",
"(",
")",
"for",
"model",
"in",
"get_heroku_connect_models",
"(",
")",
"]",
"}"
] | Return Heroku Connect mapping for the entire project.
Args:
version (int): Version of the Heroku Connect mapping, default: ``1``.
exported_at (datetime.datetime): Time the export was created, default is ``now()``.
app_name (str): Name of the Heroku application associated with the Heroku Connect add-on.
Returns:
dict: Heroku Connect mapping.
Note:
The version does not need to be incremented. Exports from the Heroku Connect
website will always have the version number ``1``. | [
"Return",
"Heroku",
"Connect",
"mapping",
"for",
"the",
"entire",
"project",
"."
] | f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5 | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L30-L61 | train |
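A hedged sketch of one way to use the generated mapping, e.g. dumping it to a JSON file for import in the Heroku Connect dashboard; it assumes the relevant Django settings (HEROKU_CONNECT_APP_NAME, HEROKU_CONNECT_ORGANIZATION_ID) are configured, and 'my-heroku-app' is a placeholder name.

import json
from heroku_connect.utils import get_mapping

mapping = get_mapping(app_name='my-heroku-app')
with open('heroku_connect_mapping.json', 'w') as fh:
    json.dump(mapping, fh, indent=2)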
Thermondo/django-heroku-connect | heroku_connect/utils.py | get_heroku_connect_models | def get_heroku_connect_models():
"""
Return all registered Heroku Connect Models.
Returns:
(Iterator):
All registered models that are subclasses of `.HerokuConnectModel`.
Abstract models are excluded, since they are not registered.
"""
from django.apps import apps
apps.check_models_ready()
from heroku_connect.db.models import HerokuConnectModel
return (
model
for models in apps.all_models.values()
for model in models.values()
if issubclass(model, HerokuConnectModel)
and not model._meta.managed
) | python | def get_heroku_connect_models():
"""
Return all registered Heroku Connect Models.
Returns:
(Iterator):
All registered models that are subclasses of `.HerokuConnectModel`.
Abstract models are excluded, since they are not registered.
"""
from django.apps import apps
apps.check_models_ready()
from heroku_connect.db.models import HerokuConnectModel
return (
model
for models in apps.all_models.values()
for model in models.values()
if issubclass(model, HerokuConnectModel)
and not model._meta.managed
) | [
"def",
"get_heroku_connect_models",
"(",
")",
":",
"from",
"django",
".",
"apps",
"import",
"apps",
"apps",
".",
"check_models_ready",
"(",
")",
"from",
"heroku_connect",
".",
"db",
".",
"models",
"import",
"HerokuConnectModel",
"return",
"(",
"model",
"for",
"models",
"in",
"apps",
".",
"all_models",
".",
"values",
"(",
")",
"for",
"model",
"in",
"models",
".",
"values",
"(",
")",
"if",
"issubclass",
"(",
"model",
",",
"HerokuConnectModel",
")",
"and",
"not",
"model",
".",
"_meta",
".",
"managed",
")"
] | Return all registered Heroku Connect Models.
Returns:
(Iterator):
All registered models that are subclasses of `.HerokuConnectModel`.
Abstract models are excluded, since they are not registered. | [
"Return",
"all",
"registered",
"Heroku",
"Connect",
"Models",
"."
] | f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5 | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L64-L84 | train |
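A minimal sketch, assuming the Django app registry is already loaded (for example inside a management command).

from heroku_connect.utils import get_heroku_connect_models

for model in get_heroku_connect_models():
    print(model.__name__, model._meta.db_table)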
Thermondo/django-heroku-connect | heroku_connect/utils.py | create_heroku_connect_schema | def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
"""
Create Heroku Connect schema.
Note:
This function is only meant to be used for local development.
In a production environment the schema will be created by
Heroku Connect.
Args:
using (str): Alias for database connection.
Returns:
bool: ``True`` if the schema was created, ``False`` if the
schema already exists.
"""
connection = connections[using]
with connection.cursor() as cursor:
cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
schema_exists = cursor.fetchone()[0]
if schema_exists:
return False
cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
with connection.schema_editor() as editor:
for model in get_heroku_connect_models():
editor.create_model(model)
# Needs PostgreSQL and database superuser privileges (which is the case on Heroku):
editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
from heroku_connect.models import (TriggerLog, TriggerLogArchive)
for cls in [TriggerLog, TriggerLogArchive]:
editor.create_model(cls)
return True | python | def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
"""
Create Heroku Connect schema.
Note:
This function is only meant to be used for local development.
In a production environment the schema will be created by
Heroku Connect.
Args:
using (str): Alias for database connection.
Returns:
bool: ``True`` if the schema was created, ``False`` if the
schema already exists.
"""
connection = connections[using]
with connection.cursor() as cursor:
cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
schema_exists = cursor.fetchone()[0]
if schema_exists:
return False
cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
with connection.schema_editor() as editor:
for model in get_heroku_connect_models():
editor.create_model(model)
# Needs PostgreSQL and database superuser privileges (which is the case on Heroku):
editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
from heroku_connect.models import (TriggerLog, TriggerLogArchive)
for cls in [TriggerLog, TriggerLogArchive]:
editor.create_model(cls)
return True | [
"def",
"create_heroku_connect_schema",
"(",
"using",
"=",
"DEFAULT_DB_ALIAS",
")",
":",
"connection",
"=",
"connections",
"[",
"using",
"]",
"with",
"connection",
".",
"cursor",
"(",
")",
"as",
"cursor",
":",
"cursor",
".",
"execute",
"(",
"_SCHEMA_EXISTS_QUERY",
",",
"[",
"settings",
".",
"HEROKU_CONNECT_SCHEMA",
"]",
")",
"schema_exists",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"if",
"schema_exists",
":",
"return",
"False",
"cursor",
".",
"execute",
"(",
"\"CREATE SCHEMA %s;\"",
",",
"[",
"AsIs",
"(",
"settings",
".",
"HEROKU_CONNECT_SCHEMA",
")",
"]",
")",
"with",
"connection",
".",
"schema_editor",
"(",
")",
"as",
"editor",
":",
"for",
"model",
"in",
"get_heroku_connect_models",
"(",
")",
":",
"editor",
".",
"create_model",
"(",
"model",
")",
"# Needs PostgreSQL and database superuser privileges (which is the case on Heroku):",
"editor",
".",
"execute",
"(",
"'CREATE EXTENSION IF NOT EXISTS \"hstore\";'",
")",
"from",
"heroku_connect",
".",
"models",
"import",
"(",
"TriggerLog",
",",
"TriggerLogArchive",
")",
"for",
"cls",
"in",
"[",
"TriggerLog",
",",
"TriggerLogArchive",
"]",
":",
"editor",
".",
"create_model",
"(",
"cls",
")",
"return",
"True"
] | Create Heroku Connect schema.
Note:
This function is only meant to be used for local development.
In a production environment the schema will be created by
Heroku Connect.
Args:
using (str): Alias for database connection.
Returns:
bool: ``True`` if the schema was created, ``False`` if the
schema already exists. | [
"Create",
"Heroku",
"Connect",
"schema",
"."
] | f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5 | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L105-L142 | train |
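A minimal local-development sketch, e.g. for a pytest fixture or a small management command.

from heroku_connect.utils import create_heroku_connect_schema

if create_heroku_connect_schema():
    print('Heroku Connect schema created')
else:
    print('Heroku Connect schema already exists; nothing to do')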
Thermondo/django-heroku-connect | heroku_connect/utils.py | get_connections | def get_connections(app):
"""
Return all Heroku Connect connections set up with the given application.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id
Sample response from the API call is below::
{
"count": 1,
"results":[{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
…
}],
…
}
Args:
app (str): Heroku application name.
Returns:
List[dict]: List of all Heroku Connect connections associated with the Heroku application.
Raises:
requests.HTTPError: If an error occurred when accessing the connections API.
ValueError: If response is not a valid JSON.
"""
payload = {'app': app}
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections')
response = requests.get(url, params=payload, headers=_get_authorization_headers())
response.raise_for_status()
return response.json()['results'] | python | def get_connections(app):
"""
Return all Heroku Connect connections set up with the given application.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id
Sample response from the API call is below::
{
"count": 1,
"results":[{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
…
}],
…
}
Args:
app (str): Heroku application name.
Returns:
List[dict]: List of all Heroku Connect connections associated with the Heroku application.
Raises:
requests.HTTPError: If an error occurred when accessing the connections API.
ValueError: If response is not a valid JSON.
"""
payload = {'app': app}
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections')
response = requests.get(url, params=payload, headers=_get_authorization_headers())
response.raise_for_status()
return response.json()['results'] | [
"def",
"get_connections",
"(",
"app",
")",
":",
"payload",
"=",
"{",
"'app'",
":",
"app",
"}",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"HEROKU_CONNECT_API_ENDPOINT",
",",
"'connections'",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"payload",
",",
"headers",
"=",
"_get_authorization_headers",
"(",
")",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response",
".",
"json",
"(",
")",
"[",
"'results'",
"]"
] | Return all Heroku Connect connections set up with the given application.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-4-retrieve-the-new-connection-s-id
Sample response from the API call is below::
{
"count": 1,
"results":[{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
…
}],
…
}
Args:
app (str): Heroku application name.
Returns:
List[dict]: List of all Heroku Connect connections associated with the Heroku application.
Raises:
requests.HTTPError: If an error occurred when accessing the connections API.
ValueError: If response is not a valid JSON. | [
"Return",
"all",
"Heroku",
"Connect",
"connections",
"setup",
"with",
"the",
"given",
"application",
"."
] | f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5 | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L151-L186 | train |
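A hedged sketch of looking up connection ids; 'my-heroku-app' is a placeholder and HEROKU_CONNECT_API_ENDPOINT plus the authorization token must already be configured.

from heroku_connect.utils import get_connections

for connection in get_connections('my-heroku-app'):
    print(connection['id'], connection['name'])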
Thermondo/django-heroku-connect | heroku_connect/utils.py | get_connection | def get_connection(connection_id, deep=False):
"""
Get Heroku Connect connection information.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-8-monitor-the-connection-and-mapping-status
Sample response from API call is below::
{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
"schema_name": "salesforce",
"db_key": "DATABASE_URL",
"state": "IDLE",
"mappings":[
{
"id": "<mapping_id>",
"object_name": "Account",
"state": "SCHEMA_CHANGED",
…
},
{
"id": "<mapping_id>",
"object_name": "Contact",
"state": "SCHEMA_CHANGED",
…
},
…
]
…
}
Args:
connection_id (str): ID for Heroku Connect's connection.
deep (bool): Return information about the connection’s mappings,
in addition to the connection itself. Defaults to ``False``.
Returns:
dict: Heroku Connection connection information.
Raises:
requests.HTTPError: If an error occurred when accessing the connection detail API.
ValueError: If response is not a valid JSON.
"""
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections', connection_id)
payload = {'deep': deep}
response = requests.get(url, params=payload, headers=_get_authorization_headers())
response.raise_for_status()
return response.json() | python | def get_connection(connection_id, deep=False):
"""
Get Heroku Connection connection information.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-8-monitor-the-connection-and-mapping-status
Sample response from API call is below::
{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
"schema_name": "salesforce",
"db_key": "DATABASE_URL",
"state": "IDLE",
"mappings":[
{
"id": "<mapping_id>",
"object_name": "Account",
"state": "SCHEMA_CHANGED",
…
},
{
"id": "<mapping_id>",
"object_name": "Contact",
"state": "SCHEMA_CHANGED",
…
},
…
]
…
}
Args:
connection_id (str): ID for Heroku Connect's connection.
deep (bool): Return information about the connection’s mappings,
in addition to the connection itself. Defaults to ``False``.
Returns:
dict: Heroku Connection connection information.
Raises:
requests.HTTPError: If an error occurred when accessing the connection detail API.
ValueError: If response is not a valid JSON.
"""
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'connections', connection_id)
payload = {'deep': deep}
response = requests.get(url, params=payload, headers=_get_authorization_headers())
response.raise_for_status()
return response.json() | [
"def",
"get_connection",
"(",
"connection_id",
",",
"deep",
"=",
"False",
")",
":",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"HEROKU_CONNECT_API_ENDPOINT",
",",
"'connections'",
",",
"connection_id",
")",
"payload",
"=",
"{",
"'deep'",
":",
"deep",
"}",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"params",
"=",
"payload",
",",
"headers",
"=",
"_get_authorization_headers",
"(",
")",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response",
".",
"json",
"(",
")"
] | Get Heroku Connection connection information.
For more details check the link -
https://devcenter.heroku.com/articles/heroku-connect-api#step-8-monitor-the-connection-and-mapping-status
Sample response from API call is below::
{
"id": "<connection_id>",
"name": "<app_name>",
"resource_name": "<resource_name>",
"schema_name": "salesforce",
"db_key": "DATABASE_URL",
"state": "IDLE",
"mappings":[
{
"id": "<mapping_id>",
"object_name": "Account",
"state": "SCHEMA_CHANGED",
…
},
{
"id": "<mapping_id>",
"object_name": "Contact",
"state": "SCHEMA_CHANGED",
…
},
…
]
…
}
Args:
connection_id (str): ID for Heroku Connect's connection.
deep (bool): Return information about the connection’s mappings,
in addition to the connection itself. Defaults to ``False``.
Returns:
dict: Heroku Connection connection information.
Raises:
requests.HTTPError: If an error occurred when accessing the connection detail API.
ValueError: If response is not a valid JSON. | [
"Get",
"Heroku",
"Connection",
"connection",
"information",
"."
] | f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5 | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L189-L240 | train |
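A hedged polling sketch built on get_connection; the 'IDLE' target comes from the sample response above, while the import path, interval, and attempt count are assumptions.

import time

from heroku_connect import utils  # assumed import path

def wait_until_idle(connection_id, interval=5, attempts=12):
    # Poll until Heroku Connect reports the connection as IDLE.
    for _ in range(attempts):
        info = utils.get_connection(connection_id, deep=True)
        if info.get('state') == 'IDLE':
            return info
        time.sleep(interval)
    raise RuntimeError('connection did not reach the IDLE state')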
Thermondo/django-heroku-connect | heroku_connect/utils.py | import_mapping | def import_mapping(connection_id, mapping):
"""
Import Heroku Connection mapping for given connection.
Args:
connection_id (str): Heroku Connection connection ID.
mapping (dict): Heroku Connect mapping.
Raises:
requests.HTTPError: If an error occurs uploading the mapping.
ValueError: If the mapping is not JSON serializable.
"""
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT,
'connections', connection_id, 'actions', 'import')
response = requests.post(
url=url,
json=mapping,
headers=_get_authorization_headers()
)
response.raise_for_status() | python | def import_mapping(connection_id, mapping):
"""
Import Heroku Connection mapping for given connection.
Args:
connection_id (str): Heroku Connection connection ID.
mapping (dict): Heroku Connect mapping.
Raises:
requests.HTTPError: If an error occurs uploading the mapping.
ValueError: If the mapping is not JSON serializable.
"""
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT,
'connections', connection_id, 'actions', 'import')
response = requests.post(
url=url,
json=mapping,
headers=_get_authorization_headers()
)
response.raise_for_status() | [
"def",
"import_mapping",
"(",
"connection_id",
",",
"mapping",
")",
":",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"HEROKU_CONNECT_API_ENDPOINT",
",",
"'connections'",
",",
"connection_id",
",",
"'actions'",
",",
"'import'",
")",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"url",
",",
"json",
"=",
"mapping",
",",
"headers",
"=",
"_get_authorization_headers",
"(",
")",
")",
"response",
".",
"raise_for_status",
"(",
")"
] | Import Heroku Connection mapping for given connection.
Args:
connection_id (str): Heroku Connection connection ID.
mapping (dict): Heroku Connect mapping.
Raises:
requests.HTTPError: If an error occurs uploading the mapping.
ValueError: If the mapping is not JSON serializable. | [
"Import",
"Heroku",
"Connection",
"mapping",
"for",
"given",
"connection",
"."
] | f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5 | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L243-L264 | train |
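A short sketch of feeding a previously exported mapping file to import_mapping; the file name and import path are placeholders/assumptions.

import json

from heroku_connect import utils  # assumed import path

def restore_mapping(connection_id, path='mapping.json'):  # placeholder file name
    with open(path) as fh:
        mapping = json.load(fh)                   # must be JSON-serializable
    utils.import_mapping(connection_id, mapping)  # raises requests.HTTPError on failure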
Thermondo/django-heroku-connect | heroku_connect/utils.py | link_connection_to_account | def link_connection_to_account(app):
"""
Link the connection to your Heroku user account.
https://devcenter.heroku.com/articles/heroku-connect-api#step-3-link-the-connection-to-your-heroku-user-account
"""
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'users', 'me', 'apps', app, 'auth')
response = requests.post(
url=url,
headers=_get_authorization_headers()
)
response.raise_for_status() | python | def link_connection_to_account(app):
"""
Link the connection to your Heroku user account.
https://devcenter.heroku.com/articles/heroku-connect-api#step-3-link-the-connection-to-your-heroku-user-account
"""
url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT, 'users', 'me', 'apps', app, 'auth')
response = requests.post(
url=url,
headers=_get_authorization_headers()
)
response.raise_for_status() | [
"def",
"link_connection_to_account",
"(",
"app",
")",
":",
"url",
"=",
"os",
".",
"path",
".",
"join",
"(",
"settings",
".",
"HEROKU_CONNECT_API_ENDPOINT",
",",
"'users'",
",",
"'me'",
",",
"'apps'",
",",
"app",
",",
"'auth'",
")",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
"=",
"url",
",",
"headers",
"=",
"_get_authorization_headers",
"(",
")",
")",
"response",
".",
"raise_for_status",
"(",
")"
] | Link the connection to your Heroku user account.
https://devcenter.heroku.com/articles/heroku-connect-api#step-3-link-the-connection-to-your-heroku-user-account | [
"Link",
"the",
"connection",
"to",
"your",
"Heroku",
"user",
"account",
"."
] | f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5 | https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/utils.py#L267-L278 | train |
glormph/msstitch | src/app/readers/spectra.py | fetch_cvparams_values_from_subel | def fetch_cvparams_values_from_subel(base, subelname, paramnames, ns):
"""Searches a base element for subelement by name, then takes the
cvParams of that subelement and returns the values as a list
for the paramnames that match. Value order in list equals input
paramnames order."""
sub_el = basereader.find_element_xpath(base, subelname, ns)
cvparams = get_all_cvparams(sub_el, ns)
output = []
for param in paramnames:
output.append(fetch_cvparam_value_by_name(cvparams, param))
return output | python | def fetch_cvparams_values_from_subel(base, subelname, paramnames, ns):
"""Searches a base element for subelement by name, then takes the
cvParams of that subelement and returns the values as a list
for the paramnames that match. Value order in list equals input
paramnames order."""
sub_el = basereader.find_element_xpath(base, subelname, ns)
cvparams = get_all_cvparams(sub_el, ns)
output = []
for param in paramnames:
output.append(fetch_cvparam_value_by_name(cvparams, param))
return output | [
"def",
"fetch_cvparams_values_from_subel",
"(",
"base",
",",
"subelname",
",",
"paramnames",
",",
"ns",
")",
":",
"sub_el",
"=",
"basereader",
".",
"find_element_xpath",
"(",
"base",
",",
"subelname",
",",
"ns",
")",
"cvparams",
"=",
"get_all_cvparams",
"(",
"sub_el",
",",
"ns",
")",
"output",
"=",
"[",
"]",
"for",
"param",
"in",
"paramnames",
":",
"output",
".",
"append",
"(",
"fetch_cvparam_value_by_name",
"(",
"cvparams",
",",
"param",
")",
")",
"return",
"output"
] | Searches a base element for subelement by name, then takes the
cvParams of that subelement and returns the values as a list
for the paramnames that match. Value order in list equals input
paramnames order. | [
"Searches",
"a",
"base",
"element",
"for",
"subelement",
"by",
"name",
"then",
"takes",
"the",
"cvParams",
"of",
"that",
"subelement",
"and",
"returns",
"the",
"values",
"as",
"a",
"list",
"for",
"the",
"paramnames",
"that",
"match",
".",
"Value",
"order",
"in",
"list",
"equals",
"input",
"paramnames",
"order",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/spectra.py#L39-L49 | train |
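A standalone illustration of the same lookup pattern using only xml.etree instead of the repo's basereader helpers; the XML snippet and namespace prefix are invented for the example.

import xml.etree.ElementTree as etree

XML = ('<spectrum xmlns="http://psi.hupo.org/ms/mzml">'
       '<scanList><scan>'
       '<cvParam name="scan start time" value="12.34"/>'
       '</scan></scanList></spectrum>')

def cvparam_values(base, subel_path, names, ns):
    # Find the subelement, map its cvParam names to values, and return
    # the requested values in the order given by `names`.
    sub = base.find(subel_path, ns)
    params = {p.get('name'): p.get('value') for p in sub.findall('s:cvParam', ns)}
    return [params.get(name) for name in names]

ns = {'s': 'http://psi.hupo.org/ms/mzml'}
spectrum = etree.fromstring(XML)
print(cvparam_values(spectrum, 's:scanList/s:scan', ['scan start time'], ns))
# ['12.34']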
glormph/msstitch | src/app/lookups/sqlite/base.py | DatabaseConnection.create_tables | def create_tables(self, tables):
"""Creates database tables in sqlite lookup db"""
cursor = self.get_cursor()
for table in tables:
columns = mslookup_tables[table]
try:
cursor.execute('CREATE TABLE {0}({1})'.format(
table, ', '.join(columns)))
except sqlite3.OperationalError as error:
print(error)
print('Warning: Table {} already exists in database, will '
'add to existing tables instead of creating '
'new.'.format(table))
else:
self.conn.commit() | python | def create_tables(self, tables):
"""Creates database tables in sqlite lookup db"""
cursor = self.get_cursor()
for table in tables:
columns = mslookup_tables[table]
try:
cursor.execute('CREATE TABLE {0}({1})'.format(
table, ', '.join(columns)))
except sqlite3.OperationalError as error:
print(error)
print('Warning: Table {} already exists in database, will '
'add to existing tables instead of creating '
'new.'.format(table))
else:
self.conn.commit() | [
"def",
"create_tables",
"(",
"self",
",",
"tables",
")",
":",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
")",
"for",
"table",
"in",
"tables",
":",
"columns",
"=",
"mslookup_tables",
"[",
"table",
"]",
"try",
":",
"cursor",
".",
"execute",
"(",
"'CREATE TABLE {0}({1})'",
".",
"format",
"(",
"table",
",",
"', '",
".",
"join",
"(",
"columns",
")",
")",
")",
"except",
"sqlite3",
".",
"OperationalError",
"as",
"error",
":",
"print",
"(",
"error",
")",
"print",
"(",
"'Warning: Table {} already exists in database, will '",
"'add to existing tables instead of creating '",
"'new.'",
".",
"format",
"(",
"table",
")",
")",
"else",
":",
"self",
".",
"conn",
".",
"commit",
"(",
")"
] | Creates database tables in sqlite lookup db | [
"Creates",
"database",
"tables",
"in",
"sqlite",
"lookup",
"db"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L368-L382 | train |
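The try/except above tolerates pre-existing tables; an alternative is SQLite's IF NOT EXISTS clause, sketched here with placeholder column definitions.

import sqlite3

conn = sqlite3.connect(':memory:')
# IF NOT EXISTS makes the second statement a no-op instead of an OperationalError.
conn.execute('CREATE TABLE IF NOT EXISTS psms(rownr INTEGER, sequence TEXT)')
conn.execute('CREATE TABLE IF NOT EXISTS psms(rownr INTEGER, sequence TEXT)')
conn.commit()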
glormph/msstitch | src/app/lookups/sqlite/base.py | DatabaseConnection.connect | def connect(self, fn):
"""SQLite connect method initialize db"""
self.conn = sqlite3.connect(fn)
cur = self.get_cursor()
cur.execute('PRAGMA page_size=4096')
cur.execute('PRAGMA FOREIGN_KEYS=ON')
cur.execute('PRAGMA cache_size=10000')
cur.execute('PRAGMA journal_mode=MEMORY') | python | def connect(self, fn):
"""SQLite connect method initialize db"""
self.conn = sqlite3.connect(fn)
cur = self.get_cursor()
cur.execute('PRAGMA page_size=4096')
cur.execute('PRAGMA FOREIGN_KEYS=ON')
cur.execute('PRAGMA cache_size=10000')
cur.execute('PRAGMA journal_mode=MEMORY') | [
"def",
"connect",
"(",
"self",
",",
"fn",
")",
":",
"self",
".",
"conn",
"=",
"sqlite3",
".",
"connect",
"(",
"fn",
")",
"cur",
"=",
"self",
".",
"get_cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"'PRAGMA page_size=4096'",
")",
"cur",
".",
"execute",
"(",
"'PRAGMA FOREIGN_KEYS=ON'",
")",
"cur",
".",
"execute",
"(",
"'PRAGMA cache_size=10000'",
")",
"cur",
".",
"execute",
"(",
"'PRAGMA journal_mode=MEMORY'",
")"
] | SQLite connect method initialize db | [
"SQLite",
"connect",
"method",
"initialize",
"db"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L384-L391 | train |
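A minimal check of the same PRAGMA setup on an in-memory database; issuing a PRAGMA without a value reads back its current setting.

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('PRAGMA FOREIGN_KEYS=ON')
cur.execute('PRAGMA foreign_keys')   # read the setting back
print(cur.fetchone()[0])             # 1 -> foreign key enforcement is enabled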
glormph/msstitch | src/app/lookups/sqlite/base.py | DatabaseConnection.index_column | def index_column(self, index_name, table, column):
"""Called by interfaces to index specific column in table"""
cursor = self.get_cursor()
try:
cursor.execute(
'CREATE INDEX {0} on {1}({2})'.format(index_name, table, column))
except sqlite3.OperationalError as error:
print(error)
print('Skipping index creation and assuming it exists already')
else:
self.conn.commit() | python | def index_column(self, index_name, table, column):
"""Called by interfaces to index specific column in table"""
cursor = self.get_cursor()
try:
cursor.execute(
'CREATE INDEX {0} on {1}({2})'.format(index_name, table, column))
except sqlite3.OperationalError as error:
print(error)
print('Skipping index creation and assuming it exists already')
else:
self.conn.commit() | [
"def",
"index_column",
"(",
"self",
",",
"index_name",
",",
"table",
",",
"column",
")",
":",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
")",
"try",
":",
"cursor",
".",
"execute",
"(",
"'CREATE INDEX {0} on {1}({2})'",
".",
"format",
"(",
"index_name",
",",
"table",
",",
"column",
")",
")",
"except",
"sqlite3",
".",
"OperationalError",
"as",
"error",
":",
"print",
"(",
"error",
")",
"print",
"(",
"'Skipping index creation and assuming it exists already'",
")",
"else",
":",
"self",
".",
"conn",
".",
"commit",
"(",
")"
] | Called by interfaces to index specific column in table | [
"Called",
"by",
"interfaces",
"to",
"index",
"specific",
"column",
"in",
"table"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L401-L411 | train |
glormph/msstitch | src/app/lookups/sqlite/base.py | DatabaseConnection.get_sql_select | def get_sql_select(self, columns, table, distinct=False):
"""Creates and returns an SQL SELECT statement"""
sql = 'SELECT {0} {1} FROM {2}'
dist = {True: 'DISTINCT', False: ''}[distinct]
return sql.format(dist, ', '.join(columns), table) | python | def get_sql_select(self, columns, table, distinct=False):
"""Creates and returns an SQL SELECT statement"""
sql = 'SELECT {0} {1} FROM {2}'
dist = {True: 'DISTINCT', False: ''}[distinct]
return sql.format(dist, ', '.join(columns), table) | [
"def",
"get_sql_select",
"(",
"self",
",",
"columns",
",",
"table",
",",
"distinct",
"=",
"False",
")",
":",
"sql",
"=",
"'SELECT {0} {1} FROM {2}'",
"dist",
"=",
"{",
"True",
":",
"'DISTINCT'",
",",
"False",
":",
"''",
"}",
"[",
"distinct",
"]",
"return",
"sql",
".",
"format",
"(",
"dist",
",",
"', '",
".",
"join",
"(",
"columns",
")",
",",
"table",
")"
] | Creates and returns an SQL SELECT statement | [
"Creates",
"and",
"returns",
"an",
"SQL",
"SELECT",
"statement"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L417-L421 | train |
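A standalone mirror of the SELECT builder above, only to show the string it produces; table and column names are placeholders.

def sql_select(columns, table, distinct=False):
    dist = 'DISTINCT' if distinct else ''
    return 'SELECT {0} {1} FROM {2}'.format(dist, ', '.join(columns), table)

print(sql_select(['psm_id', 'sequence'], 'psms', distinct=True))
# SELECT DISTINCT psm_id, sequence FROM psms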
glormph/msstitch | src/app/lookups/sqlite/base.py | DatabaseConnection.store_many | def store_many(self, sql, values):
"""Abstraction over executemany method"""
cursor = self.get_cursor()
cursor.executemany(sql, values)
self.conn.commit() | python | def store_many(self, sql, values):
"""Abstraction over executemany method"""
cursor = self.get_cursor()
cursor.executemany(sql, values)
self.conn.commit() | [
"def",
"store_many",
"(",
"self",
",",
"sql",
",",
"values",
")",
":",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
")",
"cursor",
".",
"executemany",
"(",
"sql",
",",
"values",
")",
"self",
".",
"conn",
".",
"commit",
"(",
")"
] | Abstraction over executemany method | [
"Abstraction",
"over",
"executemany",
"method"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L423-L427 | train |
glormph/msstitch | src/app/lookups/sqlite/base.py | DatabaseConnection.execute_sql | def execute_sql(self, sql):
"""Executes SQL and returns cursor for it"""
cursor = self.get_cursor()
cursor.execute(sql)
return cursor | python | def execute_sql(self, sql):
"""Executes SQL and returns cursor for it"""
cursor = self.get_cursor()
cursor.execute(sql)
return cursor | [
"def",
"execute_sql",
"(",
"self",
",",
"sql",
")",
":",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"sql",
")",
"return",
"cursor"
] | Executes SQL and returns cursor for it | [
"Executes",
"SQL",
"and",
"returns",
"cursor",
"for",
"it"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L429-L433 | train |
glormph/msstitch | src/app/lookups/sqlite/base.py | ResultLookupInterface.get_mzmlfile_map | def get_mzmlfile_map(self):
"""Returns dict of mzmlfilenames and their db ids"""
cursor = self.get_cursor()
cursor.execute('SELECT mzmlfile_id, mzmlfilename FROM mzmlfiles')
return {fn: fnid for fnid, fn in cursor.fetchall()} | python | def get_mzmlfile_map(self):
"""Returns dict of mzmlfilenames and their db ids"""
cursor = self.get_cursor()
cursor.execute('SELECT mzmlfile_id, mzmlfilename FROM mzmlfiles')
return {fn: fnid for fnid, fn in cursor.fetchall()} | [
"def",
"get_mzmlfile_map",
"(",
"self",
")",
":",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"'SELECT mzmlfile_id, mzmlfilename FROM mzmlfiles'",
")",
"return",
"{",
"fn",
":",
"fnid",
"for",
"fnid",
",",
"fn",
"in",
"cursor",
".",
"fetchall",
"(",
")",
"}"
] | Returns dict of mzmlfilenames and their db ids | [
"Returns",
"dict",
"of",
"mzmlfilenames",
"and",
"their",
"db",
"ids"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L440-L444 | train |
glormph/msstitch | src/app/lookups/sqlite/base.py | ResultLookupInterface.get_spectra_id | def get_spectra_id(self, fn_id, retention_time=None, scan_nr=None):
"""Returns spectra id for spectra filename and retention time"""
cursor = self.get_cursor()
sql = 'SELECT spectra_id FROM mzml WHERE mzmlfile_id=? '
values = [fn_id]
if retention_time is not None:
sql = '{0} AND retention_time=?'.format(sql)
values.append(retention_time)
if scan_nr is not None:
sql = '{0} AND scan_nr=?'.format(sql)
values.append(scan_nr)
cursor.execute(sql, tuple(values))
return cursor.fetchone()[0] | python | def get_spectra_id(self, fn_id, retention_time=None, scan_nr=None):
"""Returns spectra id for spectra filename and retention time"""
cursor = self.get_cursor()
sql = 'SELECT spectra_id FROM mzml WHERE mzmlfile_id=? '
values = [fn_id]
if retention_time is not None:
sql = '{0} AND retention_time=?'.format(sql)
values.append(retention_time)
if scan_nr is not None:
sql = '{0} AND scan_nr=?'.format(sql)
values.append(scan_nr)
cursor.execute(sql, tuple(values))
return cursor.fetchone()[0] | [
"def",
"get_spectra_id",
"(",
"self",
",",
"fn_id",
",",
"retention_time",
"=",
"None",
",",
"scan_nr",
"=",
"None",
")",
":",
"cursor",
"=",
"self",
".",
"get_cursor",
"(",
")",
"sql",
"=",
"'SELECT spectra_id FROM mzml WHERE mzmlfile_id=? '",
"values",
"=",
"[",
"fn_id",
"]",
"if",
"retention_time",
"is",
"not",
"None",
":",
"sql",
"=",
"'{0} AND retention_time=?'",
".",
"format",
"(",
"sql",
")",
"values",
".",
"append",
"(",
"retention_time",
")",
"if",
"scan_nr",
"is",
"not",
"None",
":",
"sql",
"=",
"'{0} AND scan_nr=?'",
".",
"format",
"(",
"sql",
")",
"values",
".",
"append",
"(",
"scan_nr",
")",
"cursor",
".",
"execute",
"(",
"sql",
",",
"tuple",
"(",
"values",
")",
")",
"return",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]"
] | Returns spectra id for spectra filename and retention time | [
"Returns",
"spectra",
"id",
"for",
"spectra",
"filename",
"and",
"retention",
"time"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/base.py#L446-L458 | train |
Erotemic/utool | utool/experimental/pandas_highlight.py | to_string_monkey | def to_string_monkey(df, highlight_cols=None, latex=False):
""" monkey patch to pandas to highlight the maximum value in specified
cols of a row
Example:
>>> from utool.experimental.pandas_highlight import *
>>> import pandas as pd
>>> df = pd.DataFrame(
>>> np.array([[ 0.9, 0.86886931, 0.86842073, 0.9 ],
>>> [ 0.34196218, 0.34289191, 0.34206377, 0.34252863],
>>> [ 0.34827074, 0.34827074, 0.34827074, 0.34827074],
>>> [ 0.76979453, 0.77214855, 0.77547518, 0.38850962]]),
>>> columns=['sum(fgweights)', 'sum(weighted_ratio)', 'len(matches)', 'score_lnbnn_1vM'],
>>> index=['match_state(match-v-rest)', 'match_state(nomatch-v-rest)', 'match_state(notcomp-v-rest)', 'photobomb_state']
>>> )
>>> highlight_cols = 'all'
>>> print(to_string_monkey(df, highlight_cols))
>>> print(to_string_monkey(df, highlight_cols, latex=True))
ut.editfile(pd.io.formats.printing.adjoin)
"""
try:
import pandas as pd
import utool as ut
import numpy as np
import six
if isinstance(highlight_cols, six.string_types) and highlight_cols == 'all':
highlight_cols = np.arange(len(df.columns))
# kwds = dict(buf=None, columns=None, col_space=None, header=True,
# index=True, na_rep='NaN', formatters=None,
# float_format=None, sparsify=None, index_names=True,
# justify=None, line_width=None, max_rows=None,
# max_cols=None, show_dimensions=False)
# self = pd.formats.format.DataFrameFormatter(df, **kwds)
try:
self = pd.formats.format.DataFrameFormatter(df)
except AttributeError:
self = pd.io.formats.format.DataFrameFormatter(df)
self.highlight_cols = highlight_cols
def monkey(self):
return monkey_to_str_columns(self, latex=latex)
ut.inject_func_as_method(self, monkey, '_to_str_columns', override=True, force=True)
def strip_ansi(text):
import re
ansi_escape = re.compile(r'\x1b[^m]*m')
return ansi_escape.sub('', text)
def justify_ansi(self, texts, max_len, mode='right'):
if mode == 'left':
return [x.ljust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
elif mode == 'center':
return [x.center(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
else:
return [x.rjust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
ut.inject_func_as_method(self.adj, justify_ansi, 'justify', override=True, force=True)
def strlen_ansii(self, text):
return pd.compat.strlen(strip_ansi(text), encoding=self.encoding)
ut.inject_func_as_method(self.adj, strlen_ansii, 'len', override=True, force=True)
if False:
strlen = ut.partial(strlen_ansii, self.adj) # NOQA
justfunc = ut.partial(justify_ansi, self.adj) # NOQA
# Essentially what to_string does
strcols = monkey_to_str_columns(self)
# texts = strcols[2]
space = 1
lists = strcols
str_ = self.adj.adjoin(space, *lists)
print(str_)
print(strip_ansi(str_))
self.to_string()
result = self.buf.getvalue()
# hack because adjoin is not working correctly with injected strlen
result = '\n'.join([x.rstrip() for x in result.split('\n')])
return result
except Exception as ex:
ut.printex('pandas monkey-patch is broken: {}'.format(str(ex)),
tb=True, iswarning=True)
return str(df) | python | def to_string_monkey(df, highlight_cols=None, latex=False):
""" monkey patch to pandas to highlight the maximum value in specified
cols of a row
Example:
>>> from utool.experimental.pandas_highlight import *
>>> import pandas as pd
>>> df = pd.DataFrame(
>>> np.array([[ 0.9, 0.86886931, 0.86842073, 0.9 ],
>>> [ 0.34196218, 0.34289191, 0.34206377, 0.34252863],
>>> [ 0.34827074, 0.34827074, 0.34827074, 0.34827074],
>>> [ 0.76979453, 0.77214855, 0.77547518, 0.38850962]]),
>>> columns=['sum(fgweights)', 'sum(weighted_ratio)', 'len(matches)', 'score_lnbnn_1vM'],
>>> index=['match_state(match-v-rest)', 'match_state(nomatch-v-rest)', 'match_state(notcomp-v-rest)', 'photobomb_state']
>>> )
>>> highlight_cols = 'all'
>>> print(to_string_monkey(df, highlight_cols))
>>> print(to_string_monkey(df, highlight_cols, latex=True))
ut.editfile(pd.io.formats.printing.adjoin)
"""
try:
import pandas as pd
import utool as ut
import numpy as np
import six
if isinstance(highlight_cols, six.string_types) and highlight_cols == 'all':
highlight_cols = np.arange(len(df.columns))
# kwds = dict(buf=None, columns=None, col_space=None, header=True,
# index=True, na_rep='NaN', formatters=None,
# float_format=None, sparsify=None, index_names=True,
# justify=None, line_width=None, max_rows=None,
# max_cols=None, show_dimensions=False)
# self = pd.formats.format.DataFrameFormatter(df, **kwds)
try:
self = pd.formats.format.DataFrameFormatter(df)
except AttributeError:
self = pd.io.formats.format.DataFrameFormatter(df)
self.highlight_cols = highlight_cols
def monkey(self):
return monkey_to_str_columns(self, latex=latex)
ut.inject_func_as_method(self, monkey, '_to_str_columns', override=True, force=True)
def strip_ansi(text):
import re
ansi_escape = re.compile(r'\x1b[^m]*m')
return ansi_escape.sub('', text)
def justify_ansi(self, texts, max_len, mode='right'):
if mode == 'left':
return [x.ljust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
elif mode == 'center':
return [x.center(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
else:
return [x.rjust(max_len + (len(x) - len(strip_ansi(x)))) for x in texts]
ut.inject_func_as_method(self.adj, justify_ansi, 'justify', override=True, force=True)
def strlen_ansii(self, text):
return pd.compat.strlen(strip_ansi(text), encoding=self.encoding)
ut.inject_func_as_method(self.adj, strlen_ansii, 'len', override=True, force=True)
if False:
strlen = ut.partial(strlen_ansii, self.adj) # NOQA
justfunc = ut.partial(justify_ansi, self.adj) # NOQA
# Essentially what to_string does
strcols = monkey_to_str_columns(self)
# texts = strcols[2]
space = 1
lists = strcols
str_ = self.adj.adjoin(space, *lists)
print(str_)
print(strip_ansi(str_))
self.to_string()
result = self.buf.getvalue()
# hack because adjoin is not working correctly with injected strlen
result = '\n'.join([x.rstrip() for x in result.split('\n')])
return result
except Exception as ex:
ut.printex('pandas monkey-patch is broken: {}'.format(str(ex)),
tb=True, iswarning=True)
return str(df) | [
"def",
"to_string_monkey",
"(",
"df",
",",
"highlight_cols",
"=",
"None",
",",
"latex",
"=",
"False",
")",
":",
"try",
":",
"import",
"pandas",
"as",
"pd",
"import",
"utool",
"as",
"ut",
"import",
"numpy",
"as",
"np",
"import",
"six",
"if",
"isinstance",
"(",
"highlight_cols",
",",
"six",
".",
"string_types",
")",
"and",
"highlight_cols",
"==",
"'all'",
":",
"highlight_cols",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"df",
".",
"columns",
")",
")",
"# kwds = dict(buf=None, columns=None, col_space=None, header=True,",
"# index=True, na_rep='NaN', formatters=None,",
"# float_format=None, sparsify=None, index_names=True,",
"# justify=None, line_width=None, max_rows=None,",
"# max_cols=None, show_dimensions=False)",
"# self = pd.formats.format.DataFrameFormatter(df, **kwds)",
"try",
":",
"self",
"=",
"pd",
".",
"formats",
".",
"format",
".",
"DataFrameFormatter",
"(",
"df",
")",
"except",
"AttributeError",
":",
"self",
"=",
"pd",
".",
"io",
".",
"formats",
".",
"format",
".",
"DataFrameFormatter",
"(",
"df",
")",
"self",
".",
"highlight_cols",
"=",
"highlight_cols",
"def",
"monkey",
"(",
"self",
")",
":",
"return",
"monkey_to_str_columns",
"(",
"self",
",",
"latex",
"=",
"latex",
")",
"ut",
".",
"inject_func_as_method",
"(",
"self",
",",
"monkey",
",",
"'_to_str_columns'",
",",
"override",
"=",
"True",
",",
"force",
"=",
"True",
")",
"def",
"strip_ansi",
"(",
"text",
")",
":",
"import",
"re",
"ansi_escape",
"=",
"re",
".",
"compile",
"(",
"r'\\x1b[^m]*m'",
")",
"return",
"ansi_escape",
".",
"sub",
"(",
"''",
",",
"text",
")",
"def",
"justify_ansi",
"(",
"self",
",",
"texts",
",",
"max_len",
",",
"mode",
"=",
"'right'",
")",
":",
"if",
"mode",
"==",
"'left'",
":",
"return",
"[",
"x",
".",
"ljust",
"(",
"max_len",
"+",
"(",
"len",
"(",
"x",
")",
"-",
"len",
"(",
"strip_ansi",
"(",
"x",
")",
")",
")",
")",
"for",
"x",
"in",
"texts",
"]",
"elif",
"mode",
"==",
"'center'",
":",
"return",
"[",
"x",
".",
"center",
"(",
"max_len",
"+",
"(",
"len",
"(",
"x",
")",
"-",
"len",
"(",
"strip_ansi",
"(",
"x",
")",
")",
")",
")",
"for",
"x",
"in",
"texts",
"]",
"else",
":",
"return",
"[",
"x",
".",
"rjust",
"(",
"max_len",
"+",
"(",
"len",
"(",
"x",
")",
"-",
"len",
"(",
"strip_ansi",
"(",
"x",
")",
")",
")",
")",
"for",
"x",
"in",
"texts",
"]",
"ut",
".",
"inject_func_as_method",
"(",
"self",
".",
"adj",
",",
"justify_ansi",
",",
"'justify'",
",",
"override",
"=",
"True",
",",
"force",
"=",
"True",
")",
"def",
"strlen_ansii",
"(",
"self",
",",
"text",
")",
":",
"return",
"pd",
".",
"compat",
".",
"strlen",
"(",
"strip_ansi",
"(",
"text",
")",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"ut",
".",
"inject_func_as_method",
"(",
"self",
".",
"adj",
",",
"strlen_ansii",
",",
"'len'",
",",
"override",
"=",
"True",
",",
"force",
"=",
"True",
")",
"if",
"False",
":",
"strlen",
"=",
"ut",
".",
"partial",
"(",
"strlen_ansii",
",",
"self",
".",
"adj",
")",
"# NOQA",
"justfunc",
"=",
"ut",
".",
"partial",
"(",
"justify_ansi",
",",
"self",
".",
"adj",
")",
"# NOQA",
"# Essentially what to_string does",
"strcols",
"=",
"monkey_to_str_columns",
"(",
"self",
")",
"# texts = strcols[2]",
"space",
"=",
"1",
"lists",
"=",
"strcols",
"str_",
"=",
"self",
".",
"adj",
".",
"adjoin",
"(",
"space",
",",
"*",
"lists",
")",
"print",
"(",
"str_",
")",
"print",
"(",
"strip_ansi",
"(",
"str_",
")",
")",
"self",
".",
"to_string",
"(",
")",
"result",
"=",
"self",
".",
"buf",
".",
"getvalue",
"(",
")",
"# hack because adjoin is not working correctly with injected strlen",
"result",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"x",
".",
"rstrip",
"(",
")",
"for",
"x",
"in",
"result",
".",
"split",
"(",
"'\\n'",
")",
"]",
")",
"return",
"result",
"except",
"Exception",
"as",
"ex",
":",
"ut",
".",
"printex",
"(",
"'pandas monkey-patch is broken: {}'",
".",
"format",
"(",
"str",
"(",
"ex",
")",
")",
",",
"tb",
"=",
"True",
",",
"iswarning",
"=",
"True",
")",
"return",
"str",
"(",
"df",
")"
] | monkey patch to pandas to highlight the maximum value in specified
cols of a row
Example:
>>> from utool.experimental.pandas_highlight import *
>>> import pandas as pd
>>> df = pd.DataFrame(
>>> np.array([[ 0.9, 0.86886931, 0.86842073, 0.9 ],
>>> [ 0.34196218, 0.34289191, 0.34206377, 0.34252863],
>>> [ 0.34827074, 0.34827074, 0.34827074, 0.34827074],
>>> [ 0.76979453, 0.77214855, 0.77547518, 0.38850962]]),
>>> columns=['sum(fgweights)', 'sum(weighted_ratio)', 'len(matches)', 'score_lnbnn_1vM'],
>>> index=['match_state(match-v-rest)', 'match_state(nomatch-v-rest)', 'match_state(notcomp-v-rest)', 'photobomb_state']
>>> )
>>> highlight_cols = 'all'
>>> print(to_string_monkey(df, highlight_cols))
>>> print(to_string_monkey(df, highlight_cols, latex=True))
ut.editfile(pd.io.formats.printing.adjoin) | [
"monkey",
"patch",
"to",
"pandas",
"to",
"highlight",
"the",
"maximum",
"value",
"in",
"specified",
"cols",
"of",
"a",
"row"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/pandas_highlight.py#L131-L214 | train |
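The core trick in the monkey patch above is measuring cell width with ANSI colour codes stripped, so highlighted cells still line up; a standalone sketch of that helper:

import re

ANSI_ESCAPE = re.compile(r'\x1b[^m]*m')   # same pattern as strip_ansi above

def visible_len(text):
    # Length of the text as it appears on screen, ignoring colour codes.
    return len(ANSI_ESCAPE.sub('', text))

print(visible_len('\x1b[31mhello\x1b[0m'))  # 5, although len() reports 14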
neithere/monk | monk/validators.py | translate | def translate(value):
"""
Translates given schema from "pythonic" syntax to a validator.
Usage::
>>> translate(str)
IsA(str)
>>> translate('hello')
IsA(str, default='hello')
"""
if isinstance(value, BaseValidator):
return value
if value is None:
return Anything()
if isinstance(value, type):
return IsA(value)
if type(value) in compat.func_types:
real_value = value()
return IsA(type(real_value), default=real_value)
if isinstance(value, list):
if value == []:
# no inner spec, just an empty list as the default value
return IsA(list)
elif len(value) == 1:
# the only item as spec for each item of the collection
return ListOf(translate(value[0]))
else:
raise StructureSpecificationError(
'Expected a list containing exactly 1 item; '
'got {cnt}: {spec}'.format(cnt=len(value), spec=value))
if isinstance(value, dict):
if not value:
return IsA(dict)
items = []
for k, v in value.items():
if isinstance(k, BaseValidator):
k_validator = k
else:
k_validator = translate(k)
default = k_validator.get_default_for(None)
if default is not None:
k_validator = Equals(default)
v_validator = translate(v)
items.append((k_validator, v_validator))
return DictOf(items)
return IsA(type(value), default=value) | python | def translate(value):
"""
Translates given schema from "pythonic" syntax to a validator.
Usage::
>>> translate(str)
IsA(str)
>>> translate('hello')
IsA(str, default='hello')
"""
if isinstance(value, BaseValidator):
return value
if value is None:
return Anything()
if isinstance(value, type):
return IsA(value)
if type(value) in compat.func_types:
real_value = value()
return IsA(type(real_value), default=real_value)
if isinstance(value, list):
if value == []:
# no inner spec, just an empty list as the default value
return IsA(list)
elif len(value) == 1:
# the only item as spec for each item of the collection
return ListOf(translate(value[0]))
else:
raise StructureSpecificationError(
'Expected a list containing exactly 1 item; '
'got {cnt}: {spec}'.format(cnt=len(value), spec=value))
if isinstance(value, dict):
if not value:
return IsA(dict)
items = []
for k, v in value.items():
if isinstance(k, BaseValidator):
k_validator = k
else:
k_validator = translate(k)
default = k_validator.get_default_for(None)
if default is not None:
k_validator = Equals(default)
v_validator = translate(v)
items.append((k_validator, v_validator))
return DictOf(items)
return IsA(type(value), default=value) | [
"def",
"translate",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"BaseValidator",
")",
":",
"return",
"value",
"if",
"value",
"is",
"None",
":",
"return",
"Anything",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"type",
")",
":",
"return",
"IsA",
"(",
"value",
")",
"if",
"type",
"(",
"value",
")",
"in",
"compat",
".",
"func_types",
":",
"real_value",
"=",
"value",
"(",
")",
"return",
"IsA",
"(",
"type",
"(",
"real_value",
")",
",",
"default",
"=",
"real_value",
")",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"if",
"value",
"==",
"[",
"]",
":",
"# no inner spec, just an empty list as the default value",
"return",
"IsA",
"(",
"list",
")",
"elif",
"len",
"(",
"value",
")",
"==",
"1",
":",
"# the only item as spec for each item of the collection",
"return",
"ListOf",
"(",
"translate",
"(",
"value",
"[",
"0",
"]",
")",
")",
"else",
":",
"raise",
"StructureSpecificationError",
"(",
"'Expected a list containing exactly 1 item; '",
"'got {cnt}: {spec}'",
".",
"format",
"(",
"cnt",
"=",
"len",
"(",
"value",
")",
",",
"spec",
"=",
"value",
")",
")",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"if",
"not",
"value",
":",
"return",
"IsA",
"(",
"dict",
")",
"items",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"k",
",",
"BaseValidator",
")",
":",
"k_validator",
"=",
"k",
"else",
":",
"k_validator",
"=",
"translate",
"(",
"k",
")",
"default",
"=",
"k_validator",
".",
"get_default_for",
"(",
"None",
")",
"if",
"default",
"is",
"not",
"None",
":",
"k_validator",
"=",
"Equals",
"(",
"default",
")",
"v_validator",
"=",
"translate",
"(",
"v",
")",
"items",
".",
"append",
"(",
"(",
"k_validator",
",",
"v_validator",
")",
")",
"return",
"DictOf",
"(",
"items",
")",
"return",
"IsA",
"(",
"type",
"(",
"value",
")",
",",
"default",
"=",
"value",
")"
] | Translates given schema from "pythonic" syntax to a validator.
Usage::
>>> translate(str)
IsA(str)
>>> translate('hello')
IsA(str, default='hello') | [
"Translates",
"given",
"schema",
"from",
"pythonic",
"syntax",
"to",
"a",
"validator",
"."
] | 4b2ee5152b081ac288ce8568422a027b5e7d2b1c | https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/validators.py#L699-L753 | train |
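A hedged usage sketch for translate(); the import path follows the path column above and the schema is invented for illustration.

from monk.validators import translate  # assumed import path

spec = {'name': str, 'age': 18, 'tags': [str]}
validator = translate(spec)  # dict spec -> DictOf with nested IsA/ListOf validators
print(validator)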
neithere/monk | monk/validators.py | DictOf._merge | def _merge(self, value):
"""
Returns a dictionary based on `value` with each value recursively
merged with `spec`.
"""
if value is not None and not isinstance(value, dict):
# bogus value; will not pass validation but should be preserved
return value
if not self._pairs:
return {}
collected = {}
# collected.update(value)
for k_validator, v_validator in self._pairs:
k_default = k_validator.get_default_for(None)
if k_default is None:
continue
# even None is ok
if value:
v_for_this_k = value.get(k_default)
else:
v_for_this_k = None
v_default = v_validator.get_default_for(v_for_this_k)
collected.update({k_default: v_default})
if value:
for k, v in value.items():
if k not in collected:
collected[k] = v
return collected | python | def _merge(self, value):
"""
Returns a dictionary based on `value` with each value recursively
merged with `spec`.
"""
if value is not None and not isinstance(value, dict):
# bogus value; will not pass validation but should be preserved
return value
if not self._pairs:
return {}
collected = {}
# collected.update(value)
for k_validator, v_validator in self._pairs:
k_default = k_validator.get_default_for(None)
if k_default is None:
continue
# even None is ok
if value:
v_for_this_k = value.get(k_default)
else:
v_for_this_k = None
v_default = v_validator.get_default_for(v_for_this_k)
collected.update({k_default: v_default})
if value:
for k, v in value.items():
if k not in collected:
collected[k] = v
return collected | [
"def",
"_merge",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"# bogus value; will not pass validation but should be preserved",
"return",
"value",
"if",
"not",
"self",
".",
"_pairs",
":",
"return",
"{",
"}",
"collected",
"=",
"{",
"}",
"# collected.update(value)",
"for",
"k_validator",
",",
"v_validator",
"in",
"self",
".",
"_pairs",
":",
"k_default",
"=",
"k_validator",
".",
"get_default_for",
"(",
"None",
")",
"if",
"k_default",
"is",
"None",
":",
"continue",
"# even None is ok",
"if",
"value",
":",
"v_for_this_k",
"=",
"value",
".",
"get",
"(",
"k_default",
")",
"else",
":",
"v_for_this_k",
"=",
"None",
"v_default",
"=",
"v_validator",
".",
"get_default_for",
"(",
"v_for_this_k",
")",
"collected",
".",
"update",
"(",
"{",
"k_default",
":",
"v_default",
"}",
")",
"if",
"value",
":",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"if",
"k",
"not",
"in",
"collected",
":",
"collected",
"[",
"k",
"]",
"=",
"v",
"return",
"collected"
] | Returns a dictionary based on `value` with each value recursively
merged with `spec`. | [
"Returns",
"a",
"dictionary",
"based",
"on",
"value",
"with",
"each",
"value",
"recursively",
"merged",
"with",
"spec",
"."
] | 4b2ee5152b081ac288ce8568422a027b5e7d2b1c | https://github.com/neithere/monk/blob/4b2ee5152b081ac288ce8568422a027b5e7d2b1c/monk/validators.py#L589-L622 | train |
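A simplified, non-recursive illustration of the merge rule implemented above: defaults from the spec are filled in, caller-supplied values win, and unknown keys pass through untouched.

def merge_defaults(defaults, value):
    merged = dict(defaults)        # start from the spec's defaults
    merged.update(value or {})     # caller-provided values take precedence
    return merged

print(merge_defaults({'name': 'unnamed', 'age': 0}, {'age': 7, 'extra': 1}))
# {'name': 'unnamed', 'age': 7, 'extra': 1}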
Erotemic/utool | utool/_internal/win32_send_keys.py | handle_code | def handle_code(code):
"Handle a key or sequence of keys in braces"
code_keys = []
# it is a known code (e.g. {DOWN}, {ENTER}, etc)
if code in CODES:
code_keys.append(VirtualKeyAction(CODES[code]))
# it is an escaped modifier e.g. {%}, {^}, {+}
elif len(code) == 1:
code_keys.append(KeyAction(code))
# it is a repetition or a pause {DOWN 5}, {PAUSE 1.3}
elif ' ' in code:
to_repeat, count = code.rsplit(None, 1)
if to_repeat == "PAUSE":
try:
pause_time = float(count)
except ValueError:
raise KeySequenceError('invalid pause time %s'% count)
code_keys.append(PauseAction(pause_time))
else:
try:
count = int(count)
except ValueError:
raise KeySequenceError(
'invalid repetition count %s'% count)
# If the value in to_repeat is a VK e.g. DOWN
# we need to add the code repeated
if to_repeat in CODES:
code_keys.extend(
[VirtualKeyAction(CODES[to_repeat])] * count)
# otherwise parse the keys and we get back a KeyAction
else:
to_repeat = parse_keys(to_repeat)
if isinstance(to_repeat, list):
keys = to_repeat * count
else:
keys = [to_repeat] * count
code_keys.extend(keys)
else:
raise RuntimeError("Unknown code: %s"% code)
return code_keys | python | def handle_code(code):
"Handle a key or sequence of keys in braces"
code_keys = []
# it is a known code (e.g. {DOWN}, {ENTER}, etc)
if code in CODES:
code_keys.append(VirtualKeyAction(CODES[code]))
# it is an escaped modifier e.g. {%}, {^}, {+}
elif len(code) == 1:
code_keys.append(KeyAction(code))
# it is a repetition or a pause {DOWN 5}, {PAUSE 1.3}
elif ' ' in code:
to_repeat, count = code.rsplit(None, 1)
if to_repeat == "PAUSE":
try:
pause_time = float(count)
except ValueError:
raise KeySequenceError('invalid pause time %s'% count)
code_keys.append(PauseAction(pause_time))
else:
try:
count = int(count)
except ValueError:
raise KeySequenceError(
'invalid repetition count %s'% count)
# If the value in to_repeat is a VK e.g. DOWN
# we need to add the code repeated
if to_repeat in CODES:
code_keys.extend(
[VirtualKeyAction(CODES[to_repeat])] * count)
# otherwise parse the keys and we get back a KeyAction
else:
to_repeat = parse_keys(to_repeat)
if isinstance(to_repeat, list):
keys = to_repeat * count
else:
keys = [to_repeat] * count
code_keys.extend(keys)
else:
raise RuntimeError("Unknown code: %s"% code)
return code_keys | [
"def",
"handle_code",
"(",
"code",
")",
":",
"code_keys",
"=",
"[",
"]",
"# it is a known code (e.g. {DOWN}, {ENTER}, etc)",
"if",
"code",
"in",
"CODES",
":",
"code_keys",
".",
"append",
"(",
"VirtualKeyAction",
"(",
"CODES",
"[",
"code",
"]",
")",
")",
"# it is an escaped modifier e.g. {%}, {^}, {+}",
"elif",
"len",
"(",
"code",
")",
"==",
"1",
":",
"code_keys",
".",
"append",
"(",
"KeyAction",
"(",
"code",
")",
")",
"# it is a repetition or a pause {DOWN 5}, {PAUSE 1.3}",
"elif",
"' '",
"in",
"code",
":",
"to_repeat",
",",
"count",
"=",
"code",
".",
"rsplit",
"(",
"None",
",",
"1",
")",
"if",
"to_repeat",
"==",
"\"PAUSE\"",
":",
"try",
":",
"pause_time",
"=",
"float",
"(",
"count",
")",
"except",
"ValueError",
":",
"raise",
"KeySequenceError",
"(",
"'invalid pause time %s'",
"%",
"count",
")",
"code_keys",
".",
"append",
"(",
"PauseAction",
"(",
"pause_time",
")",
")",
"else",
":",
"try",
":",
"count",
"=",
"int",
"(",
"count",
")",
"except",
"ValueError",
":",
"raise",
"KeySequenceError",
"(",
"'invalid repetition count %s'",
"%",
"count",
")",
"# If the value in to_repeat is a VK e.g. DOWN",
"# we need to add the code repeated",
"if",
"to_repeat",
"in",
"CODES",
":",
"code_keys",
".",
"extend",
"(",
"[",
"VirtualKeyAction",
"(",
"CODES",
"[",
"to_repeat",
"]",
")",
"]",
"*",
"count",
")",
"# otherwise parse the keys and we get back a KeyAction",
"else",
":",
"to_repeat",
"=",
"parse_keys",
"(",
"to_repeat",
")",
"if",
"isinstance",
"(",
"to_repeat",
",",
"list",
")",
":",
"keys",
"=",
"to_repeat",
"*",
"count",
"else",
":",
"keys",
"=",
"[",
"to_repeat",
"]",
"*",
"count",
"code_keys",
".",
"extend",
"(",
"keys",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown code: %s\"",
"%",
"code",
")",
"return",
"code_keys"
] | Handle a key or sequence of keys in braces | [
"Handle",
"a",
"key",
"or",
"sequence",
"of",
"keys",
"in",
"braces"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/win32_send_keys.py#L478-L523 | train |
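A tiny sketch of the repetition rule handled above, without the win32 action classes: a brace code such as {LEFT 4} splits on its last whitespace and the named key is repeated.

code = 'LEFT 4'
to_repeat, count = code.rsplit(None, 1)
print([to_repeat] * int(count))   # ['LEFT', 'LEFT', 'LEFT', 'LEFT']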
Erotemic/utool | utool/_internal/win32_send_keys.py | parse_keys | def parse_keys(string,
with_spaces = False,
with_tabs = False,
with_newlines = False,
modifiers = None):
"Return the parsed keys"
keys = []
if not modifiers:
modifiers = []
index = 0
while index < len(string):
c = string[index]
index += 1
# check if one of CTRL, SHIFT, ALT has been pressed
if c in MODIFIERS.keys():
modifier = MODIFIERS[c]
# remember that we are currently modified
modifiers.append(modifier)
# hold down the modifier key
keys.append(VirtualKeyAction(modifier, up = False))
if DEBUG:
print("MODS+", modifiers)
continue
# Apply modifiers over a bunch of characters (not just one!)
elif c == "(":
# find the end of the bracketed text
end_pos = string.find(")", index)
if end_pos == -1:
raise KeySequenceError('`)` not found')
keys.extend(
parse_keys(string[index:end_pos], modifiers = modifiers))
index = end_pos + 1
# Escape or named key
elif c == "{":
# We start searching from index + 1 to account for the case {}}
end_pos = string.find("}", index + 1)
if end_pos == -1:
raise KeySequenceError('`}` not found')
code = string[index:end_pos]
index = end_pos + 1
keys.extend(handle_code(code))
# unmatched ")"
elif c == ')':
raise KeySequenceError('`)` should be preceeded by `(`')
# unmatched "}"
elif c == '}':
raise KeySequenceError('`}` should be preceeded by `{`')
# so it is a normal character
else:
# don't output white space unless flags to output have been set
if (c == ' ' and not with_spaces or
c == '\t' and not with_tabs or
c == '\n' and not with_newlines):
continue
# output nuewline
if c in ('~', '\n'):
keys.append(VirtualKeyAction(CODES["ENTER"]))
# safest are the virtual keys - so if our key is a virtual key
# use a VirtualKeyAction
#if ord(c) in CODE_NAMES:
# keys.append(VirtualKeyAction(ord(c)))
elif modifiers:
keys.append(EscapedKeyAction(c))
else:
keys.append(KeyAction(c))
# as we have handled the text - release the modifiers
while modifiers:
if DEBUG:
print("MODS-", modifiers)
keys.append(VirtualKeyAction(modifiers.pop(), down = False))
# just in case there were any modifiers left pressed - release them
while modifiers:
keys.append(VirtualKeyAction(modifiers.pop(), down = False))
return keys | python | def parse_keys(string,
with_spaces = False,
with_tabs = False,
with_newlines = False,
modifiers = None):
"Return the parsed keys"
keys = []
if not modifiers:
modifiers = []
index = 0
while index < len(string):
c = string[index]
index += 1
# check if one of CTRL, SHIFT, ALT has been pressed
if c in MODIFIERS.keys():
modifier = MODIFIERS[c]
# remember that we are currently modified
modifiers.append(modifier)
# hold down the modifier key
keys.append(VirtualKeyAction(modifier, up = False))
if DEBUG:
print("MODS+", modifiers)
continue
# Apply modifiers over a bunch of characters (not just one!)
elif c == "(":
# find the end of the bracketed text
end_pos = string.find(")", index)
if end_pos == -1:
raise KeySequenceError('`)` not found')
keys.extend(
parse_keys(string[index:end_pos], modifiers = modifiers))
index = end_pos + 1
# Escape or named key
elif c == "{":
# We start searching from index + 1 to account for the case {}}
end_pos = string.find("}", index + 1)
if end_pos == -1:
raise KeySequenceError('`}` not found')
code = string[index:end_pos]
index = end_pos + 1
keys.extend(handle_code(code))
# unmatched ")"
elif c == ')':
raise KeySequenceError('`)` should be preceeded by `(`')
# unmatched "}"
elif c == '}':
raise KeySequenceError('`}` should be preceeded by `{`')
# so it is a normal character
else:
# don't output white space unless flags to output have been set
if (c == ' ' and not with_spaces or
c == '\t' and not with_tabs or
c == '\n' and not with_newlines):
continue
# output nuewline
if c in ('~', '\n'):
keys.append(VirtualKeyAction(CODES["ENTER"]))
# safest are the virtual keys - so if our key is a virtual key
# use a VirtualKeyAction
#if ord(c) in CODE_NAMES:
# keys.append(VirtualKeyAction(ord(c)))
elif modifiers:
keys.append(EscapedKeyAction(c))
else:
keys.append(KeyAction(c))
# as we have handled the text - release the modifiers
while modifiers:
if DEBUG:
print("MODS-", modifiers)
keys.append(VirtualKeyAction(modifiers.pop(), down = False))
# just in case there were any modifiers left pressed - release them
while modifiers:
keys.append(VirtualKeyAction(modifiers.pop(), down = False))
return keys | [
"def",
"parse_keys",
"(",
"string",
",",
"with_spaces",
"=",
"False",
",",
"with_tabs",
"=",
"False",
",",
"with_newlines",
"=",
"False",
",",
"modifiers",
"=",
"None",
")",
":",
"keys",
"=",
"[",
"]",
"if",
"not",
"modifiers",
":",
"modifiers",
"=",
"[",
"]",
"index",
"=",
"0",
"while",
"index",
"<",
"len",
"(",
"string",
")",
":",
"c",
"=",
"string",
"[",
"index",
"]",
"index",
"+=",
"1",
"# check if one of CTRL, SHIFT, ALT has been pressed",
"if",
"c",
"in",
"MODIFIERS",
".",
"keys",
"(",
")",
":",
"modifier",
"=",
"MODIFIERS",
"[",
"c",
"]",
"# remember that we are currently modified",
"modifiers",
".",
"append",
"(",
"modifier",
")",
"# hold down the modifier key",
"keys",
".",
"append",
"(",
"VirtualKeyAction",
"(",
"modifier",
",",
"up",
"=",
"False",
")",
")",
"if",
"DEBUG",
":",
"print",
"(",
"\"MODS+\"",
",",
"modifiers",
")",
"continue",
"# Apply modifiers over a bunch of characters (not just one!)",
"elif",
"c",
"==",
"\"(\"",
":",
"# find the end of the bracketed text",
"end_pos",
"=",
"string",
".",
"find",
"(",
"\")\"",
",",
"index",
")",
"if",
"end_pos",
"==",
"-",
"1",
":",
"raise",
"KeySequenceError",
"(",
"'`)` not found'",
")",
"keys",
".",
"extend",
"(",
"parse_keys",
"(",
"string",
"[",
"index",
":",
"end_pos",
"]",
",",
"modifiers",
"=",
"modifiers",
")",
")",
"index",
"=",
"end_pos",
"+",
"1",
"# Escape or named key",
"elif",
"c",
"==",
"\"{\"",
":",
"# We start searching from index + 1 to account for the case {}}",
"end_pos",
"=",
"string",
".",
"find",
"(",
"\"}\"",
",",
"index",
"+",
"1",
")",
"if",
"end_pos",
"==",
"-",
"1",
":",
"raise",
"KeySequenceError",
"(",
"'`}` not found'",
")",
"code",
"=",
"string",
"[",
"index",
":",
"end_pos",
"]",
"index",
"=",
"end_pos",
"+",
"1",
"keys",
".",
"extend",
"(",
"handle_code",
"(",
"code",
")",
")",
"# unmatched \")\"",
"elif",
"c",
"==",
"')'",
":",
"raise",
"KeySequenceError",
"(",
"'`)` should be preceeded by `(`'",
")",
"# unmatched \"}\"",
"elif",
"c",
"==",
"'}'",
":",
"raise",
"KeySequenceError",
"(",
"'`}` should be preceeded by `{`'",
")",
"# so it is a normal character",
"else",
":",
"# don't output white space unless flags to output have been set",
"if",
"(",
"c",
"==",
"' '",
"and",
"not",
"with_spaces",
"or",
"c",
"==",
"'\\t'",
"and",
"not",
"with_tabs",
"or",
"c",
"==",
"'\\n'",
"and",
"not",
"with_newlines",
")",
":",
"continue",
"# output nuewline",
"if",
"c",
"in",
"(",
"'~'",
",",
"'\\n'",
")",
":",
"keys",
".",
"append",
"(",
"VirtualKeyAction",
"(",
"CODES",
"[",
"\"ENTER\"",
"]",
")",
")",
"# safest are the virtual keys - so if our key is a virtual key",
"# use a VirtualKeyAction",
"#if ord(c) in CODE_NAMES:",
"# keys.append(VirtualKeyAction(ord(c)))",
"elif",
"modifiers",
":",
"keys",
".",
"append",
"(",
"EscapedKeyAction",
"(",
"c",
")",
")",
"else",
":",
"keys",
".",
"append",
"(",
"KeyAction",
"(",
"c",
")",
")",
"# as we have handled the text - release the modifiers",
"while",
"modifiers",
":",
"if",
"DEBUG",
":",
"print",
"(",
"\"MODS-\"",
",",
"modifiers",
")",
"keys",
".",
"append",
"(",
"VirtualKeyAction",
"(",
"modifiers",
".",
"pop",
"(",
")",
",",
"down",
"=",
"False",
")",
")",
"# just in case there were any modifiers left pressed - release them",
"while",
"modifiers",
":",
"keys",
".",
"append",
"(",
"VirtualKeyAction",
"(",
"modifiers",
".",
"pop",
"(",
")",
",",
"down",
"=",
"False",
")",
")",
"return",
"keys"
] | Return the parsed keys | [
"Return",
"the",
"parsed",
"keys"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/win32_send_keys.py#L526-L614 | train |
Erotemic/utool | utool/_internal/win32_send_keys.py | SendKeys | def SendKeys(keys,
pause=0.05,
with_spaces=False,
with_tabs=False,
with_newlines=False,
turn_off_numlock=True):
"Parse the keys and type them"
keys = parse_keys(keys, with_spaces, with_tabs, with_newlines)
for k in keys:
k.Run()
time.sleep(pause) | python | def SendKeys(keys,
pause=0.05,
with_spaces=False,
with_tabs=False,
with_newlines=False,
turn_off_numlock=True):
"Parse the keys and type them"
keys = parse_keys(keys, with_spaces, with_tabs, with_newlines)
for k in keys:
k.Run()
time.sleep(pause) | [
"def",
"SendKeys",
"(",
"keys",
",",
"pause",
"=",
"0.05",
",",
"with_spaces",
"=",
"False",
",",
"with_tabs",
"=",
"False",
",",
"with_newlines",
"=",
"False",
",",
"turn_off_numlock",
"=",
"True",
")",
":",
"keys",
"=",
"parse_keys",
"(",
"keys",
",",
"with_spaces",
",",
"with_tabs",
",",
"with_newlines",
")",
"for",
"k",
"in",
"keys",
":",
"k",
".",
"Run",
"(",
")",
"time",
".",
"sleep",
"(",
"pause",
")"
] | Parse the keys and type them | [
"Parse",
"the",
"keys",
"and",
"type",
"them"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/win32_send_keys.py#L624-L635 | train |
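A hedged, Windows-only usage sketch for SendKeys; the import path is assumed from the path column, and the key string follows the grammar parsed above ({SPACE} and {ENTER} are named codes).

from utool._internal.win32_send_keys import SendKeys  # assumed import path

# Types "Hello World!" into the currently focused window, then presses Enter.
SendKeys('Hello{SPACE}World!{ENTER}', pause=0.1)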
Erotemic/utool | utool/_internal/win32_send_keys.py | main | def main():
"Send some test strings"
actions = """
{LWIN}
{PAUSE .25}
r
{PAUSE .25}
Notepad.exe{ENTER}
{PAUSE 1}
Hello{SPACE}World!
{PAUSE 1}
%{F4}
{PAUSE .25}
n
"""
SendKeys(actions, pause = .1)
keys = parse_keys(actions)
for k in keys:
print(k)
k.Run()
time.sleep(.1)
test_strings = [
"\n"
"(aa)some text\n",
"(a)some{ }text\n",
"(b)some{{}text\n",
"(c)some{+}text\n",
"(d)so%me{ab 4}text",
"(e)so%me{LEFT 4}text",
"(f)so%me{ENTER 4}text",
"(g)so%me{^aa 4}text",
"(h)some +(asdf)text",
"(i)some %^+(asdf)text",
"(j)some %^+a text+",
"(k)some %^+a tex+{&}",
"(l)some %^+a tex+(dsf)",
"",
]
for s in test_strings:
print(repr(s))
keys = parse_keys(s, with_newlines = True)
print(keys)
for k in keys:
k.Run()
time.sleep(.1)
print() | python | def main():
"Send some test strings"
actions = """
{LWIN}
{PAUSE .25}
r
{PAUSE .25}
Notepad.exe{ENTER}
{PAUSE 1}
Hello{SPACE}World!
{PAUSE 1}
%{F4}
{PAUSE .25}
n
"""
SendKeys(actions, pause = .1)
keys = parse_keys(actions)
for k in keys:
print(k)
k.Run()
time.sleep(.1)
test_strings = [
"\n"
"(aa)some text\n",
"(a)some{ }text\n",
"(b)some{{}text\n",
"(c)some{+}text\n",
"(d)so%me{ab 4}text",
"(e)so%me{LEFT 4}text",
"(f)so%me{ENTER 4}text",
"(g)so%me{^aa 4}text",
"(h)some +(asdf)text",
"(i)some %^+(asdf)text",
"(j)some %^+a text+",
"(k)some %^+a tex+{&}",
"(l)some %^+a tex+(dsf)",
"",
]
for s in test_strings:
print(repr(s))
keys = parse_keys(s, with_newlines = True)
print(keys)
for k in keys:
k.Run()
time.sleep(.1)
print() | [
"def",
"main",
"(",
")",
":",
"actions",
"=",
"\"\"\"\n {LWIN}\n {PAUSE .25}\n r\n {PAUSE .25}\n Notepad.exe{ENTER}\n {PAUSE 1}\n Hello{SPACE}World!\n {PAUSE 1}\n %{F4}\n {PAUSE .25}\n n\n \"\"\"",
"SendKeys",
"(",
"actions",
",",
"pause",
"=",
".1",
")",
"keys",
"=",
"parse_keys",
"(",
"actions",
")",
"for",
"k",
"in",
"keys",
":",
"print",
"(",
"k",
")",
"k",
".",
"Run",
"(",
")",
"time",
".",
"sleep",
"(",
".1",
")",
"test_strings",
"=",
"[",
"\"\\n\"",
"\"(aa)some text\\n\"",
",",
"\"(a)some{ }text\\n\"",
",",
"\"(b)some{{}text\\n\"",
",",
"\"(c)some{+}text\\n\"",
",",
"\"(d)so%me{ab 4}text\"",
",",
"\"(e)so%me{LEFT 4}text\"",
",",
"\"(f)so%me{ENTER 4}text\"",
",",
"\"(g)so%me{^aa 4}text\"",
",",
"\"(h)some +(asdf)text\"",
",",
"\"(i)some %^+(asdf)text\"",
",",
"\"(j)some %^+a text+\"",
",",
"\"(k)some %^+a tex+{&}\"",
",",
"\"(l)some %^+a tex+(dsf)\"",
",",
"\"\"",
",",
"]",
"for",
"s",
"in",
"test_strings",
":",
"print",
"(",
"repr",
"(",
"s",
")",
")",
"keys",
"=",
"parse_keys",
"(",
"s",
",",
"with_newlines",
"=",
"True",
")",
"print",
"(",
"keys",
")",
"for",
"k",
"in",
"keys",
":",
"k",
".",
"Run",
"(",
")",
"time",
".",
"sleep",
"(",
".1",
")",
"print",
"(",
")"
] | Send some test strings | [
"Send",
"some",
"test",
"strings"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/win32_send_keys.py#L638-L688 | train |
Erotemic/utool | utool/_internal/win32_send_keys.py | KeyAction.GetInput | def GetInput(self):
"Build the INPUT structure for the action"
actions = 1
# if both up and down
if self.up and self.down:
actions = 2
inputs = (INPUT * actions)()
vk, scan, flags = self._get_key_info()
for inp in inputs:
inp.type = INPUT_KEYBOARD
inp._.ki.wVk = vk
inp._.ki.wScan = scan
inp._.ki.dwFlags |= flags
# if we are releasing - then let it up
if self.up:
inputs[-1]._.ki.dwFlags |= KEYEVENTF_KEYUP
return inputs | python | def GetInput(self):
"Build the INPUT structure for the action"
actions = 1
# if both up and down
if self.up and self.down:
actions = 2
inputs = (INPUT * actions)()
vk, scan, flags = self._get_key_info()
for inp in inputs:
inp.type = INPUT_KEYBOARD
inp._.ki.wVk = vk
inp._.ki.wScan = scan
inp._.ki.dwFlags |= flags
# if we are releasing - then let it up
if self.up:
inputs[-1]._.ki.dwFlags |= KEYEVENTF_KEYUP
return inputs | [
"def",
"GetInput",
"(",
"self",
")",
":",
"actions",
"=",
"1",
"# if both up and down",
"if",
"self",
".",
"up",
"and",
"self",
".",
"down",
":",
"actions",
"=",
"2",
"inputs",
"=",
"(",
"INPUT",
"*",
"actions",
")",
"(",
")",
"vk",
",",
"scan",
",",
"flags",
"=",
"self",
".",
"_get_key_info",
"(",
")",
"for",
"inp",
"in",
"inputs",
":",
"inp",
".",
"type",
"=",
"INPUT_KEYBOARD",
"inp",
".",
"_",
".",
"ki",
".",
"wVk",
"=",
"vk",
"inp",
".",
"_",
".",
"ki",
".",
"wScan",
"=",
"scan",
"inp",
".",
"_",
".",
"ki",
".",
"dwFlags",
"|=",
"flags",
"# if we are releasing - then let it up",
"if",
"self",
".",
"up",
":",
"inputs",
"[",
"-",
"1",
"]",
".",
"_",
".",
"ki",
".",
"dwFlags",
"|=",
"KEYEVENTF_KEYUP",
"return",
"inputs"
] | Build the INPUT structure for the action | [
"Build",
"the",
"INPUT",
"structure",
"for",
"the",
"action"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/win32_send_keys.py#L312-L334 | train |
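The interesting part of `GetInput` is the bookkeeping: a press-and-release action needs two INPUT records, and only the last record carries the key-up flag. Below is a pure-Python restatement of that logic for clarity; `input_flags` is a hypothetical helper (the real method works on ctypes INPUT structures), and the `KEYEVENTF_KEYUP` value is the standard Win32 constant.

KEYEVENTF_KEYUP = 0x0002  # standard Win32 constant for a key-release event

def input_flags(down, up, base_flags=0):
    # One INPUT record for press-only or release-only, two for press-and-release.
    count = 2 if (down and up) else 1
    flags = [base_flags] * count
    if up:
        flags[-1] |= KEYEVENTF_KEYUP  # the release always goes on the last record
    return flags

assert input_flags(down=True, up=True) == [0, KEYEVENTF_KEYUP]
assert input_flags(down=True, up=False) == [0]
assert input_flags(down=False, up=True) == [KEYEVENTF_KEYUP]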
Erotemic/utool | utool/_internal/win32_send_keys.py | KeyAction.Run | def Run(self):
"Execute the action"
inputs = self.GetInput()
return SendInput(
len(inputs),
ctypes.byref(inputs),
ctypes.sizeof(INPUT)) | python | def Run(self):
"Execute the action"
inputs = self.GetInput()
return SendInput(
len(inputs),
ctypes.byref(inputs),
ctypes.sizeof(INPUT)) | [
"def",
"Run",
"(",
"self",
")",
":",
"inputs",
"=",
"self",
".",
"GetInput",
"(",
")",
"return",
"SendInput",
"(",
"len",
"(",
"inputs",
")",
",",
"ctypes",
".",
"byref",
"(",
"inputs",
")",
",",
"ctypes",
".",
"sizeof",
"(",
"INPUT",
")",
")"
] | Execute the action | [
"Execute",
"the",
"action"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/win32_send_keys.py#L336-L342 | train |
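`Run` simply hands the built array to `SendInput`, so its return value is whatever that Win32 call reports, which is the number of events successfully injected. A hedged usage sketch (Windows only, assuming the module is importable) that sums that count over a short action string:

from utool._internal.win32_send_keys import parse_keys

sent = 0
for key in parse_keys("hi{ENTER}"):
    sent += key.Run()  # SendInput reports how many INPUT events were injected
print("events injected:", sent)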
Erotemic/utool | utool/_internal/win32_send_keys.py | KeyAction._get_down_up_string | def _get_down_up_string(self):
"""Return a string that will show whether the string is up or down
return 'down' if the key is a press only
return 'up' if the key is up only
return '' if the key is up & down (as default)
"""
down_up = ""
if not (self.down and self.up):
if self.down:
down_up = "down"
elif self.up:
down_up = "up"
return down_up | python | def _get_down_up_string(self):
"""Return a string that will show whether the string is up or down
return 'down' if the key is a press only
return 'up' if the key is up only
return '' if the key is up & down (as default)
"""
down_up = ""
if not (self.down and self.up):
if self.down:
down_up = "down"
elif self.up:
down_up = "up"
return down_up | [
"def",
"_get_down_up_string",
"(",
"self",
")",
":",
"down_up",
"=",
"\"\"",
"if",
"not",
"(",
"self",
".",
"down",
"and",
"self",
".",
"up",
")",
":",
"if",
"self",
".",
"down",
":",
"down_up",
"=",
"\"down\"",
"elif",
"self",
".",
"up",
":",
"down_up",
"=",
"\"up\"",
"return",
"down_up"
] | Return a string that will show whether the string is up or down
return 'down' if the key is a press only
return 'up' if the key is up only
return '' if the key is up & down (as default) | [
"Return",
"a",
"string",
"that",
"will",
"show",
"whether",
"the",
"string",
"is",
"up",
"or",
"down"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/win32_send_keys.py#L344-L357 | train |
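The down/up-suffix logic of `_get_down_up_string`, restated as a standalone function for clarity; `down_up_string` is a hypothetical helper (the real method reads `self.down` and `self.up`), but the return values match the record above.

def down_up_string(down, up):
    # "" for the default press-and-release, "down" or "up" for one-sided actions
    if down and up:
        return ""
    return "down" if down else ("up" if up else "")

assert down_up_string(True, True) == ""
assert down_up_string(True, False) == "down"
assert down_up_string(False, True) == "up"
assert down_up_string(False, False) == ""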