repo (string, 7-55 chars) | path (string, 4-127 chars) | func_name (string, 1-88 chars) | original_string (string, 75-19.8k chars) | language (1 value: python) | code (string, 75-19.8k chars) | code_tokens (sequence) | docstring (string, 3-17.3k chars) | docstring_tokens (sequence) | sha (string, 40 chars) | url (string, 87-242 chars) | partition (1 value: train)

architv/harvey | harvey/harvey.py | _get_license_description | python

def _get_license_description(license_code):
    """ Gets the body for a license based on a license code """
    req = requests.get("{base_url}/licenses/{license_code}".format(
        base_url=BASE_URL, license_code=license_code), headers=_HEADERS)
    if req.status_code == requests.codes.ok:
        s = req.json()["body"]
        search_curly = re.search(r'\{(.*)\}', s)
        search_square = re.search(r'\[(.*)\]', s)
        license = ""
        replace_string = '{year} {name}'.format(year=date.today().year,
                                                name=_get_config_name())
        if search_curly:
            license = re.sub(r'\{(.+)\}', replace_string, s)
        elif search_square:
            license = re.sub(r'\[(.+)\]', replace_string, s)
        else:
            license = s
        return license
    else:
        print(Fore.RED + 'No such license. Please check again.'),
        print(Style.RESET_ALL),
        sys.exit()
""" Gets the body for a license based on a license code """
req = requests.get("{base_url}/licenses/{license_code}".format(
base_url=BASE_URL, license_code=license_code), headers=_HEADERS)
if req.status_code == requests.codes.ok:
s = req.json()["body"]
search_curly = re.search(r'\{(.*)\}', s)
search_square = re.search(r'\[(.*)\]', s)
license = ""
replace_string = '{year} {name}'.format(year=date.today().year,
name=_get_config_name())
if search_curly:
license = re.sub(r'\{(.+)\}', replace_string, s)
elif search_square:
license = re.sub(r'\[(.+)\]', replace_string, s)
else:
license = s
return license
else:
print(Fore.RED + 'No such license. Please check again.'),
print(Style.RESET_ALL),
sys.exit() | [
"def",
"_get_license_description",
"(",
"license_code",
")",
":",
"req",
"=",
"requests",
".",
"get",
"(",
"\"{base_url}/licenses/{license_code}\"",
".",
"format",
"(",
"base_url",
"=",
"BASE_URL",
",",
"license_code",
"=",
"license_code",
")",
",",
"headers",
"=",
"_HEADERS",
")",
"if",
"req",
".",
"status_code",
"==",
"requests",
".",
"codes",
".",
"ok",
":",
"s",
"=",
"req",
".",
"json",
"(",
")",
"[",
"\"body\"",
"]",
"search_curly",
"=",
"re",
".",
"search",
"(",
"r'\\{(.*)\\}'",
",",
"s",
")",
"search_square",
"=",
"re",
".",
"search",
"(",
"r'\\[(.*)\\]'",
",",
"s",
")",
"license",
"=",
"\"\"",
"replace_string",
"=",
"'{year} {name}'",
".",
"format",
"(",
"year",
"=",
"date",
".",
"today",
"(",
")",
".",
"year",
",",
"name",
"=",
"_get_config_name",
"(",
")",
")",
"if",
"search_curly",
":",
"license",
"=",
"re",
".",
"sub",
"(",
"r'\\{(.+)\\}'",
",",
"replace_string",
",",
"s",
")",
"elif",
"search_square",
":",
"license",
"=",
"re",
".",
"sub",
"(",
"r'\\[(.+)\\]'",
",",
"replace_string",
",",
"s",
")",
"else",
":",
"license",
"=",
"s",
"return",
"license",
"else",
":",
"print",
"(",
"Fore",
".",
"RED",
"+",
"'No such license. Please check again.'",
")",
",",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
",",
"sys",
".",
"exit",
"(",
")"
] | Gets the body for a license based on a license code | [
"Gets",
"the",
"body",
"for",
"a",
"license",
"based",
"on",
"a",
"license",
"code"

2b96d57b7a1e0dd706f1f00aba3d92a7ae702960 | https://github.com/architv/harvey/blob/2b96d57b7a1e0dd706f1f00aba3d92a7ae702960/harvey/harvey.py#L70-L94 | train
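
The function fills the placeholder that the GitHub licenses API leaves in the license body (MIT, for example, ships with `[year] [fullname]`). A minimal sketch of the underlying request, assuming BASE_URL points at the public GitHub API root (the constant itself is not shown in this record):

import requests

# fetch the raw MIT license body; the GitHub licenses API is public
resp = requests.get("https://api.github.com/licenses/mit",
                    headers={"Accept": "application/vnd.github+json"})
if resp.status_code == requests.codes.ok:
    body = resp.json()["body"]
    print(body.splitlines()[0])   # MIT License
    # the body still contains "[year] [fullname]" for the caller to substitute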

architv/harvey | harvey/harvey.py | get_license_summary | python

def get_license_summary(license_code):
    """ Gets the license summary and permitted, forbidden and required
    behaviour """
    try:
        abs_file = os.path.join(_ROOT, "summary.json")
        with open(abs_file, 'r') as f:
            summary_license = json.loads(f.read())[license_code]
        # prints summary
        print(Fore.YELLOW + 'SUMMARY')
        print(Style.RESET_ALL),
        print(summary_license['summary'])
        # prints source for summary
        print(Style.BRIGHT + 'Source:'),
        print(Style.RESET_ALL),
        print(Fore.BLUE + summary_license['source'])
        print(Style.RESET_ALL)
        # prints cans
        print(Fore.GREEN + 'CAN')
        print(Style.RESET_ALL),
        for rule in summary_license['can']:
            print(rule)
        print('')
        # prints cannot
        print(Fore.RED + 'CANNOT')
        print(Style.RESET_ALL),
        for rule in summary_license['cannot']:
            print(rule)
        print('')
        # prints musts
        print(Fore.BLUE + 'MUST')
        print(Style.RESET_ALL),
        for rule in summary_license['must']:
            print(rule)
        print('')
    except KeyError:
        print(Fore.RED + 'No such license. Please check again.'),
        print(Style.RESET_ALL),
""" Gets the license summary and permitted, forbidden and required
behaviour """
try:
abs_file = os.path.join(_ROOT, "summary.json")
with open(abs_file, 'r') as f:
summary_license = json.loads(f.read())[license_code]
# prints summary
print(Fore.YELLOW + 'SUMMARY')
print(Style.RESET_ALL),
print(summary_license['summary'])
# prints source for summary
print(Style.BRIGHT + 'Source:'),
print(Style.RESET_ALL),
print(Fore.BLUE + summary_license['source'])
print(Style.RESET_ALL)
# prints cans
print(Fore.GREEN + 'CAN')
print(Style.RESET_ALL),
for rule in summary_license['can']:
print(rule)
print('')
# prints cannot
print(Fore.RED + 'CANNOT')
print(Style.RESET_ALL),
for rule in summary_license['cannot']:
print(rule)
print('')
# prints musts
print(Fore.BLUE + 'MUST')
print(Style.RESET_ALL),
for rule in summary_license['must']:
print(rule)
print('')
except KeyError:
print(Fore.RED + 'No such license. Please check again.'),
print(Style.RESET_ALL), | [
"def",
"get_license_summary",
"(",
"license_code",
")",
":",
"try",
":",
"abs_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_ROOT",
",",
"\"summary.json\"",
")",
"with",
"open",
"(",
"abs_file",
",",
"'r'",
")",
"as",
"f",
":",
"summary_license",
"=",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"[",
"license_code",
"]",
"# prints summary",
"print",
"(",
"Fore",
".",
"YELLOW",
"+",
"'SUMMARY'",
")",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
",",
"print",
"(",
"summary_license",
"[",
"'summary'",
"]",
")",
"# prints source for summary",
"print",
"(",
"Style",
".",
"BRIGHT",
"+",
"'Source:'",
")",
",",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
",",
"print",
"(",
"Fore",
".",
"BLUE",
"+",
"summary_license",
"[",
"'source'",
"]",
")",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
"# prints cans",
"print",
"(",
"Fore",
".",
"GREEN",
"+",
"'CAN'",
")",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
",",
"for",
"rule",
"in",
"summary_license",
"[",
"'can'",
"]",
":",
"print",
"(",
"rule",
")",
"print",
"(",
"''",
")",
"# prints cannot",
"print",
"(",
"Fore",
".",
"RED",
"+",
"'CANNOT'",
")",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
",",
"for",
"rule",
"in",
"summary_license",
"[",
"'cannot'",
"]",
":",
"print",
"(",
"rule",
")",
"print",
"(",
"''",
")",
"# prints musts",
"print",
"(",
"Fore",
".",
"BLUE",
"+",
"'MUST'",
")",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
",",
"for",
"rule",
"in",
"summary_license",
"[",
"'must'",
"]",
":",
"print",
"(",
"rule",
")",
"print",
"(",
"''",
")",
"except",
"KeyError",
":",
"print",
"(",
"Fore",
".",
"RED",
"+",
"'No such license. Please check again.'",
")",
",",
"print",
"(",
"Style",
".",
"RESET_ALL",
")",
","
] | Gets the license summary and permitted, forbidden and required
behaviour | [
"Gets",
"the",
"license",
"summary",
"and",
"permitted",
"forbidden",
"and",
"required",
"behaviour"

2b96d57b7a1e0dd706f1f00aba3d92a7ae702960 | https://github.com/architv/harvey/blob/2b96d57b7a1e0dd706f1f00aba3d92a7ae702960/harvey/harvey.py#L97-L139 | train
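
The key lookups above imply that each summary.json entry carries summary, source, can, cannot, and must fields. A hypothetical MIT entry for illustration (the real file ships inside the harvey package and its wording may differ):

import json

# hypothetical entry shaped like the lookups in get_license_summary
summary_json = '''{
  "mit": {
    "summary": "A short, permissive license.",
    "source": "https://tldrlegal.com/license/mit-license",
    "can": ["Commercial Use", "Modify", "Distribute", "Sublicense"],
    "cannot": ["Hold Liable"],
    "must": ["Include Copyright", "Include License"]
  }
}'''
entry = json.loads(summary_json)["mit"]
print(entry["summary"])
for rule in entry["can"]:
    print(rule)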

architv/harvey | harvey/harvey.py | main | python

def main():
    ''' harvey helps you manage and add license from the command line '''
    arguments = docopt(__doc__, version=__version__)
    if arguments['ls'] or arguments['list']:
        _get_licences()
    elif arguments['--tldr'] and arguments['<NAME>']:
        get_license_summary(arguments['<NAME>'].lower())
    elif arguments['--export'] and arguments['<NAME>']:
        save_license(arguments['<NAME>'].lower())
    elif arguments['<NAME>']:
        print(_get_license_description(arguments['<NAME>'].lower()))
    else:
        print(__doc__)

2b96d57b7a1e0dd706f1f00aba3d92a7ae702960 | https://github.com/architv/harvey/blob/2b96d57b7a1e0dd706f1f00aba3d92a7ae702960/harvey/harvey.py#L152-L165 | train
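
docopt builds the parser from the module docstring and hands back a plain dict keyed by commands ('ls'), options ('--tldr'), and positionals ('<NAME>'). A minimal, hypothetical usage string consistent with the dispatch above (harvey's real docstring may differ):

"""harvey.

Usage:
  harvey (ls | list)
  harvey <NAME> [--tldr | --export]

Options:
  --tldr    Show a quick summary of the license.
  --export  Save the license body to a file.
"""
from docopt import docopt

arguments = docopt(__doc__, version='1.0.0')
# e.g. "harvey mit --tldr" yields:
# {'ls': False, 'list': False, '<NAME>': 'mit', '--tldr': True, '--export': False}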

rfverbruggen/rachiopy | rachiopy/person.py | Person.get | python

def get(self, user_id):
    """Retrieve the information for a person entity."""
    path = '/'.join(['person', user_id])
    return self.rachio.get(path)
"""Retrieve the information for a person entity."""
path = '/'.join(['person', user_id])
return self.rachio.get(path) | [
"def",
"get",
"(",
"self",
",",
"user_id",
")",
":",
"path",
"=",
"'/'",
".",
"join",
"(",
"[",
"'person'",
",",
"user_id",
"]",
")",
"return",
"self",
".",
"rachio",
".",
"get",
"(",
"path",
")"
] | Retrieve the information for a person entity. | [
"Retrieve",
"the",
"information",
"for",
"a",
"person",
"entity",
"."

c91abc9984f0f453e60fa905285c1b640c3390ae | https://github.com/rfverbruggen/rachiopy/blob/c91abc9984f0f453e60fa905285c1b640c3390ae/rachiopy/person.py#L16-L19 | train
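
Person only builds the person/{user_id} path and delegates to the HTTP layer held in self.rachio. A hedged sketch of that pattern with a stub client (the real rachiopy constructor and return shape may differ):

class StubRachio(object):
    """Stands in for rachiopy's HTTP layer in this sketch."""
    def get(self, path):
        print("GET /" + path)
        return {"id": path.split("/")[-1], "username": "demo"}

class Person(object):
    def __init__(self, rachio):
        self.rachio = rachio
    def get(self, user_id):
        path = '/'.join(['person', user_id])
        return self.rachio.get(path)

print(Person(StubRachio()).get("2a5e7d3c"))   # GET /person/2a5e7d3c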

ptmcg/littletable | littletable.py | Table.copy_template | python

def copy_template(self, name=None):
    """Create empty copy of the current table, with copies of all
    index definitions.
    """
    ret = Table(self.table_name)
    ret._indexes.update(dict((k, v.copy_template()) for k, v in self._indexes.items()))
    ret(name)
    return ret
"""Create empty copy of the current table, with copies of all
index definitions.
"""
ret = Table(self.table_name)
ret._indexes.update(dict((k, v.copy_template()) for k, v in self._indexes.items()))
ret(name)
return ret | [
"def",
"copy_template",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"ret",
"=",
"Table",
"(",
"self",
".",
"table_name",
")",
"ret",
".",
"_indexes",
".",
"update",
"(",
"dict",
"(",
"(",
"k",
",",
"v",
".",
"copy_template",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_indexes",
".",
"items",
"(",
")",
")",
")",
"ret",
"(",
"name",
")",
"return",
"ret"
] | Create empty copy of the current table, with copies of all
index definitions. | [
"Create",
"empty",
"copy",
"of",
"the",
"current",
"table",
"with",
"copies",
"of",
"all",
"index",
"definitions",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L632-L639 | train

ptmcg/littletable | littletable.py | Table.clone | python

def clone(self, name=None):
    """Create full copy of the current table, including table contents
    and index definitions.
    """
    ret = self.copy_template().insert_many(self.obs)(name)
    return ret
"""Create full copy of the current table, including table contents
and index definitions.
"""
ret = self.copy_template().insert_many(self.obs)(name)
return ret | [
"def",
"clone",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"ret",
"=",
"self",
".",
"copy_template",
"(",
")",
".",
"insert_many",
"(",
"self",
".",
"obs",
")",
"(",
"name",
")",
"return",
"ret"
] | Create full copy of the current table, including table contents
and index definitions. | [
"Create",
"full",
"copy",
"of",
"the",
"current",
"table",
"including",
"table",
"contents",
"and",
"index",
"definitions",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L641-L646 | train
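
copy_template yields an empty table that keeps the index definitions; clone additionally copies every row. A minimal sketch under the littletable API at this sha (create_index is an assumed companion method; DataObject and insert_many appear elsewhere in these records):

import littletable as lt   # assumption: module name as published

t = lt.Table('employees')
t.create_index('id', unique=True)            # assumed index-creation API
t.insert_many([lt.DataObject(id=1, name='Ada'),
               lt.DataObject(id=2, name='Bob')])

empty = t.copy_template('employees_empty')   # same indexes, zero rows
full = t.clone('employees_full')             # same indexes plus copies of all rows
print(len(empty), len(full))                 # 0 2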

ptmcg/littletable | littletable.py | Table.delete_index | python

def delete_index(self, attr):
    """Deletes an index from the Table. Can be used to drop and rebuild an index,
    or to convert a non-unique index to a unique index, or vice versa.
    @param attr: name of an indexed attribute
    @type attr: string
    """
    if attr in self._indexes:
        del self._indexes[attr]
        self._uniqueIndexes = [ind for ind in self._indexes.values() if ind.is_unique]
    return self
"""Deletes an index from the Table. Can be used to drop and rebuild an index,
or to convert a non-unique index to a unique index, or vice versa.
@param attr: name of an indexed attribute
@type attr: string
"""
if attr in self._indexes:
del self._indexes[attr]
self._uniqueIndexes = [ind for ind in self._indexes.values() if ind.is_unique]
return self | [
"def",
"delete_index",
"(",
"self",
",",
"attr",
")",
":",
"if",
"attr",
"in",
"self",
".",
"_indexes",
":",
"del",
"self",
".",
"_indexes",
"[",
"attr",
"]",
"self",
".",
"_uniqueIndexes",
"=",
"[",
"ind",
"for",
"ind",
"in",
"self",
".",
"_indexes",
".",
"values",
"(",
")",
"if",
"ind",
".",
"is_unique",
"]",
"return",
"self"
] | Deletes an index from the Table. Can be used to drop and rebuild an index,
or to convert a non-unique index to a unique index, or vice versa.
@param attr: name of an indexed attribute
@type attr: string | [
"Deletes",
"an",
"index",
"from",
"the",
"Table",
".",
"Can",
"be",
"used",
"to",
"drop",
"and",
"rebuild",
"an",
"index",
"or",
"to",
"convert",
"a",
"non",
"-",
"unique",
"index",
"to",
"a",
"unique",
"index",
"or",
"vice",
"versa",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L689-L698 | train
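
Dropping and re-adding is how a non-unique index becomes unique (or the reverse), and delete_index refreshes the cached _uniqueIndexes list as it goes. Sketch, same API assumptions as above:

import littletable as lt

t = lt.Table()
t.create_index('sku')                # assumed API: plain, non-unique index
t.insert(lt.DataObject(sku='A-100'))

t.delete_index('sku')                # drop it...
t.create_index('sku', unique=True)   # ...and rebuild it as a unique index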

ptmcg/littletable | littletable.py | Table.insert_many | python

def insert_many(self, it):
    """Inserts a collection of objects into the table."""
    unique_indexes = self._uniqueIndexes  # [ind for ind in self._indexes.values() if ind.is_unique]
    NO_SUCH_ATTR = object()
    new_objs = list(it)
    if unique_indexes:
        for ind in unique_indexes:
            ind_attr = ind.attr
            new_keys = dict((getattr(obj, ind_attr, NO_SUCH_ATTR), obj) for obj in new_objs)
            if not ind.accept_none and (None in new_keys or NO_SUCH_ATTR in new_keys):
                raise KeyError("unique key cannot be None or blank for index %s" % ind_attr,
                               [ob for ob in new_objs if getattr(ob, ind_attr, NO_SUCH_ATTR) is None])
            if len(new_keys) < len(new_objs):
                raise KeyError("given sequence contains duplicate keys for index %s" % ind_attr)
            for key in new_keys:
                if key in ind:
                    obj = new_keys[key]
                    raise KeyError("duplicate unique key value '%s' for index %s" % (getattr(obj, ind_attr), ind_attr),
                                   new_keys[key])
    for obj in new_objs:
        self.obs.append(obj)
        for attr, ind in self._indexes.items():
            obval = getattr(obj, attr)
            ind[obval] = obj
    return self
"""Inserts a collection of objects into the table."""
unique_indexes = self._uniqueIndexes # [ind for ind in self._indexes.values() if ind.is_unique]
NO_SUCH_ATTR = object()
new_objs = list(it)
if unique_indexes:
for ind in unique_indexes:
ind_attr = ind.attr
new_keys = dict((getattr(obj, ind_attr, NO_SUCH_ATTR), obj) for obj in new_objs)
if not ind.accept_none and (None in new_keys or NO_SUCH_ATTR in new_keys):
raise KeyError("unique key cannot be None or blank for index %s" % ind_attr,
[ob for ob in new_objs if getattr(ob, ind_attr, NO_SUCH_ATTR) is None])
if len(new_keys) < len(new_objs):
raise KeyError("given sequence contains duplicate keys for index %s" % ind_attr)
for key in new_keys:
if key in ind:
obj = new_keys[key]
raise KeyError("duplicate unique key value '%s' for index %s" % (getattr(obj, ind_attr), ind_attr),
new_keys[key])
for obj in new_objs:
self.obs.append(obj)
for attr, ind in self._indexes.items():
obval = getattr(obj, attr)
ind[obval] = obj
return self | [
"def",
"insert_many",
"(",
"self",
",",
"it",
")",
":",
"unique_indexes",
"=",
"self",
".",
"_uniqueIndexes",
"# [ind for ind in self._indexes.values() if ind.is_unique]",
"NO_SUCH_ATTR",
"=",
"object",
"(",
")",
"new_objs",
"=",
"list",
"(",
"it",
")",
"if",
"unique_indexes",
":",
"for",
"ind",
"in",
"unique_indexes",
":",
"ind_attr",
"=",
"ind",
".",
"attr",
"new_keys",
"=",
"dict",
"(",
"(",
"getattr",
"(",
"obj",
",",
"ind_attr",
",",
"NO_SUCH_ATTR",
")",
",",
"obj",
")",
"for",
"obj",
"in",
"new_objs",
")",
"if",
"not",
"ind",
".",
"accept_none",
"and",
"(",
"None",
"in",
"new_keys",
"or",
"NO_SUCH_ATTR",
"in",
"new_keys",
")",
":",
"raise",
"KeyError",
"(",
"\"unique key cannot be None or blank for index %s\"",
"%",
"ind_attr",
",",
"[",
"ob",
"for",
"ob",
"in",
"new_objs",
"if",
"getattr",
"(",
"ob",
",",
"ind_attr",
",",
"NO_SUCH_ATTR",
")",
"is",
"None",
"]",
")",
"if",
"len",
"(",
"new_keys",
")",
"<",
"len",
"(",
"new_objs",
")",
":",
"raise",
"KeyError",
"(",
"\"given sequence contains duplicate keys for index %s\"",
"%",
"ind_attr",
")",
"for",
"key",
"in",
"new_keys",
":",
"if",
"key",
"in",
"ind",
":",
"obj",
"=",
"new_keys",
"[",
"key",
"]",
"raise",
"KeyError",
"(",
"\"duplicate unique key value '%s' for index %s\"",
"%",
"(",
"getattr",
"(",
"obj",
",",
"ind_attr",
")",
",",
"ind_attr",
")",
",",
"new_keys",
"[",
"key",
"]",
")",
"for",
"obj",
"in",
"new_objs",
":",
"self",
".",
"obs",
".",
"append",
"(",
"obj",
")",
"for",
"attr",
",",
"ind",
"in",
"self",
".",
"_indexes",
".",
"items",
"(",
")",
":",
"obval",
"=",
"getattr",
"(",
"obj",
",",
"attr",
")",
"ind",
"[",
"obval",
"]",
"=",
"obj",
"return",
"self"
] | Inserts a collection of objects into the table. | [
"Inserts",
"a",
"collection",
"of",
"objects",
"into",
"the",
"table",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L720-L746 | train
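
Every unique-index check runs against the whole incoming batch before a single row is appended, so a failing insert_many leaves the table untouched. Sketch, same assumptions:

import littletable as lt

t = lt.Table()
t.create_index('id', unique=True)    # assumed index-creation API
t.insert_many(lt.DataObject(id=i) for i in range(3))

try:
    t.insert_many([lt.DataObject(id=2), lt.DataObject(id=99)])
except KeyError as e:
    print(e.args[0])                 # duplicate unique key value '2' for index id
print(len(t))                        # 3 - nothing from the failed batch was added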

ptmcg/littletable | littletable.py | Table.remove_many | python

def remove_many(self, it):
    """Removes a collection of objects from the table."""
    # find indices of objects in iterable
    to_be_deleted = list(it)
    del_indices = []
    for i, ob in enumerate(self.obs):
        try:
            tbd_index = to_be_deleted.index(ob)
        except ValueError:
            continue
        else:
            del_indices.append(i)
            to_be_deleted.pop(tbd_index)
        # quit early if we have found them all
        if not to_be_deleted:
            break
    for i in sorted(del_indices, reverse=True):
        self.pop(i)
    return self
"""Removes a collection of objects from the table."""
# find indicies of objects in iterable
to_be_deleted = list(it)
del_indices = []
for i, ob in enumerate(self.obs):
try:
tbd_index = to_be_deleted.index(ob)
except ValueError:
continue
else:
del_indices.append(i)
to_be_deleted.pop(tbd_index)
# quit early if we have found them all
if not to_be_deleted:
break
for i in sorted(del_indices, reverse=True):
self.pop(i)
return self | [
"def",
"remove_many",
"(",
"self",
",",
"it",
")",
":",
"# find indicies of objects in iterable",
"to_be_deleted",
"=",
"list",
"(",
"it",
")",
"del_indices",
"=",
"[",
"]",
"for",
"i",
",",
"ob",
"in",
"enumerate",
"(",
"self",
".",
"obs",
")",
":",
"try",
":",
"tbd_index",
"=",
"to_be_deleted",
".",
"index",
"(",
"ob",
")",
"except",
"ValueError",
":",
"continue",
"else",
":",
"del_indices",
".",
"append",
"(",
"i",
")",
"to_be_deleted",
".",
"pop",
"(",
"tbd_index",
")",
"# quit early if we have found them all",
"if",
"not",
"to_be_deleted",
":",
"break",
"for",
"i",
"in",
"sorted",
"(",
"del_indices",
",",
"reverse",
"=",
"True",
")",
":",
"self",
".",
"pop",
"(",
"i",
")",
"return",
"self"
] | Removes a collection of objects from the table. | [
"Removes",
"a",
"collection",
"of",
"objects",
"from",
"the",
"table",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L753-L774 | train

ptmcg/littletable | littletable.py | Table._query_attr_sort_fn | python

def _query_attr_sort_fn(self, attr_val):
    """Used to order where keys by most selective key first"""
    attr, v = attr_val
    if attr in self._indexes:
        idx = self._indexes[attr]
        if v in idx:
            return len(idx[v])
        else:
            return 0
    else:
        return 1e9
"""Used to order where keys by most selective key first"""
attr, v = attr_val
if attr in self._indexes:
idx = self._indexes[attr]
if v in idx:
return len(idx[v])
else:
return 0
else:
return 1e9 | [
"def",
"_query_attr_sort_fn",
"(",
"self",
",",
"attr_val",
")",
":",
"attr",
",",
"v",
"=",
"attr_val",
"if",
"attr",
"in",
"self",
".",
"_indexes",
":",
"idx",
"=",
"self",
".",
"_indexes",
"[",
"attr",
"]",
"if",
"v",
"in",
"idx",
":",
"return",
"len",
"(",
"idx",
"[",
"v",
"]",
")",
"else",
":",
"return",
"0",
"else",
":",
"return",
"1e9"
] | Used to order where keys by most selective key first | [
"Used",
"to",
"order",
"where",
"keys",
"by",
"most",
"selective",
"key",
"first"

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L776-L786 | train

ptmcg/littletable | littletable.py | Table.delete | python

def delete(self, **kwargs):
    """Deletes matching objects from the table, based on given
    named parameters. If multiple named parameters are given, then
    only objects that satisfy all of the query criteria will be removed.
    @param kwargs: attributes for selecting records, given as additional
    named arguments of the form C{attrname="attrvalue"}.
    @return: the number of objects removed from the table
    """
    if not kwargs:
        return 0
    affected = self.where(**kwargs)
    self.remove_many(affected)
    return len(affected)
"""Deletes matching objects from the table, based on given
named parameters. If multiple named parameters are given, then
only objects that satisfy all of the query criteria will be removed.
@param kwargs: attributes for selecting records, given as additional
named arguments of the form C{attrname="attrvalue"}.
@return: the number of objects removed from the table
"""
if not kwargs:
return 0
affected = self.where(**kwargs)
self.remove_many(affected)
return len(affected) | [
"def",
"delete",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"kwargs",
":",
"return",
"0",
"affected",
"=",
"self",
".",
"where",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"remove_many",
"(",
"affected",
")",
"return",
"len",
"(",
"affected",
")"
] | Deletes matching objects from the table, based on given
named parameters. If multiple named parameters are given, then
only objects that satisfy all of the query criteria will be removed.
@param kwargs: attributes for selecting records, given as additional
named arguments of the form C{attrname="attrvalue"}.
@return: the number of objects removed from the table | [
"Deletes",
"matching",
"objects",
"from",
"the",
"table",
"based",
"on",
"given",
"named",
"parameters",
".",
"If",
"multiple",
"named",
"parameters",
"are",
"given",
"then",
"only",
"objects",
"that",
"satisfy",
"all",
"of",
"the",
"query",
"criteria",
"will",
"be",
"removed",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L835-L848 | train
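
delete composes where and remove_many: select everything matching all of the keyword criteria, remove it, and report the count. Sketch, same assumptions:

import littletable as lt

t = lt.Table().insert_many([
    lt.DataObject(name='Ada', dept='ENG'),
    lt.DataObject(name='Bob', dept='ENG'),
    lt.DataObject(name='Cy', dept='OPS'),
])
print(t.delete(dept='ENG'))   # 2 - where(dept='ENG') then remove_many(...)
print(len(t))                 # 1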

ptmcg/littletable | littletable.py | Table.sort | python

def sort(self, key, reverse=False):
    """Sort Table in place, using given fields as sort key.
    @param key: if this is a string, it is a comma-separated list of field names,
    optionally followed by 'desc' to indicate descending sort instead of the
    default ascending sort; if a list or tuple, it is a list or tuple of field names
    or field names with ' desc' appended; if it is a function, then it is the
    function to be used as the sort key function
    @param reverse: (default=False) set to True if results should be in reverse order
    @type reverse: bool
    @return: self
    """
    if isinstance(key, (basestring, list, tuple)):
        if isinstance(key, basestring):
            attrdefs = [s.strip() for s in key.split(',')]
            attr_orders = [(a.split()+['asc', ])[:2] for a in attrdefs]
        else:
            # attr definitions were already resolved to a sequence by the caller
            if isinstance(key[0], basestring):
                attr_orders = [(a.split()+['asc', ])[:2] for a in key]
            else:
                attr_orders = key
        attrs = [attr for attr, order in attr_orders]
        # special optimization if all orders are ascending or descending
        if all(order == 'asc' for attr, order in attr_orders):
            self.obs.sort(key=attrgetter(*attrs), reverse=reverse)
        elif all(order == 'desc' for attr, order in attr_orders):
            self.obs.sort(key=attrgetter(*attrs), reverse=not reverse)
        else:
            # mix of ascending and descending sorts, have to do succession of sorts
            # leftmost attr is the most primary sort key, so reverse attr_orders to do
            # succession of sorts from right to left
            do_all(self.obs.sort(key=attrgetter(attr), reverse=(order == "desc"))
                   for attr, order in reversed(attr_orders))
    else:
        # sorting given a sort key function
        keyfn = key
        self.obs.sort(key=keyfn, reverse=reverse)
    return self
"""Sort Table in place, using given fields as sort key.
@param key: if this is a string, it is a comma-separated list of field names,
optionally followed by 'desc' to indicate descending sort instead of the
default ascending sort; if a list or tuple, it is a list or tuple of field names
or field names with ' desc' appended; if it is a function, then it is the
function to be used as the sort key function
@param reverse: (default=False) set to True if results should be in reverse order
@type reverse: bool
@return: self
"""
if isinstance(key, (basestring, list, tuple)):
if isinstance(key, basestring):
attrdefs = [s.strip() for s in key.split(',')]
attr_orders = [(a.split()+['asc', ])[:2] for a in attrdefs]
else:
# attr definitions were already resolved to a sequence by the caller
if isinstance(key[0], basestring):
attr_orders = [(a.split()+['asc', ])[:2] for a in key]
else:
attr_orders = key
attrs = [attr for attr, order in attr_orders]
# special optimization if all orders are ascending or descending
if all(order == 'asc' for attr, order in attr_orders):
self.obs.sort(key=attrgetter(*attrs), reverse=reverse)
elif all(order == 'desc' for attr, order in attr_orders):
self.obs.sort(key=attrgetter(*attrs), reverse=not reverse)
else:
# mix of ascending and descending sorts, have to do succession of sorts
# leftmost attr is the most primary sort key, so reverse attr_orders to do
# succession of sorts from right to left
do_all(self.obs.sort(key=attrgetter(attr), reverse=(order == "desc"))
for attr, order in reversed(attr_orders))
else:
# sorting given a sort key function
keyfn = key
self.obs.sort(key=keyfn, reverse=reverse)
return self | [
"def",
"sort",
"(",
"self",
",",
"key",
",",
"reverse",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"(",
"basestring",
",",
"list",
",",
"tuple",
")",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"basestring",
")",
":",
"attrdefs",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"key",
".",
"split",
"(",
"','",
")",
"]",
"attr_orders",
"=",
"[",
"(",
"a",
".",
"split",
"(",
")",
"+",
"[",
"'asc'",
",",
"]",
")",
"[",
":",
"2",
"]",
"for",
"a",
"in",
"attrdefs",
"]",
"else",
":",
"# attr definitions were already resolved to a sequence by the caller",
"if",
"isinstance",
"(",
"key",
"[",
"0",
"]",
",",
"basestring",
")",
":",
"attr_orders",
"=",
"[",
"(",
"a",
".",
"split",
"(",
")",
"+",
"[",
"'asc'",
",",
"]",
")",
"[",
":",
"2",
"]",
"for",
"a",
"in",
"key",
"]",
"else",
":",
"attr_orders",
"=",
"key",
"attrs",
"=",
"[",
"attr",
"for",
"attr",
",",
"order",
"in",
"attr_orders",
"]",
"# special optimization if all orders are ascending or descending",
"if",
"all",
"(",
"order",
"==",
"'asc'",
"for",
"attr",
",",
"order",
"in",
"attr_orders",
")",
":",
"self",
".",
"obs",
".",
"sort",
"(",
"key",
"=",
"attrgetter",
"(",
"*",
"attrs",
")",
",",
"reverse",
"=",
"reverse",
")",
"elif",
"all",
"(",
"order",
"==",
"'desc'",
"for",
"attr",
",",
"order",
"in",
"attr_orders",
")",
":",
"self",
".",
"obs",
".",
"sort",
"(",
"key",
"=",
"attrgetter",
"(",
"*",
"attrs",
")",
",",
"reverse",
"=",
"not",
"reverse",
")",
"else",
":",
"# mix of ascending and descending sorts, have to do succession of sorts",
"# leftmost attr is the most primary sort key, so reverse attr_orders to do",
"# succession of sorts from right to left",
"do_all",
"(",
"self",
".",
"obs",
".",
"sort",
"(",
"key",
"=",
"attrgetter",
"(",
"attr",
")",
",",
"reverse",
"=",
"(",
"order",
"==",
"\"desc\"",
")",
")",
"for",
"attr",
",",
"order",
"in",
"reversed",
"(",
"attr_orders",
")",
")",
"else",
":",
"# sorting given a sort key function",
"keyfn",
"=",
"key",
"self",
".",
"obs",
".",
"sort",
"(",
"key",
"=",
"keyfn",
",",
"reverse",
"=",
"reverse",
")",
"return",
"self"
] | Sort Table in place, using given fields as sort key.
@param key: if this is a string, it is a comma-separated list of field names,
optionally followed by 'desc' to indicate descending sort instead of the
default ascending sort; if a list or tuple, it is a list or tuple of field names
or field names with ' desc' appended; if it is a function, then it is the
function to be used as the sort key function
@param reverse: (default=False) set to True if results should be in reverse order
@type reverse: bool
@return: self | [
"Sort",
"Table",
"in",
"place",
"using",
"given",
"fields",
"as",
"sort",
"key",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L857-L895 | train
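
Mixed ascending/descending keys work because list.sort is stable: sorting by the least significant key first and the most significant key last preserves earlier orderings within ties. Sketch, same assumptions:

import littletable as lt

t = lt.Table().insert_many([
    lt.DataObject(dept='ENG', salary=120),
    lt.DataObject(dept='OPS', salary=90),
    lt.DataObject(dept='ENG', salary=150),
])
t.sort('dept, salary desc')          # dept ascending, salary descending within dept
for rec in t:
    print(rec.dept, rec.salary)      # ENG 150 / ENG 120 / OPS 90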

ptmcg/littletable | littletable.py | Table.select | python

def select(self, fields, **exprs):
    """
    Create a new table containing a subset of attributes, with optionally
    newly-added fields computed from each rec in the original table.
    @param fields: list of strings, or single space-delimited string, listing attribute name to be included in the
    output
    @type fields: list, or space-delimited string
    @param exprs: one or more named callable arguments, to compute additional fields using the given function
    @type exprs: C{name=callable}, callable takes the record as an argument, and returns the new attribute value
    If a string is passed as a callable, this string will be used using string formatting, given the record
    as a source of interpolation values. For instance, C{fullName = '%(lastName)s, %(firstName)s'}
    """
    fields = self._parse_fields_string(fields)

    def _make_string_callable(expr):
        if isinstance(expr, basestring):
            return lambda r: expr % r
        else:
            return expr

    exprs = dict((k, _make_string_callable(v)) for k, v in exprs.items())
    raw_tuples = []
    for ob in self.obs:
        attrvalues = tuple(getattr(ob, fieldname, None) for fieldname in fields)
        if exprs:
            attrvalues += tuple(expr(ob) for expr in exprs.values())
        raw_tuples.append(attrvalues)
    all_names = tuple(fields) + tuple(exprs.keys())
    ret = Table()
    ret._indexes.update(dict((k, v.copy_template()) for k, v in self._indexes.items() if k in all_names))
    return ret().insert_many(DataObject(**dict(zip(all_names, outtuple))) for outtuple in raw_tuples)
"""
Create a new table containing a subset of attributes, with optionally
newly-added fields computed from each rec in the original table.
@param fields: list of strings, or single space-delimited string, listing attribute name to be included in the
output
@type fields: list, or space-delimited string
@param exprs: one or more named callable arguments, to compute additional fields using the given function
@type exprs: C{name=callable}, callable takes the record as an argument, and returns the new attribute value
If a string is passed as a callable, this string will be used using string formatting, given the record
as a source of interpolation values. For instance, C{fullName = '%(lastName)s, %(firstName)s'}
"""
fields = self._parse_fields_string(fields)
def _make_string_callable(expr):
if isinstance(expr, basestring):
return lambda r: expr % r
else:
return expr
exprs = dict((k, _make_string_callable(v)) for k, v in exprs.items())
raw_tuples = []
for ob in self.obs:
attrvalues = tuple(getattr(ob, fieldname, None) for fieldname in fields)
if exprs:
attrvalues += tuple(expr(ob) for expr in exprs.values())
raw_tuples.append(attrvalues)
all_names = tuple(fields) + tuple(exprs.keys())
ret = Table()
ret._indexes.update(dict((k, v.copy_template()) for k, v in self._indexes.items() if k in all_names))
return ret().insert_many(DataObject(**dict(zip(all_names, outtuple))) for outtuple in raw_tuples) | [
"def",
"select",
"(",
"self",
",",
"fields",
",",
"*",
"*",
"exprs",
")",
":",
"fields",
"=",
"self",
".",
"_parse_fields_string",
"(",
"fields",
")",
"def",
"_make_string_callable",
"(",
"expr",
")",
":",
"if",
"isinstance",
"(",
"expr",
",",
"basestring",
")",
":",
"return",
"lambda",
"r",
":",
"expr",
"%",
"r",
"else",
":",
"return",
"expr",
"exprs",
"=",
"dict",
"(",
"(",
"k",
",",
"_make_string_callable",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"exprs",
".",
"items",
"(",
")",
")",
"raw_tuples",
"=",
"[",
"]",
"for",
"ob",
"in",
"self",
".",
"obs",
":",
"attrvalues",
"=",
"tuple",
"(",
"getattr",
"(",
"ob",
",",
"fieldname",
",",
"None",
")",
"for",
"fieldname",
"in",
"fields",
")",
"if",
"exprs",
":",
"attrvalues",
"+=",
"tuple",
"(",
"expr",
"(",
"ob",
")",
"for",
"expr",
"in",
"exprs",
".",
"values",
"(",
")",
")",
"raw_tuples",
".",
"append",
"(",
"attrvalues",
")",
"all_names",
"=",
"tuple",
"(",
"fields",
")",
"+",
"tuple",
"(",
"exprs",
".",
"keys",
"(",
")",
")",
"ret",
"=",
"Table",
"(",
")",
"ret",
".",
"_indexes",
".",
"update",
"(",
"dict",
"(",
"(",
"k",
",",
"v",
".",
"copy_template",
"(",
")",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_indexes",
".",
"items",
"(",
")",
"if",
"k",
"in",
"all_names",
")",
")",
"return",
"ret",
"(",
")",
".",
"insert_many",
"(",
"DataObject",
"(",
"*",
"*",
"dict",
"(",
"zip",
"(",
"all_names",
",",
"outtuple",
")",
")",
")",
"for",
"outtuple",
"in",
"raw_tuples",
")"
] | Create a new table containing a subset of attributes, with optionally
newly-added fields computed from each rec in the original table.
@param fields: list of strings, or single space-delimited string, listing attribute name to be included in the
output
@type fields: list, or space-delimited string
@param exprs: one or more named callable arguments, to compute additional fields using the given function
@type exprs: C{name=callable}, callable takes the record as an argument, and returns the new attribute value
If a string is passed as a callable, this string will be used using string formatting, given the record
as a source of interpolation values. For instance, C{fullName = '%(lastName)s, %(firstName)s'} | [
"Create",
"a",
"new",
"table",
"containing",
"a",
"subset",
"of",
"attributes",
"with",
"optionally",
"newly",
"-",
"added",
"fields",
"computed",
"from",
"each",
"rec",
"in",
"the",
"original",
"table",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L897-L931 | train
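
select projects a subset of attributes and can add computed fields; a callable receives each record, while a plain string is applied with %-interpolation against the record. Sketch using a callable, same assumptions:

import littletable as lt

t = lt.Table().insert_many([
    lt.DataObject(firstName='Ada', lastName='Lovelace', salary=120),
    lt.DataObject(firstName='Bob', lastName='Hope', salary=90),
])
out = t.select('lastName salary',
               fullName=lambda r: '%s, %s' % (r.lastName, r.firstName))
for rec in out:
    print(rec.fullName, rec.salary)   # Lovelace, Ada 120 / Hope, Bob 90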

ptmcg/littletable | littletable.py | Table.formatted_table | python

def formatted_table(self, *fields, **exprs):
    """
    Create a new table with all string formatted attribute values, typically in preparation for
    formatted output.
    @param fields: one or more strings, each string is an attribute name to be included in the output
    @type fields: string (multiple)
    @param exprs: one or more named string arguments, to format the given attribute with a formatting string
    @type exprs: name=string
    """
    # select_exprs = {}
    # for f in fields:
    #     select_exprs[f] = lambda r : str(getattr,f,None)
    fields = set(fields)
    # note: as written, str(getattr, f, None) passes the getattr builtin to str()
    # rather than calling getattr(r, f, None)
    select_exprs = ODict((f, lambda r, f=f: str(getattr, f, None)) for f in fields)
    for ename, expr in exprs.items():
        if isinstance(expr, basestring):
            if re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', expr):
                select_exprs[ename] = lambda r: str(getattr(r, expr, None))
            else:
                # note: the middle operand "{0}" is a non-empty string, so this
                # condition is always true and the %-formatting branch is unreachable
                if "{}" in expr or "{0}" or "{0:" in expr:
                    select_exprs[ename] = lambda r: expr.format(r)
                else:
                    select_exprs[ename] = lambda r: expr % getattr(r, ename, "None")
    return self.select(**select_exprs)
"""
Create a new table with all string formatted attribute values, typically in preparation for
formatted output.
@param fields: one or more strings, each string is an attribute name to be included in the output
@type fields: string (multiple)
@param exprs: one or more named string arguments, to format the given attribute with a formatting string
@type exprs: name=string
"""
# select_exprs = {}
# for f in fields:
# select_exprs[f] = lambda r : str(getattr,f,None)
fields = set(fields)
select_exprs = ODict((f, lambda r, f=f: str(getattr, f, None)) for f in fields)
for ename, expr in exprs.items():
if isinstance(expr, basestring):
if re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*$', expr):
select_exprs[ename] = lambda r: str(getattr(r, expr, None))
else:
if "{}" in expr or "{0}" or "{0:" in expr:
select_exprs[ename] = lambda r: expr.format(r)
else:
select_exprs[ename] = lambda r: expr % getattr(r, ename, "None")
return self.select(**select_exprs) | [
"def",
"formatted_table",
"(",
"self",
",",
"*",
"fields",
",",
"*",
"*",
"exprs",
")",
":",
"# select_exprs = {}",
"# for f in fields:",
"# select_exprs[f] = lambda r : str(getattr,f,None)",
"fields",
"=",
"set",
"(",
"fields",
")",
"select_exprs",
"=",
"ODict",
"(",
"(",
"f",
",",
"lambda",
"r",
",",
"f",
"=",
"f",
":",
"str",
"(",
"getattr",
",",
"f",
",",
"None",
")",
")",
"for",
"f",
"in",
"fields",
")",
"for",
"ename",
",",
"expr",
"in",
"exprs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"expr",
",",
"basestring",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'^[a-zA-Z_][a-zA-Z0-9_]*$'",
",",
"expr",
")",
":",
"select_exprs",
"[",
"ename",
"]",
"=",
"lambda",
"r",
":",
"str",
"(",
"getattr",
"(",
"r",
",",
"expr",
",",
"None",
")",
")",
"else",
":",
"if",
"\"{}\"",
"in",
"expr",
"or",
"\"{0}\"",
"or",
"\"{0:\"",
"in",
"expr",
":",
"select_exprs",
"[",
"ename",
"]",
"=",
"lambda",
"r",
":",
"expr",
".",
"format",
"(",
"r",
")",
"else",
":",
"select_exprs",
"[",
"ename",
"]",
"=",
"lambda",
"r",
":",
"expr",
"%",
"getattr",
"(",
"r",
",",
"ename",
",",
"\"None\"",
")",
"return",
"self",
".",
"select",
"(",
"*",
"*",
"select_exprs",
")"
] | Create a new table with all string formatted attribute values, typically in preparation for
formatted output.
@param fields: one or more strings, each string is an attribute name to be included in the output
@type fields: string (multiple)
@param exprs: one or more named string arguments, to format the given attribute with a formatting string
@type exprs: name=string | [
"Create",
"a",
"new",
"table",
"with",
"all",
"string",
"formatted",
"attribute",
"values",
"typically",
"in",
"preparation",
"for",
"formatted",
"output",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L933-L958 | train

ptmcg/littletable | littletable.py | Table.join_on | python

def join_on(self, attr):
    """Creates a JoinTerm in preparation for joining with another table, to
    indicate what attribute should be used in the join. Only indexed attributes
    may be used in a join.
    @param attr: attribute name to join from this table (may be different
    from the attribute name in the table being joined to)
    @type attr: string
    @returns: L{JoinTerm}"""
    if attr not in self._indexes:
        raise ValueError("can only join on indexed attributes")
    return JoinTerm(self, attr)
"""Creates a JoinTerm in preparation for joining with another table, to
indicate what attribute should be used in the join. Only indexed attributes
may be used in a join.
@param attr: attribute name to join from this table (may be different
from the attribute name in the table being joined to)
@type attr: string
@returns: L{JoinTerm}"""
if attr not in self._indexes:
raise ValueError("can only join on indexed attributes")
return JoinTerm(self, attr) | [
"def",
"join_on",
"(",
"self",
",",
"attr",
")",
":",
"if",
"attr",
"not",
"in",
"self",
".",
"_indexes",
":",
"raise",
"ValueError",
"(",
"\"can only join on indexed attributes\"",
")",
"return",
"JoinTerm",
"(",
"self",
",",
"attr",
")"
] | Creates a JoinTerm in preparation for joining with another table, to
indicate what attribute should be used in the join. Only indexed attributes
may be used in a join.
@param attr: attribute name to join from this table (may be different
from the attribute name in the table being joined to)
@type attr: string
@returns: L{JoinTerm} | [
"Creates",
"a",
"JoinTerm",
"in",
"preparation",
"for",
"joining",
"with",
"another",
"table",
"to",
"indicate",
"what",
"attribute",
"should",
"be",
"used",
"in",
"the",
"join",
".",
"Only",
"indexed",
"attributes",
"may",
"be",
"used",
"in",
"a",
"join",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1075-L1085 | train
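
A JoinTerm from each side is combined and then called to materialize the joined table; only indexed attributes qualify. A sketch, assuming JoinTerm supports `+` composition at this revision:

import littletable as lt

customers = lt.Table('customers')
customers.create_index('id', unique=True)    # assumed index-creation API
customers.insert(lt.DataObject(id='0030', name='Ada'))

orders = lt.Table('orders')
orders.create_index('custid')
orders.insert(lt.DataObject(custid='0030', sku='A-100'))

join = customers.join_on('id') + orders.join_on('custid')
for rec in join():                           # calling the join term builds a Table
    print(rec.name, rec.sku)                 # Ada A-100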

ptmcg/littletable | littletable.py | Table.csv_import | python

def csv_import(self, csv_source, encoding='utf-8', transforms=None, row_class=DataObject, **kwargs):
    """Imports the contents of a CSV-formatted file into this table.
    @param csv_source: CSV file - if a string is given, the file with that name will be
    opened, read, and closed; if a file object is given, then that object
    will be read as-is, and left for the caller to be closed.
    @type csv_source: string or file
    @param encoding: encoding to be used for reading source text if C{csv_source} is
    passed as a string filename
    @type encoding: string (default='UTF-8')
    @param transforms: dict of functions by attribute name; if given, each
    attribute will be transformed using the corresponding transform; if there is no
    matching transform, the attribute will be read as a string (default); the
    transform function can also be defined as a (function, default-value) tuple; if
    there is an Exception raised by the transform function, then the attribute will
    be set to the given default value
    @type transforms: dict (optional)
    @param kwargs: additional constructor arguments for csv C{DictReader} objects, such as C{delimiter}
    or C{fieldnames}; these are passed directly through to the csv C{DictReader} constructor
    @type kwargs: named arguments (optional)
    """
    reader_args = dict((k, v) for k, v in kwargs.items() if k not in ['encoding',
                                                                      'csv_source',
                                                                      'transforms',
                                                                      'row_class'])
    reader = lambda src: csv.DictReader(src, **reader_args)
    return self._import(csv_source, encoding, transforms, reader=reader, row_class=row_class)
"""Imports the contents of a CSV-formatted file into this table.
@param csv_source: CSV file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left for the caller to be closed.
@type csv_source: string or file
@param encoding: encoding to be used for reading source text if C{csv_source} is
passed as a string filename
@type encoding: string (default='UTF-8')
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional)
@param kwargs: additional constructor arguments for csv C{DictReader} objects, such as C{delimiter}
or C{fieldnames}; these are passed directly through to the csv C{DictReader} constructor
@type kwargs: named arguments (optional)
"""
reader_args = dict((k, v) for k, v in kwargs.items() if k not in ['encoding',
'csv_source',
'transforms',
'row_class'])
reader = lambda src: csv.DictReader(src, **reader_args)
return self._import(csv_source, encoding, transforms, reader=reader, row_class=row_class) | [
"def",
"csv_import",
"(",
"self",
",",
"csv_source",
",",
"encoding",
"=",
"'utf-8'",
",",
"transforms",
"=",
"None",
",",
"row_class",
"=",
"DataObject",
",",
"*",
"*",
"kwargs",
")",
":",
"reader_args",
"=",
"dict",
"(",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"if",
"k",
"not",
"in",
"[",
"'encoding'",
",",
"'csv_source'",
",",
"'transforms'",
",",
"'row_class'",
"]",
")",
"reader",
"=",
"lambda",
"src",
":",
"csv",
".",
"DictReader",
"(",
"src",
",",
"*",
"*",
"reader_args",
")",
"return",
"self",
".",
"_import",
"(",
"csv_source",
",",
"encoding",
",",
"transforms",
",",
"reader",
"=",
"reader",
",",
"row_class",
"=",
"row_class",
")"
] | Imports the contents of a CSV-formatted file into this table.
@param csv_source: CSV file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left for the caller to be closed.
@type csv_source: string or file
@param encoding: encoding to be used for reading source text if C{csv_source} is
passed as a string filename
@type encoding: string (default='UTF-8')
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional)
@param kwargs: additional constructor arguments for csv C{DictReader} objects, such as C{delimiter}
or C{fieldnames}; these are passed directly through to the csv C{DictReader} constructor
@type kwargs: named arguments (optional) | [
"Imports",
"the",
"contents",
"of",
"a",
"CSV",
"-",
"formatted",
"file",
"into",
"this",
"table",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1136-L1161 | train
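
transforms convert chosen columns out of the CSV's all-string values, and a (function, default) tuple supplies a fallback when conversion raises. Sketch with an in-memory file object (a filename string is also accepted), same assumptions:

import io
import littletable as lt

data = io.StringIO("id,qty,price\n1,5,9.99\n2,n/a,12.50\n")
t = lt.Table().csv_import(
    data,
    transforms={'id': int, 'qty': (int, 0), 'price': float})  # (fn, default) absorbs 'n/a'
for rec in t:
    print(rec.id, rec.qty, rec.price)   # 1 5 9.99 / 2 0 12.5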

ptmcg/littletable | littletable.py | Table.tsv_import | python

def tsv_import(self, xsv_source, encoding="UTF-8", transforms=None, row_class=DataObject, **kwargs):
    """Imports the contents of a tab-separated data file into this table.
    @param xsv_source: tab-separated data file - if a string is given, the file with that name will be
    opened, read, and closed; if a file object is given, then that object
    will be read as-is, and left for the caller to be closed.
    @type xsv_source: string or file
    @param transforms: dict of functions by attribute name; if given, each
    attribute will be transformed using the corresponding transform; if there is no
    matching transform, the attribute will be read as a string (default); the
    transform function can also be defined as a (function, default-value) tuple; if
    there is an Exception raised by the transform function, then the attribute will
    be set to the given default value
    @type transforms: dict (optional)
    """
    return self._xsv_import(xsv_source, encoding, transforms=transforms, delimiter="\t", row_class=row_class, **kwargs)
"""Imports the contents of a tab-separated data file into this table.
@param xsv_source: tab-separated data file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left for the caller to be closed.
@type xsv_source: string or file
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional)
"""
return self._xsv_import(xsv_source, encoding, transforms=transforms, delimiter="\t", row_class=row_class, **kwargs) | [
"def",
"tsv_import",
"(",
"self",
",",
"xsv_source",
",",
"encoding",
"=",
"\"UTF-8\"",
",",
"transforms",
"=",
"None",
",",
"row_class",
"=",
"DataObject",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_xsv_import",
"(",
"xsv_source",
",",
"encoding",
",",
"transforms",
"=",
"transforms",
",",
"delimiter",
"=",
"\"\\t\"",
",",
"row_class",
"=",
"row_class",
",",
"*",
"*",
"kwargs",
")"
] | Imports the contents of a tab-separated data file into this table.
@param xsv_source: tab-separated data file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left for the caller to be closed.
@type xsv_source: string or file
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional) | [
"Imports",
"the",
"contents",
"of",
"a",
"tab",
"-",
"separated",
"data",
"file",
"into",
"this",
"table",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1171-L1185 | train

ptmcg/littletable | littletable.py | Table.csv_export | python

def csv_export(self, csv_dest, fieldnames=None, encoding="UTF-8"):
    """Exports the contents of the table to a CSV-formatted file.
    @param csv_dest: CSV file - if a string is given, the file with that name will be
    opened, written, and closed; if a file object is given, then that object
    will be written as-is, and left for the caller to be closed.
    @type csv_dest: string or file
    @param fieldnames: attribute names to be exported; can be given as a single
    string with space-delimited names, or as a list of attribute names
    @type fieldnames: list of strings
    @param encoding: string (default="UTF-8"); if csv_dest is provided as a string
    representing an output filename, an encoding argument can be provided (Python 3 only)
    @type encoding: string
    """
    close_on_exit = False
    if isinstance(csv_dest, basestring):
        if PY_3:
            csv_dest = open(csv_dest, 'w', newline='', encoding=encoding)
        else:
            csv_dest = open(csv_dest, 'wb')
        close_on_exit = True
    try:
        if fieldnames is None:
            fieldnames = list(_object_attrnames(self.obs[0]))
        if isinstance(fieldnames, basestring):
            fieldnames = fieldnames.split()
        csv_dest.write(','.join(fieldnames) + NL)
        csvout = csv.DictWriter(csv_dest, fieldnames, extrasaction='ignore', lineterminator=NL)
        if hasattr(self.obs[0], "__dict__"):
            csvout.writerows(o.__dict__ for o in self.obs)
        else:
            do_all(csvout.writerow(ODict(starmap(lambda obj, fld: (fld, getattr(obj, fld)),
                                                 zip(repeat(o), fieldnames)))) for o in self.obs)
    finally:
        if close_on_exit:
            csv_dest.close()
"""Exports the contents of the table to a CSV-formatted file.
@param csv_dest: CSV file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left for the caller to be closed.
@type csv_dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if csv_dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string
"""
close_on_exit = False
if isinstance(csv_dest, basestring):
if PY_3:
csv_dest = open(csv_dest, 'w', newline='', encoding=encoding)
else:
csv_dest = open(csv_dest, 'wb')
close_on_exit = True
try:
if fieldnames is None:
fieldnames = list(_object_attrnames(self.obs[0]))
if isinstance(fieldnames, basestring):
fieldnames = fieldnames.split()
csv_dest.write(','.join(fieldnames) + NL)
csvout = csv.DictWriter(csv_dest, fieldnames, extrasaction='ignore', lineterminator=NL)
if hasattr(self.obs[0], "__dict__"):
csvout.writerows(o.__dict__ for o in self.obs)
else:
do_all(csvout.writerow(ODict(starmap(lambda obj, fld: (fld, getattr(obj, fld)),
zip(repeat(o), fieldnames)))) for o in self.obs)
finally:
if close_on_exit:
csv_dest.close() | [
"def",
"csv_export",
"(",
"self",
",",
"csv_dest",
",",
"fieldnames",
"=",
"None",
",",
"encoding",
"=",
"\"UTF-8\"",
")",
":",
"close_on_exit",
"=",
"False",
"if",
"isinstance",
"(",
"csv_dest",
",",
"basestring",
")",
":",
"if",
"PY_3",
":",
"csv_dest",
"=",
"open",
"(",
"csv_dest",
",",
"'w'",
",",
"newline",
"=",
"''",
",",
"encoding",
"=",
"encoding",
")",
"else",
":",
"csv_dest",
"=",
"open",
"(",
"csv_dest",
",",
"'wb'",
")",
"close_on_exit",
"=",
"True",
"try",
":",
"if",
"fieldnames",
"is",
"None",
":",
"fieldnames",
"=",
"list",
"(",
"_object_attrnames",
"(",
"self",
".",
"obs",
"[",
"0",
"]",
")",
")",
"if",
"isinstance",
"(",
"fieldnames",
",",
"basestring",
")",
":",
"fieldnames",
"=",
"fieldnames",
".",
"split",
"(",
")",
"csv_dest",
".",
"write",
"(",
"','",
".",
"join",
"(",
"fieldnames",
")",
"+",
"NL",
")",
"csvout",
"=",
"csv",
".",
"DictWriter",
"(",
"csv_dest",
",",
"fieldnames",
",",
"extrasaction",
"=",
"'ignore'",
",",
"lineterminator",
"=",
"NL",
")",
"if",
"hasattr",
"(",
"self",
".",
"obs",
"[",
"0",
"]",
",",
"\"__dict__\"",
")",
":",
"csvout",
".",
"writerows",
"(",
"o",
".",
"__dict__",
"for",
"o",
"in",
"self",
".",
"obs",
")",
"else",
":",
"do_all",
"(",
"csvout",
".",
"writerow",
"(",
"ODict",
"(",
"starmap",
"(",
"lambda",
"obj",
",",
"fld",
":",
"(",
"fld",
",",
"getattr",
"(",
"obj",
",",
"fld",
")",
")",
",",
"zip",
"(",
"repeat",
"(",
"o",
")",
",",
"fieldnames",
")",
")",
")",
")",
"for",
"o",
"in",
"self",
".",
"obs",
")",
"finally",
":",
"if",
"close_on_exit",
":",
"csv_dest",
".",
"close",
"(",
")"
] | Exports the contents of the table to a CSV-formatted file.
@param csv_dest: CSV file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left for the caller to be closed.
@type csv_dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if csv_dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string | [
"Exports",
"the",
"contents",
"of",
"the",
"table",
"to",
"a",
"CSV",
"-",
"formatted",
"file",
"."

8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1187-L1222 | train
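
With a file object the caller keeps ownership, so an in-memory buffer shows the output directly: a comma-joined header line, then one row per record. Sketch, same assumptions:

import io
import littletable as lt

t = lt.Table().insert_many([lt.DataObject(id=1, qty=5),
                            lt.DataObject(id=2, qty=7)])
buf = io.StringIO()
t.csv_export(buf, fieldnames='id qty')   # space-delimited names are split()
print(buf.getvalue())                    # id,qty / 1,5 / 2,7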

ptmcg/littletable | littletable.py | Table.json_import | python

def json_import(self, source, encoding="UTF-8", transforms=None, row_class=DataObject):
    """Imports the contents of a JSON data file into this table.
    @param source: JSON data file - if a string is given, the file with that name will be
    opened, read, and closed; if a file object is given, then that object
    will be read as-is, and left for the caller to be closed.
    @type source: string or file
    @param transforms: dict of functions by attribute name; if given, each
    attribute will be transformed using the corresponding transform; if there is no
    matching transform, the attribute will be read as a string (default); the
    transform function can also be defined as a (function, default-value) tuple; if
    there is an Exception raised by the transform function, then the attribute will
    be set to the given default value
    @type transforms: dict (optional)
    """
    class _JsonFileReader(object):
        def __init__(self, src):
            self.source = src

        def __iter__(self):
            current = ''
            for line in self.source:
                if current:
                    current += ' '
                current += line
                try:
                    yield json.loads(current)
                    current = ''
                except Exception:
                    pass

    return self._import(source, encoding, transforms=transforms, reader=_JsonFileReader, row_class=row_class)
"""Imports the contents of a JSON data file into this table.
@param source: JSON data file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left open for the caller to close.
@type source: string or file
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional)
"""
class _JsonFileReader(object):
def __init__(self, src):
self.source = src
def __iter__(self):
current = ''
for line in self.source:
if current:
current += ' '
current += line
try:
yield json.loads(current)
current = ''
except Exception:
pass
return self._import(source, encoding, transforms=transforms, reader=_JsonFileReader, row_class=row_class) | [
"def",
"json_import",
"(",
"self",
",",
"source",
",",
"encoding",
"=",
"\"UTF-8\"",
",",
"transforms",
"=",
"None",
",",
"row_class",
"=",
"DataObject",
")",
":",
"class",
"_JsonFileReader",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"src",
")",
":",
"self",
".",
"source",
"=",
"src",
"def",
"__iter__",
"(",
"self",
")",
":",
"current",
"=",
"''",
"for",
"line",
"in",
"self",
".",
"source",
":",
"if",
"current",
":",
"current",
"+=",
"' '",
"current",
"+=",
"line",
"try",
":",
"yield",
"json",
".",
"loads",
"(",
"current",
")",
"current",
"=",
"''",
"except",
"Exception",
":",
"pass",
"return",
"self",
".",
"_import",
"(",
"source",
",",
"encoding",
",",
"transforms",
"=",
"transforms",
",",
"reader",
"=",
"_JsonFileReader",
",",
"row_class",
"=",
"row_class",
")"
] | Imports the contents of a JSON data file into this table.
@param source: JSON data file - if a string is given, the file with that name will be
opened, read, and closed; if a file object is given, then that object
will be read as-is, and left open for the caller to close.
@type source: string or file
@param transforms: dict of functions by attribute name; if given, each
attribute will be transformed using the corresponding transform; if there is no
matching transform, the attribute will be read as a string (default); the
transform function can also be defined as a (function, default-value) tuple; if
there is an Exception raised by the transform function, then the attribute will
be set to the given default value
@type transforms: dict (optional) | [
"Imports",
"the",
"contents",
"of",
"a",
"JSON",
"data",
"file",
"into",
"this",
"table",
"."
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1224-L1252 | train |
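A hedged sketch of Table.json_import; the file name is illustrative, and each line of the source is assumed to hold one JSON object, matching the reader above:
import littletable as lt

t = lt.Table()
# convert qty with int(), falling back to 0 if the transform raises
t.json_import("inventory.json", transforms={"qty": (int, 0)})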
ptmcg/littletable | littletable.py | Table.json_export | def json_export(self, dest, fieldnames=None, encoding="UTF-8"):
"""Exports the contents of the table to a JSON-formatted file.
@param dest: output file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left open for the caller to close.
@type dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string
"""
close_on_exit = False
if isinstance(dest, basestring):
if PY_3:
dest = open(dest, 'w', encoding=encoding)
else:
dest = open(dest, 'w')
close_on_exit = True
try:
if isinstance(fieldnames, basestring):
fieldnames = fieldnames.split()
if fieldnames is None:
do_all(dest.write(_to_json(o)+'\n') for o in self.obs)
else:
do_all(dest.write(json.dumps(ODict((f, getattr(o, f)) for f in fieldnames))+'\n') for o in self.obs)
finally:
if close_on_exit:
dest.close() | python | def json_export(self, dest, fieldnames=None, encoding="UTF-8"):
"""Exports the contents of the table to a JSON-formatted file.
@param dest: output file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left open for the caller to close.
@type dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string
"""
close_on_exit = False
if isinstance(dest, basestring):
if PY_3:
dest = open(dest, 'w', encoding=encoding)
else:
dest = open(dest, 'w')
close_on_exit = True
try:
if isinstance(fieldnames, basestring):
fieldnames = fieldnames.split()
if fieldnames is None:
do_all(dest.write(_to_json(o)+'\n') for o in self.obs)
else:
do_all(dest.write(json.dumps(ODict((f, getattr(o, f)) for f in fieldnames))+'\n') for o in self.obs)
finally:
if close_on_exit:
dest.close() | [
"def",
"json_export",
"(",
"self",
",",
"dest",
",",
"fieldnames",
"=",
"None",
",",
"encoding",
"=",
"\"UTF-8\"",
")",
":",
"close_on_exit",
"=",
"False",
"if",
"isinstance",
"(",
"dest",
",",
"basestring",
")",
":",
"if",
"PY_3",
":",
"dest",
"=",
"open",
"(",
"dest",
",",
"'w'",
",",
"encoding",
"=",
"encoding",
")",
"else",
":",
"dest",
"=",
"open",
"(",
"dest",
",",
"'w'",
")",
"close_on_exit",
"=",
"True",
"try",
":",
"if",
"isinstance",
"(",
"fieldnames",
",",
"basestring",
")",
":",
"fieldnames",
"=",
"fieldnames",
".",
"split",
"(",
")",
"if",
"fieldnames",
"is",
"None",
":",
"do_all",
"(",
"dest",
".",
"write",
"(",
"_to_json",
"(",
"o",
")",
"+",
"'\\n'",
")",
"for",
"o",
"in",
"self",
".",
"obs",
")",
"else",
":",
"do_all",
"(",
"dest",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"ODict",
"(",
"(",
"f",
",",
"getattr",
"(",
"o",
",",
"f",
")",
")",
"for",
"f",
"in",
"fieldnames",
")",
")",
"+",
"'\\n'",
")",
"for",
"o",
"in",
"self",
".",
"obs",
")",
"finally",
":",
"if",
"close_on_exit",
":",
"dest",
".",
"close",
"(",
")"
] | Exports the contents of the table to a JSON-formatted file.
@param dest: output file - if a string is given, the file with that name will be
opened, written, and closed; if a file object is given, then that object
will be written as-is, and left open for the caller to close.
@type dest: string or file
@param fieldnames: attribute names to be exported; can be given as a single
string with space-delimited names, or as a list of attribute names
@type fieldnames: list of strings
@param encoding: string (default="UTF-8"); if dest is provided as a string
representing an output filename, an encoding argument can be provided (Python 3 only)
@type encoding: string | [
"Exports",
"the",
"contents",
"of",
"the",
"table",
"to",
"a",
"JSON",
"-",
"formatted",
"file",
"."
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1254-L1284 | train |
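A matching sketch for Table.json_export (illustrative names; output is one JSON object per line, since the code appends '\n' after each record):
import littletable as lt

t = lt.Table()
t.insert(lt.DataObject(sku="A100", qty=3))
t.json_export("inventory_out.json", fieldnames="sku qty")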
ptmcg/littletable | littletable.py | Table.add_field | def add_field(self, attrname, fn, default=None):
"""Computes a new attribute for each object in table, or replaces an
existing attribute in each record with a computed value
@param attrname: attribute to compute for each object
@type attrname: string
@param fn: function used to compute new attribute value, based on
other values in the object, as in::
lambda ob : ob.commission_pct/100.0 * ob.gross_sales
@type fn: function(obj) returns value
@param default: value to use if an exception is raised while trying
to evaluate fn
"""
# for rec in self:
def _add_field_to_rec(rec_, fn_=fn, default_=default):
try:
val = fn_(rec_)
except Exception:
val = default_
if isinstance(rec_, DataObject):
rec_.__dict__[attrname] = val
else:
setattr(rec_, attrname, val)
try:
do_all(_add_field_to_rec(r) for r in self)
except AttributeError:
raise AttributeError("cannot add/modify attribute {!r} in table records".format(attrname))
return self | python | def add_field(self, attrname, fn, default=None):
"""Computes a new attribute for each object in table, or replaces an
existing attribute in each record with a computed value
@param attrname: attribute to compute for each object
@type attrname: string
@param fn: function used to compute new attribute value, based on
other values in the object, as in::
lambda ob : ob.commission_pct/100.0 * ob.gross_sales
@type fn: function(obj) returns value
@param default: value to use if an exception is raised while trying
to evaluate fn
"""
# for rec in self:
def _add_field_to_rec(rec_, fn_=fn, default_=default):
try:
val = fn_(rec_)
except Exception:
val = default_
if isinstance(rec_, DataObject):
rec_.__dict__[attrname] = val
else:
setattr(rec_, attrname, val)
try:
do_all(_add_field_to_rec(r) for r in self)
except AttributeError:
raise AttributeError("cannot add/modify attribute {!r} in table records".format(attrname))
return self | [
"def",
"add_field",
"(",
"self",
",",
"attrname",
",",
"fn",
",",
"default",
"=",
"None",
")",
":",
"# for rec in self:",
"def",
"_add_field_to_rec",
"(",
"rec_",
",",
"fn_",
"=",
"fn",
",",
"default_",
"=",
"default",
")",
":",
"try",
":",
"val",
"=",
"fn_",
"(",
"rec_",
")",
"except",
"Exception",
":",
"val",
"=",
"default_",
"if",
"isinstance",
"(",
"rec_",
",",
"DataObject",
")",
":",
"rec_",
".",
"__dict__",
"[",
"attrname",
"]",
"=",
"val",
"else",
":",
"setattr",
"(",
"rec_",
",",
"attrname",
",",
"val",
")",
"try",
":",
"do_all",
"(",
"_add_field_to_rec",
"(",
"r",
")",
"for",
"r",
"in",
"self",
")",
"except",
"AttributeError",
":",
"raise",
"AttributeError",
"(",
"\"cannot add/modify attribute {!r} in table records\"",
".",
"format",
"(",
"attrname",
")",
")",
"return",
"self"
] | Computes a new attribute for each object in table, or replaces an
existing attribute in each record with a computed value
@param attrname: attribute to compute for each object
@type attrname: string
@param fn: function used to compute new attribute value, based on
other values in the object, as in::
lambda ob : ob.commission_pct/100.0 * ob.gross_sales
@type fn: function(obj) returns value
@param default: value to use if an exception is raised while trying
to evaluate fn | [
"Computes",
"a",
"new",
"attribute",
"for",
"each",
"object",
"in",
"table",
"or",
"replaces",
"an",
"existing",
"attribute",
"in",
"each",
"record",
"with",
"a",
"computed",
"value"
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1286-L1314 | train |
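A short sketch of add_field, mirroring the lambda from the docstring (attribute names illustrative):
import littletable as lt

t = lt.Table()
t.insert(lt.DataObject(gross_sales=1000.0, commission_pct=5.0))
# default=0.0 is used for any record where the lambda raises
t.add_field("commission", lambda ob: ob.commission_pct / 100.0 * ob.gross_sales, default=0.0)
for rec in t:
    print(rec.commission)   # -> 50.0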
ptmcg/littletable | littletable.py | Table.groupby | def groupby(self, keyexpr, **outexprs):
"""simple prototype of group by, with support for expressions in the group-by clause
and outputs
@param keyexpr: grouping field and optional expression for computing the key value;
if a string is passed
@type keyexpr: string or tuple
@param outexprs: named arguments describing one or more summary values to
compute per key
@type outexprs: callable, taking a sequence of objects as input and returning
a single summary value
"""
if isinstance(keyexpr, basestring):
keyattrs = keyexpr.split()
keyfn = lambda o: tuple(getattr(o, k) for k in keyattrs)
elif isinstance(keyexpr, tuple):
keyattrs = (keyexpr[0],)
keyfn = keyexpr[1]
else:
raise TypeError("keyexpr must be string or tuple")
groupedobs = defaultdict(list)
do_all(groupedobs[keyfn(ob)].append(ob) for ob in self.obs)
tbl = Table()
do_all(tbl.create_index(k, unique=(len(keyattrs) == 1)) for k in keyattrs)
for key, recs in sorted(groupedobs.items()):
groupobj = DataObject(**dict(zip(keyattrs, key)))
do_all(setattr(groupobj, subkey, expr(recs)) for subkey, expr in outexprs.items())
tbl.insert(groupobj)
return tbl | python | def groupby(self, keyexpr, **outexprs):
"""simple prototype of group by, with support for expressions in the group-by clause
and outputs
@param keyexpr: grouping field and optional expression for computing the key value;
if a string is passed
@type keyexpr: string or tuple
@param outexprs: named arguments describing one or more summary values to
compute per key
@type outexprs: callable, taking a sequence of objects as input and returning
a single summary value
"""
if isinstance(keyexpr, basestring):
keyattrs = keyexpr.split()
keyfn = lambda o: tuple(getattr(o, k) for k in keyattrs)
elif isinstance(keyexpr, tuple):
keyattrs = (keyexpr[0],)
keyfn = keyexpr[1]
else:
raise TypeError("keyexpr must be string or tuple")
groupedobs = defaultdict(list)
do_all(groupedobs[keyfn(ob)].append(ob) for ob in self.obs)
tbl = Table()
do_all(tbl.create_index(k, unique=(len(keyattrs) == 1)) for k in keyattrs)
for key, recs in sorted(groupedobs.items()):
groupobj = DataObject(**dict(zip(keyattrs, key)))
do_all(setattr(groupobj, subkey, expr(recs)) for subkey, expr in outexprs.items())
tbl.insert(groupobj)
return tbl | [
"def",
"groupby",
"(",
"self",
",",
"keyexpr",
",",
"*",
"*",
"outexprs",
")",
":",
"if",
"isinstance",
"(",
"keyexpr",
",",
"basestring",
")",
":",
"keyattrs",
"=",
"keyexpr",
".",
"split",
"(",
")",
"keyfn",
"=",
"lambda",
"o",
":",
"tuple",
"(",
"getattr",
"(",
"o",
",",
"k",
")",
"for",
"k",
"in",
"keyattrs",
")",
"elif",
"isinstance",
"(",
"keyexpr",
",",
"tuple",
")",
":",
"keyattrs",
"=",
"(",
"keyexpr",
"[",
"0",
"]",
",",
")",
"keyfn",
"=",
"keyexpr",
"[",
"1",
"]",
"else",
":",
"raise",
"TypeError",
"(",
"\"keyexpr must be string or tuple\"",
")",
"groupedobs",
"=",
"defaultdict",
"(",
"list",
")",
"do_all",
"(",
"groupedobs",
"[",
"keyfn",
"(",
"ob",
")",
"]",
".",
"append",
"(",
"ob",
")",
"for",
"ob",
"in",
"self",
".",
"obs",
")",
"tbl",
"=",
"Table",
"(",
")",
"do_all",
"(",
"tbl",
".",
"create_index",
"(",
"k",
",",
"unique",
"=",
"(",
"len",
"(",
"keyattrs",
")",
"==",
"1",
")",
")",
"for",
"k",
"in",
"keyattrs",
")",
"for",
"key",
",",
"recs",
"in",
"sorted",
"(",
"groupedobs",
".",
"items",
"(",
")",
")",
":",
"groupobj",
"=",
"DataObject",
"(",
"*",
"*",
"dict",
"(",
"zip",
"(",
"keyattrs",
",",
"key",
")",
")",
")",
"do_all",
"(",
"setattr",
"(",
"groupobj",
",",
"subkey",
",",
"expr",
"(",
"recs",
")",
")",
"for",
"subkey",
",",
"expr",
"in",
"outexprs",
".",
"items",
"(",
")",
")",
"tbl",
".",
"insert",
"(",
"groupobj",
")",
"return",
"tbl"
] | simple prototype of group by, with support for expressions in the group-by clause
and outputs
@param keyexpr: grouping field and optional expression for computing the key value;
if a string is passed
@type keyexpr: string or tuple
@param outexprs: named arguments describing one or more summary values to
compute per key
@type outexprs: callable, taking a sequence of objects as input and returning
a single summary value | [
"simple",
"prototype",
"of",
"group",
"by",
"with",
"support",
"for",
"expressions",
"in",
"the",
"group",
"-",
"by",
"clause",
"and",
"outputs"
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1316-L1347 | train |
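A sketch of groupby with a string key and one summary output (values illustrative):
import littletable as lt

t = lt.Table()
for region, sales in [("EU", 100), ("EU", 150), ("US", 200)]:
    t.insert(lt.DataObject(region=region, sales=sales))
summary = t.groupby("region", total=lambda recs: sum(r.sales for r in recs))
for row in summary:
    print(row.region, row.total)   # EU 250, then US 200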
ptmcg/littletable | littletable.py | Table.unique | def unique(self, key=None):
"""
Create a new table of objects, containing no duplicate values.
@param key: (default=None) optional callable for computing a representative unique key for each
object in the table. If None, then a key will be composed as a tuple of all the values in the object.
@type key: callable, takes the record as an argument, and returns the key value or tuple to be used
to represent uniqueness.
"""
if isinstance(key, basestring):
key = lambda r, attr=key: getattr(r, attr, None)
ret = self.copy_template()
seen = set()
for ob in self:
if key is None:
try:
ob_dict = vars(ob)
except TypeError:
ob_dict = dict((k, getattr(ob, k)) for k in _object_attrnames(ob))
reckey = tuple(sorted(ob_dict.items()))
else:
reckey = key(ob)
if reckey not in seen:
seen.add(reckey)
ret.insert(ob)
return ret | python | def unique(self, key=None):
"""
Create a new table of objects, containing no duplicate values.
@param key: (default=None) optional callable for computing a representative unique key for each
object in the table. If None, then a key will be composed as a tuple of all the values in the object.
@type key: callable, takes the record as an argument, and returns the key value or tuple to be used
to represent uniqueness.
"""
if isinstance(key, basestring):
key = lambda r, attr=key: getattr(r, attr, None)
ret = self.copy_template()
seen = set()
for ob in self:
if key is None:
try:
ob_dict = vars(ob)
except TypeError:
ob_dict = dict((k, getattr(ob, k)) for k in _object_attrnames(ob))
reckey = tuple(sorted(ob_dict.items()))
else:
reckey = key(ob)
if reckey not in seen:
seen.add(reckey)
ret.insert(ob)
return ret | [
"def",
"unique",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"basestring",
")",
":",
"key",
"=",
"lambda",
"r",
",",
"attr",
"=",
"key",
":",
"getattr",
"(",
"r",
",",
"attr",
",",
"None",
")",
"ret",
"=",
"self",
".",
"copy_template",
"(",
")",
"seen",
"=",
"set",
"(",
")",
"for",
"ob",
"in",
"self",
":",
"if",
"key",
"is",
"None",
":",
"try",
":",
"ob_dict",
"=",
"vars",
"(",
"ob",
")",
"except",
"TypeError",
":",
"ob_dict",
"=",
"dict",
"(",
"(",
"k",
",",
"getattr",
"(",
"ob",
",",
"k",
")",
")",
"for",
"k",
"in",
"_object_attrnames",
"(",
"ob",
")",
")",
"reckey",
"=",
"tuple",
"(",
"sorted",
"(",
"ob_dict",
".",
"items",
"(",
")",
")",
")",
"else",
":",
"reckey",
"=",
"key",
"(",
"ob",
")",
"if",
"reckey",
"not",
"in",
"seen",
":",
"seen",
".",
"add",
"(",
"reckey",
")",
"ret",
".",
"insert",
"(",
"ob",
")",
"return",
"ret"
] | Create a new table of objects, containing no duplicate values.
@param key: (default=None) optional callable for computing a representative unique key for each
object in the table. If None, then a key will be composed as a tuple of all the values in the object.
@type key: callable, takes the record as an argument, and returns the key value or tuple to be used
to represent uniqueness. | [
"Create",
"a",
"new",
"table",
"of",
"objects",
"containing",
"no",
"duplicate",
"values",
"."
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1349-L1374 | train |
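A sketch of unique, showing both the whole-record default and a string key (which the method wraps in a getattr lambda):
import littletable as lt

t = lt.Table()
t.insert(lt.DataObject(sku="A100", qty=3))
t.insert(lt.DataObject(sku="A100", qty=3))   # exact duplicate
t.insert(lt.DataObject(sku="A100", qty=9))
print(len(t.unique()))            # 2: the exact duplicate is dropped
print(len(t.unique(key="sku")))   # 1: first record per sku is kept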
ptmcg/littletable | littletable.py | Table.as_html | def as_html(self, fields='*'):
"""
Output the table as a rudimentary HTML table.
@param fields: fields of the table to be shown in the HTML table
- listing '*' as a field will add all unnamed fields
- starting a field name with '-' will suppress that name
@type fields: list of strings or a single space-delimited string
@return: string of generated HTML representing the selected table row attributes
"""
fields = self._parse_fields_string(fields)
def td_value(v):
return '<td><div align="{}">{}</div></td>'.format(('left','right')[isinstance(v, (int, float))], str(v))
def row_to_tr(r):
return "<tr>" + "".join(td_value(getattr(r, fld)) for fld in fields) + "</tr>\n"
ret = ""
ret += "<table>\n"
ret += "<tr>" + "".join(map('<th><div align="center">{}</div></th>'.format, fields)) + "</tr>\n"
ret += "".join(map(row_to_tr, self))
ret += "</table>"
return ret | python | def as_html(self, fields='*'):
"""
Output the table as a rudimentary HTML table.
@param fields: fields of the table to be shown in the HTML table
- listing '*' as a field will add all unnamed fields
- starting a field name with '-' will suppress that name
@type fields: list of strings or a single space-delimited string
@return: string of generated HTML representing the selected table row attributes
"""
fields = self._parse_fields_string(fields)
def td_value(v):
return '<td><div align="{}">{}</div></td>'.format(('left','right')[isinstance(v, (int, float))], str(v))
def row_to_tr(r):
return "<tr>" + "".join(td_value(getattr(r, fld)) for fld in fields) + "</tr>\n"
ret = ""
ret += "<table>\n"
ret += "<tr>" + "".join(map('<th><div align="center">{}</div></th>'.format, fields)) + "</tr>\n"
ret += "".join(map(row_to_tr, self))
ret += "</table>"
return ret | [
"def",
"as_html",
"(",
"self",
",",
"fields",
"=",
"'*'",
")",
":",
"fields",
"=",
"self",
".",
"_parse_fields_string",
"(",
"fields",
")",
"def",
"td_value",
"(",
"v",
")",
":",
"return",
"'<td><div align=\"{}\">{}</div></td>'",
".",
"format",
"(",
"(",
"'left'",
",",
"'right'",
")",
"[",
"isinstance",
"(",
"v",
",",
"(",
"int",
",",
"float",
")",
")",
"]",
",",
"str",
"(",
"v",
")",
")",
"def",
"row_to_tr",
"(",
"r",
")",
":",
"return",
"\"<tr>\"",
"+",
"\"\"",
".",
"join",
"(",
"td_value",
"(",
"getattr",
"(",
"r",
",",
"fld",
")",
")",
"for",
"fld",
"in",
"fields",
")",
"+",
"\"</tr>\\n\"",
"ret",
"=",
"\"\"",
"ret",
"+=",
"\"<table>\\n\"",
"ret",
"+=",
"\"<tr>\"",
"+",
"\"\"",
".",
"join",
"(",
"map",
"(",
"'<th><div align=\"center\">{}</div></th>'",
".",
"format",
",",
"fields",
")",
")",
"+",
"\"</tr>\\n\"",
"ret",
"+=",
"\"\"",
".",
"join",
"(",
"map",
"(",
"row_to_tr",
",",
"self",
")",
")",
"ret",
"+=",
"\"</table>\"",
"return",
"ret"
] | Output the table as a rudimentary HTML table.
@param fields: fields of the table to be shown in the HTML table
- listing '*' as a field will add all unnamed fields
- starting a field name with '-' will suppress that name
@type fields: list of strings or a single space-delimited string
@return: string of generated HTML representing the selected table row attributes | [
"Output",
"the",
"table",
"as",
"a",
"rudimentary",
"HTML",
"table",
"."
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1450-L1469 | train |
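A sketch of as_html; '*' selects all fields and a leading '-' suppresses one, per the docstring:
import littletable as lt

t = lt.Table()
t.insert(lt.DataObject(sku="A100", qty=3))
print(t.as_html("sku qty"))   # or t.as_html("* -qty") to drop a column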
ptmcg/littletable | littletable.py | PivotTable.dump | def dump(self, out=sys.stdout, row_fn=repr, limit=-1, indent=0):
"""Dump out the contents of this table in a nested listing.
@param out: output stream to write to
@param row_fn: function to call to display individual rows
@param limit: number of records to show at deepest level of pivot (-1=show all)
@param indent: current nesting level
"""
NL = '\n'
if indent:
out.write(" "*indent + self.pivot_key_str())
else:
out.write("Pivot: %s" % ','.join(self._pivot_attrs))
out.write(NL)
if self.has_subtables():
do_all(sub.dump(out, row_fn, limit, indent+1) for sub in self.subtables if sub)
else:
if limit >= 0:
showslice = slice(0, limit)
else:
showslice = slice(None, None)
do_all(out.write(" "*(indent+1) + row_fn(r) + NL) for r in self.obs[showslice])
out.flush() | python | def dump(self, out=sys.stdout, row_fn=repr, limit=-1, indent=0):
"""Dump out the contents of this table in a nested listing.
@param out: output stream to write to
@param row_fn: function to call to display individual rows
@param limit: number of records to show at deepest level of pivot (-1=show all)
@param indent: current nesting level
"""
NL = '\n'
if indent:
out.write(" "*indent + self.pivot_key_str())
else:
out.write("Pivot: %s" % ','.join(self._pivot_attrs))
out.write(NL)
if self.has_subtables():
do_all(sub.dump(out, row_fn, limit, indent+1) for sub in self.subtables if sub)
else:
if limit >= 0:
showslice = slice(0, limit)
else:
showslice = slice(None, None)
do_all(out.write(" "*(indent+1) + row_fn(r) + NL) for r in self.obs[showslice])
out.flush() | [
"def",
"dump",
"(",
"self",
",",
"out",
"=",
"sys",
".",
"stdout",
",",
"row_fn",
"=",
"repr",
",",
"limit",
"=",
"-",
"1",
",",
"indent",
"=",
"0",
")",
":",
"NL",
"=",
"'\\n'",
"if",
"indent",
":",
"out",
".",
"write",
"(",
"\" \"",
"*",
"indent",
"+",
"self",
".",
"pivot_key_str",
"(",
")",
")",
"else",
":",
"out",
".",
"write",
"(",
"\"Pivot: %s\"",
"%",
"','",
".",
"join",
"(",
"self",
".",
"_pivot_attrs",
")",
")",
"out",
".",
"write",
"(",
"NL",
")",
"if",
"self",
".",
"has_subtables",
"(",
")",
":",
"do_all",
"(",
"sub",
".",
"dump",
"(",
"out",
",",
"row_fn",
",",
"limit",
",",
"indent",
"+",
"1",
")",
"for",
"sub",
"in",
"self",
".",
"subtables",
"if",
"sub",
")",
"else",
":",
"if",
"limit",
">=",
"0",
":",
"showslice",
"=",
"slice",
"(",
"0",
",",
"limit",
")",
"else",
":",
"showslice",
"=",
"slice",
"(",
"None",
",",
"None",
")",
"do_all",
"(",
"out",
".",
"write",
"(",
"\" \"",
"*",
"(",
"indent",
"+",
"1",
")",
"+",
"row_fn",
"(",
"r",
")",
"+",
"NL",
")",
"for",
"r",
"in",
"self",
".",
"obs",
"[",
"showslice",
"]",
")",
"out",
".",
"flush",
"(",
")"
] | Dump out the contents of this table in a nested listing.
@param out: output stream to write to
@param row_fn: function to call to display individual rows
@param limit: number of records to show at deepest level of pivot (-1=show all)
@param indent: current nesting level | [
"Dump",
"out",
"the",
"contents",
"of",
"this",
"table",
"in",
"a",
"nested",
"listing",
"."
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1535-L1556 | train |
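A hedged sketch of PivotTable.dump; creating the pivot via Table.pivot, after indexing the pivot attribute, is an assumption based on the library's documented usage:
import sys
import littletable as lt

t = lt.Table()
for region, sales in [("EU", 100), ("EU", 150), ("US", 200)]:
    t.insert(lt.DataObject(region=region, sales=sales))
t.create_index("region")
piv = t.pivot("region")   # Table.pivot assumed
piv.dump(out=sys.stdout, row_fn=lambda r: "%s %s" % (r.region, r.sales), limit=2)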
ptmcg/littletable | littletable.py | PivotTable.dump_counts | def dump_counts(self, out=sys.stdout, count_fn=len, colwidth=10):
"""Dump out the summary counts of entries in this pivot table as a tabular listing.
@param out: output stream to write to
@param count_fn: (default=len) function for computing value for each pivot cell
@param colwidth: (default=10)
"""
if len(self._pivot_attrs) == 1:
out.write("Pivot: %s\n" % ','.join(self._pivot_attrs))
maxkeylen = max(len(str(k)) for k in self.keys())
maxvallen = colwidth
keytally = {}
for k, sub in self.items():
sub_v = count_fn(sub)
maxvallen = max(maxvallen, len(str(sub_v)))
keytally[k] = sub_v
for k, sub in self.items():
out.write("%-*.*s " % (maxkeylen, maxkeylen, k))
out.write("%*s\n" % (maxvallen, keytally[k]))
elif len(self._pivot_attrs) == 2:
out.write("Pivot: %s\n" % ','.join(self._pivot_attrs))
maxkeylen = max(max(len(str(k)) for k in self.keys()), 5)
maxvallen = max(max(len(str(k)) for k in self.subtables[0].keys()), colwidth)
keytally = dict((k, 0) for k in self.subtables[0].keys())
out.write("%*s " % (maxkeylen, ''))
out.write(' '.join("%*.*s" % (maxvallen, maxvallen, k) for k in self.subtables[0].keys()))
out.write(' %*s\n' % (maxvallen, 'Total'))
for k, sub in self.items():
out.write("%-*.*s " % (maxkeylen, maxkeylen, k))
for kk, ssub in sub.items():
ssub_v = count_fn(ssub)
out.write("%*d " % (maxvallen, ssub_v))
keytally[kk] += ssub_v
maxvallen = max(maxvallen, len(str(ssub_v)))
sub_v = count_fn(sub)
maxvallen = max(maxvallen, len(str(sub_v)))
out.write("%*d\n" % (maxvallen, sub_v))
out.write('%-*.*s ' % (maxkeylen, maxkeylen, "Total"))
out.write(' '.join("%*d" % (maxvallen, tally) for k, tally in sorted(keytally.items())))
out.write(" %*d\n" % (maxvallen, sum(tally for k, tally in keytally.items())))
else:
raise ValueError("can only dump summary counts for 1 or 2-attribute pivots") | python | def dump_counts(self, out=sys.stdout, count_fn=len, colwidth=10):
"""Dump out the summary counts of entries in this pivot table as a tabular listing.
@param out: output stream to write to
@param count_fn: (default=len) function for computing value for each pivot cell
@param colwidth: (default=10)
"""
if len(self._pivot_attrs) == 1:
out.write("Pivot: %s\n" % ','.join(self._pivot_attrs))
maxkeylen = max(len(str(k)) for k in self.keys())
maxvallen = colwidth
keytally = {}
for k, sub in self.items():
sub_v = count_fn(sub)
maxvallen = max(maxvallen, len(str(sub_v)))
keytally[k] = sub_v
for k, sub in self.items():
out.write("%-*.*s " % (maxkeylen, maxkeylen, k))
out.write("%*s\n" % (maxvallen, keytally[k]))
elif len(self._pivot_attrs) == 2:
out.write("Pivot: %s\n" % ','.join(self._pivot_attrs))
maxkeylen = max(max(len(str(k)) for k in self.keys()), 5)
maxvallen = max(max(len(str(k)) for k in self.subtables[0].keys()), colwidth)
keytally = dict((k, 0) for k in self.subtables[0].keys())
out.write("%*s " % (maxkeylen, ''))
out.write(' '.join("%*.*s" % (maxvallen, maxvallen, k) for k in self.subtables[0].keys()))
out.write(' %*s\n' % (maxvallen, 'Total'))
for k, sub in self.items():
out.write("%-*.*s " % (maxkeylen, maxkeylen, k))
for kk, ssub in sub.items():
ssub_v = count_fn(ssub)
out.write("%*d " % (maxvallen, ssub_v))
keytally[kk] += ssub_v
maxvallen = max(maxvallen, len(str(ssub_v)))
sub_v = count_fn(sub)
maxvallen = max(maxvallen, len(str(sub_v)))
out.write("%*d\n" % (maxvallen, sub_v))
out.write('%-*.*s ' % (maxkeylen, maxkeylen, "Total"))
out.write(' '.join("%*d" % (maxvallen, tally) for k, tally in sorted(keytally.items())))
out.write(" %*d\n" % (maxvallen, sum(tally for k, tally in keytally.items())))
else:
raise ValueError("can only dump summary counts for 1 or 2-attribute pivots") | [
"def",
"dump_counts",
"(",
"self",
",",
"out",
"=",
"sys",
".",
"stdout",
",",
"count_fn",
"=",
"len",
",",
"colwidth",
"=",
"10",
")",
":",
"if",
"len",
"(",
"self",
".",
"_pivot_attrs",
")",
"==",
"1",
":",
"out",
".",
"write",
"(",
"\"Pivot: %s\\n\"",
"%",
"','",
".",
"join",
"(",
"self",
".",
"_pivot_attrs",
")",
")",
"maxkeylen",
"=",
"max",
"(",
"len",
"(",
"str",
"(",
"k",
")",
")",
"for",
"k",
"in",
"self",
".",
"keys",
"(",
")",
")",
"maxvallen",
"=",
"colwidth",
"keytally",
"=",
"{",
"}",
"for",
"k",
",",
"sub",
"in",
"self",
".",
"items",
"(",
")",
":",
"sub_v",
"=",
"count_fn",
"(",
"sub",
")",
"maxvallen",
"=",
"max",
"(",
"maxvallen",
",",
"len",
"(",
"str",
"(",
"sub_v",
")",
")",
")",
"keytally",
"[",
"k",
"]",
"=",
"sub_v",
"for",
"k",
",",
"sub",
"in",
"self",
".",
"items",
"(",
")",
":",
"out",
".",
"write",
"(",
"\"%-*.*s \"",
"%",
"(",
"maxkeylen",
",",
"maxkeylen",
",",
"k",
")",
")",
"out",
".",
"write",
"(",
"\"%*s\\n\"",
"%",
"(",
"maxvallen",
",",
"keytally",
"[",
"k",
"]",
")",
")",
"elif",
"len",
"(",
"self",
".",
"_pivot_attrs",
")",
"==",
"2",
":",
"out",
".",
"write",
"(",
"\"Pivot: %s\\n\"",
"%",
"','",
".",
"join",
"(",
"self",
".",
"_pivot_attrs",
")",
")",
"maxkeylen",
"=",
"max",
"(",
"max",
"(",
"len",
"(",
"str",
"(",
"k",
")",
")",
"for",
"k",
"in",
"self",
".",
"keys",
"(",
")",
")",
",",
"5",
")",
"maxvallen",
"=",
"max",
"(",
"max",
"(",
"len",
"(",
"str",
"(",
"k",
")",
")",
"for",
"k",
"in",
"self",
".",
"subtables",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
",",
"colwidth",
")",
"keytally",
"=",
"dict",
"(",
"(",
"k",
",",
"0",
")",
"for",
"k",
"in",
"self",
".",
"subtables",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
"out",
".",
"write",
"(",
"\"%*s \"",
"%",
"(",
"maxkeylen",
",",
"''",
")",
")",
"out",
".",
"write",
"(",
"' '",
".",
"join",
"(",
"\"%*.*s\"",
"%",
"(",
"maxvallen",
",",
"maxvallen",
",",
"k",
")",
"for",
"k",
"in",
"self",
".",
"subtables",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
")",
"out",
".",
"write",
"(",
"' %*s\\n'",
"%",
"(",
"maxvallen",
",",
"'Total'",
")",
")",
"for",
"k",
",",
"sub",
"in",
"self",
".",
"items",
"(",
")",
":",
"out",
".",
"write",
"(",
"\"%-*.*s \"",
"%",
"(",
"maxkeylen",
",",
"maxkeylen",
",",
"k",
")",
")",
"for",
"kk",
",",
"ssub",
"in",
"sub",
".",
"items",
"(",
")",
":",
"ssub_v",
"=",
"count_fn",
"(",
"ssub",
")",
"out",
".",
"write",
"(",
"\"%*d \"",
"%",
"(",
"maxvallen",
",",
"ssub_v",
")",
")",
"keytally",
"[",
"kk",
"]",
"+=",
"ssub_v",
"maxvallen",
"=",
"max",
"(",
"maxvallen",
",",
"len",
"(",
"str",
"(",
"ssub_v",
")",
")",
")",
"sub_v",
"=",
"count_fn",
"(",
"sub",
")",
"maxvallen",
"=",
"max",
"(",
"maxvallen",
",",
"len",
"(",
"str",
"(",
"sub_v",
")",
")",
")",
"out",
".",
"write",
"(",
"\"%*d\\n\"",
"%",
"(",
"maxvallen",
",",
"sub_v",
")",
")",
"out",
".",
"write",
"(",
"'%-*.*s '",
"%",
"(",
"maxkeylen",
",",
"maxkeylen",
",",
"\"Total\"",
")",
")",
"out",
".",
"write",
"(",
"' '",
".",
"join",
"(",
"\"%*d\"",
"%",
"(",
"maxvallen",
",",
"tally",
")",
"for",
"k",
",",
"tally",
"in",
"sorted",
"(",
"keytally",
".",
"items",
"(",
")",
")",
")",
")",
"out",
".",
"write",
"(",
"\" %*d\\n\"",
"%",
"(",
"maxvallen",
",",
"sum",
"(",
"tally",
"for",
"k",
",",
"tally",
"in",
"keytally",
".",
"items",
"(",
")",
")",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"can only dump summary counts for 1 or 2-attribute pivots\"",
")"
] | Dump out the summary counts of entries in this pivot table as a tabular listing.
@param out: output stream to write to
@param count_fn: (default=len) function for computing value for each pivot cell
@param colwidth: (default=10) | [
"Dump",
"out",
"the",
"summary",
"counts",
"of",
"entries",
"in",
"this",
"pivot",
"table",
"as",
"a",
"tabular",
"listing",
"."
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1558-L1598 | train |
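A sketch of dump_counts with both the default len and a custom count_fn applied to each sub-table (pivot construction assumed, as above):
import littletable as lt

t = lt.Table()
for region, sales in [("EU", 100), ("EU", 150), ("US", 200)]:
    t.insert(lt.DataObject(region=region, sales=sales))
t.create_index("region")
piv = t.pivot("region")   # Table.pivot assumed
piv.dump_counts()                                                 # record count per region
piv.dump_counts(count_fn=lambda sub: sum(r.sales for r in sub))   # sales total per region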
ptmcg/littletable | littletable.py | PivotTable.as_table | def as_table(self, fn=None, col=None, col_label=None):
"""Dump out the summary counts of this pivot table as a Table.
"""
if col_label is None:
col_label = col
if fn is None:
fn = len
if col_label is None:
col_label = 'count'
ret = Table()
# topattr = self._pivot_attrs[0]
do_all(ret.create_index(attr) for attr in self._pivot_attrs)
if len(self._pivot_attrs) == 1:
for sub in self.subtables:
subattr, subval = sub._attr_path[-1]
attrdict = {subattr: subval}
if col is None or fn is len:
attrdict[col_label] = fn(sub)
else:
attrdict[col_label] = fn(s[col] for s in sub)
ret.insert(DataObject(**attrdict))
elif len(self._pivot_attrs) == 2:
for sub in self.subtables:
for ssub in sub.subtables:
attrdict = dict(ssub._attr_path)
if col is None or fn is len:
attrdict[col_label] = fn(ssub)
else:
attrdict[col_label] = fn(s[col] for s in ssub)
ret.insert(DataObject(**attrdict))
elif len(self._pivot_attrs) == 3:
for sub in self.subtables:
for ssub in sub.subtables:
for sssub in ssub.subtables:
attrdict = dict(sssub._attr_path)
if col is None or fn is len:
attrdict[col_label] = fn(sssub)
else:
attrdict[col_label] = fn(s[col] for s in sssub)
ret.insert(DataObject(**attrdict))
else:
raise ValueError("can only dump summary counts for 1, 2 or 3-attribute pivots")
return ret | python | def as_table(self, fn=None, col=None, col_label=None):
"""Dump out the summary counts of this pivot table as a Table.
"""
if col_label is None:
col_label = col
if fn is None:
fn = len
if col_label is None:
col_label = 'count'
ret = Table()
# topattr = self._pivot_attrs[0]
do_all(ret.create_index(attr) for attr in self._pivot_attrs)
if len(self._pivot_attrs) == 1:
for sub in self.subtables:
subattr, subval = sub._attr_path[-1]
attrdict = {subattr: subval}
if col is None or fn is len:
attrdict[col_label] = fn(sub)
else:
attrdict[col_label] = fn(s[col] for s in sub)
ret.insert(DataObject(**attrdict))
elif len(self._pivot_attrs) == 2:
for sub in self.subtables:
for ssub in sub.subtables:
attrdict = dict(ssub._attr_path)
if col is None or fn is len:
attrdict[col_label] = fn(ssub)
else:
attrdict[col_label] = fn(s[col] for s in ssub)
ret.insert(DataObject(**attrdict))
elif len(self._pivot_attrs) == 3:
for sub in self.subtables:
for ssub in sub.subtables:
for sssub in ssub.subtables:
attrdict = dict(sssub._attr_path)
if col is None or fn is len:
attrdict[col_label] = fn(sssub)
else:
attrdict[col_label] = fn(s[col] for s in sssub)
ret.insert(DataObject(**attrdict))
else:
raise ValueError("can only dump summary counts for 1, 2 or 3-attribute pivots")
return ret | [
"def",
"as_table",
"(",
"self",
",",
"fn",
"=",
"None",
",",
"col",
"=",
"None",
",",
"col_label",
"=",
"None",
")",
":",
"if",
"col_label",
"is",
"None",
":",
"col_label",
"=",
"col",
"if",
"fn",
"is",
"None",
":",
"fn",
"=",
"len",
"if",
"col_label",
"is",
"None",
":",
"col_label",
"=",
"'count'",
"ret",
"=",
"Table",
"(",
")",
"# topattr = self._pivot_attrs[0]",
"do_all",
"(",
"ret",
".",
"create_index",
"(",
"attr",
")",
"for",
"attr",
"in",
"self",
".",
"_pivot_attrs",
")",
"if",
"len",
"(",
"self",
".",
"_pivot_attrs",
")",
"==",
"1",
":",
"for",
"sub",
"in",
"self",
".",
"subtables",
":",
"subattr",
",",
"subval",
"=",
"sub",
".",
"_attr_path",
"[",
"-",
"1",
"]",
"attrdict",
"=",
"{",
"subattr",
":",
"subval",
"}",
"if",
"col",
"is",
"None",
"or",
"fn",
"is",
"len",
":",
"attrdict",
"[",
"col_label",
"]",
"=",
"fn",
"(",
"sub",
")",
"else",
":",
"attrdict",
"[",
"col_label",
"]",
"=",
"fn",
"(",
"s",
"[",
"col",
"]",
"for",
"s",
"in",
"sub",
")",
"ret",
".",
"insert",
"(",
"DataObject",
"(",
"*",
"*",
"attrdict",
")",
")",
"elif",
"len",
"(",
"self",
".",
"_pivot_attrs",
")",
"==",
"2",
":",
"for",
"sub",
"in",
"self",
".",
"subtables",
":",
"for",
"ssub",
"in",
"sub",
".",
"subtables",
":",
"attrdict",
"=",
"dict",
"(",
"ssub",
".",
"_attr_path",
")",
"if",
"col",
"is",
"None",
"or",
"fn",
"is",
"len",
":",
"attrdict",
"[",
"col_label",
"]",
"=",
"fn",
"(",
"ssub",
")",
"else",
":",
"attrdict",
"[",
"col_label",
"]",
"=",
"fn",
"(",
"s",
"[",
"col",
"]",
"for",
"s",
"in",
"ssub",
")",
"ret",
".",
"insert",
"(",
"DataObject",
"(",
"*",
"*",
"attrdict",
")",
")",
"elif",
"len",
"(",
"self",
".",
"_pivot_attrs",
")",
"==",
"3",
":",
"for",
"sub",
"in",
"self",
".",
"subtables",
":",
"for",
"ssub",
"in",
"sub",
".",
"subtables",
":",
"for",
"sssub",
"in",
"ssub",
".",
"subtables",
":",
"attrdict",
"=",
"dict",
"(",
"sssub",
".",
"_attr_path",
")",
"if",
"col",
"is",
"None",
"or",
"fn",
"is",
"len",
":",
"attrdict",
"[",
"col_label",
"]",
"=",
"fn",
"(",
"sssub",
")",
"else",
":",
"attrdict",
"[",
"col_label",
"]",
"=",
"fn",
"(",
"s",
"[",
"col",
"]",
"for",
"s",
"in",
"sssub",
")",
"ret",
".",
"insert",
"(",
"DataObject",
"(",
"*",
"*",
"attrdict",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"can only dump summary counts for 1 or 2-attribute pivots\"",
")",
"return",
"ret"
] | Dump out the summary counts of this pivot table as a Table. | [
"Dump",
"out",
"the",
"summary",
"counts",
"of",
"this",
"pivot",
"table",
"as",
"a",
"Table",
"."
] | 8352f7716e458e55a6997372dadf92e179d19f98 | https://github.com/ptmcg/littletable/blob/8352f7716e458e55a6997372dadf92e179d19f98/littletable.py#L1600-L1642 | train |
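And as_table under the same assumed setup; s[col] indexing of records is assumed to be supported by the row class:
import littletable as lt

t = lt.Table()
for region, sales in [("EU", 100), ("EU", 150), ("US", 200)]:
    t.insert(lt.DataObject(region=region, sales=sales))
t.create_index("region")
piv = t.pivot("region")   # Table.pivot assumed
totals = piv.as_table(fn=sum, col="sales", col_label="total_sales")
for row in totals:
    print(row.region, row.total_sales)   # EU 250, then US 200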
davidcarboni/Flask-Sleuth | sleuth/__init__.py | _update_record | def _update_record(record):
"""Collates values needed by LOG_FORMAT
This adds additional information to the log record to implement the logging standard.
:return: A log record augmented with the values required by LOG_FORMAT:
* springtime: LogRecord.asctime, but with a `.` instead of a `,` as the millisecond separator
* levelname_spring: specifically, "WARN" instead of "WARNING"
* process_id
* thread_name
* logger_name
* tracing_information: if B3 values have not been collected this will be an empty string
"""
# Standard fields
dt = datetime.fromtimestamp(record.created)
# Truncate microseconds to milliseconds
record.springtime = str(dt)[:-3]
record.levelname_spring = "WARN" if record.levelname == "WARNING" else record.levelname
record.process_id = str(os.getpid())
record.thread_name = (current_thread().getName())[:15]
record.logger_name = record.name[:40]
record.tracing_information = ""
# Optional distributed tracing information
tracing_information = _tracing_information()
if tracing_information:
record.tracing_information = "[" + ",".join(tracing_information) + "] " | python | def _update_record(record):
"""Collates values needed by LOG_FORMAT
This adds additional information to the log record to implement the logging standard.
:return: A log record augmented with the values required by LOG_FORMAT:
* springtime: LogRecord.asctime, but with a `.` instead of a `,` as the millisecond separator
* levelname_spring: specifically, "WARN" instead of "WARNING"
* process_id
* thread_name
* logger_name
* tracing_information: if B3 values have not been collected this will be an empty string
"""
# Standard fields
dt = datetime.fromtimestamp(record.created)
# Truncate microseconds to milliseconds
record.springtime = str(dt)[:-3]
record.levelname_spring = "WARN" if record.levelname == "WARNING" else record.levelname
record.process_id = str(os.getpid())
record.thread_name = (current_thread().getName())[:15]
record.logger_name = record.name[:40]
record.tracing_information = ""
# Optional distributed tracing information
tracing_information = _tracing_information()
if tracing_information:
record.tracing_information = "[" + ",".join(tracing_information) + "] " | [
"def",
"_update_record",
"(",
"record",
")",
":",
"# Standard fields",
"dt",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"record",
".",
"created",
")",
"# Truncate microseconds to milliseconds",
"record",
".",
"springtime",
"=",
"str",
"(",
"dt",
")",
"[",
":",
"-",
"3",
"]",
"record",
".",
"levelname_spring",
"=",
"\"WARN\"",
"if",
"record",
".",
"levelname",
"==",
"\"WARNING\"",
"else",
"record",
".",
"levelname",
"record",
".",
"process_id",
"=",
"str",
"(",
"os",
".",
"getpid",
"(",
")",
")",
"record",
".",
"thread_name",
"=",
"(",
"current_thread",
"(",
")",
".",
"getName",
"(",
")",
")",
"[",
":",
"15",
"]",
"record",
".",
"logger_name",
"=",
"record",
".",
"name",
"[",
":",
"40",
"]",
"record",
".",
"tracing_information",
"=",
"\"\"",
"# Optional distributed tracing information",
"tracing_information",
"=",
"_tracing_information",
"(",
")",
"if",
"tracing_information",
":",
"record",
".",
"tracing_information",
"=",
"\"[\"",
"+",
"\",\"",
".",
"join",
"(",
"tracing_information",
")",
"+",
"\"] \""
] | Collates values needed by LOG_FORMAT
This adds additional information to the log record to implement the logging standard.
:return: A log record augmented with the values required by LOG_FORMAT:
* springtime: LogRecord.asctime, but with a `.` instead of a `,` as the millisecond separator
* levelname_spring: specifically, "WARN" instead of "WARNING"
* process_id
* thread_name
* logger_name
* tracing_information: if B3 values have not been collected this will be an empty string | [
"Collates",
"values",
"needed",
"by",
"LOG_FORMAT"
] | 2191aa2a929ec43c0176ec51c7abef924b12d015 | https://github.com/davidcarboni/Flask-Sleuth/blob/2191aa2a929ec43c0176ec51c7abef924b12d015/sleuth/__init__.py#L41-L68 | train |
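A hedged sketch of wiring _update_record into the stdlib logging pipeline; the filter class is hypothetical, and the format string is reconstructed from the fields the function populates, not the module's actual LOG_FORMAT constant:
import logging
from sleuth import _update_record   # import path assumed

class _SleuthFilter(logging.Filter):
    def filter(self, record):
        _update_record(record)      # augment the record in place
        return True

LOG_FORMAT = ("%(springtime)s %(levelname_spring)5s [%(process_id)s] "
              "[%(thread_name)s] %(logger_name)s : %(tracing_information)s%(message)s")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(LOG_FORMAT))
handler.addFilter(_SleuthFilter())
logging.getLogger().addHandler(handler)
Note that _tracing_information reads b3 values and current_app, so tracing fields only appear when logging inside a Flask request context.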
davidcarboni/Flask-Sleuth | sleuth/__init__.py | _tracing_information | def _tracing_information():
"""Gets B3 distributed tracing information, if available.
This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.
"""
# We'll collate trace information if the B3 headers have been collected:
values = b3.values()
if values[b3.b3_trace_id]:
# Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1
# However we're not currently using Zipkin, so it's always false
# exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false"
return [
current_app.name if current_app.name else " - ",
values[b3.b3_trace_id],
values[b3.b3_span_id],
"false",
] | python | def _tracing_information():
"""Gets B3 distributed tracing information, if available.
This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.
"""
# We'll collate trace information if the B3 headers have been collected:
values = b3.values()
if values[b3.b3_trace_id]:
# Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1
# However we're not currently using Zipkin, so it's always false
# exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false"
return [
current_app.name if current_app.name else " - ",
values[b3.b3_trace_id],
values[b3.b3_span_id],
"false",
] | [
"def",
"_tracing_information",
"(",
")",
":",
"# We'll collate trace information if the B3 headers have been collected:",
"values",
"=",
"b3",
".",
"values",
"(",
")",
"if",
"values",
"[",
"b3",
".",
"b3_trace_id",
"]",
":",
"# Trace information would normally be sent to Zipkin if either of sampled or debug (\"flags\") is set to 1",
"# However we're not currently using Zipkin, so it's always false",
"# exported = \"true\" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else \"false\"",
"return",
"[",
"current_app",
".",
"name",
"if",
"current_app",
".",
"name",
"else",
"\" - \"",
",",
"values",
"[",
"b3",
".",
"b3_trace_id",
"]",
",",
"values",
"[",
"b3",
".",
"b3_span_id",
"]",
",",
"\"false\"",
",",
"]"
] | Gets B3 distributed tracing information, if available.
This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format. | [
"Gets",
"B3",
"distributed",
"tracing",
"information",
"if",
"available",
".",
"This",
"is",
"returned",
"as",
"a",
"list",
"ready",
"to",
"be",
"formatted",
"into",
"Spring",
"Cloud",
"Sleuth",
"compatible",
"format",
"."
] | 2191aa2a929ec43c0176ec51c7abef924b12d015 | https://github.com/davidcarboni/Flask-Sleuth/blob/2191aa2a929ec43c0176ec51c7abef924b12d015/sleuth/__init__.py#L71-L88 | train |
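The returned list is joined by _update_record into Sleuth-style bracketed text; a standalone illustration with made-up identifiers:
parts = ["my-flask-app", "463ac35c9f6413ad", "a2fb4a1d1a96d312", "false"]
print("[" + ",".join(parts) + "] ")
# -> "[my-flask-app,463ac35c9f6413ad,a2fb4a1d1a96d312,false] "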
costastf/toonlib | toonlib/toonlib.py | Toon._authenticate | def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
self._logger.debug(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data) | python | def _authenticate(self):
"""Authenticates to the api and sets up client information."""
data = {'username': self.username,
'password': self.password}
url = '{base}/client/login'.format(base=self.base_url)
response = self._session.get(url, params=data)
self._logger.debug(response.text)
data = response.json()
if not data.get('success'):
raise InvalidCredentials(data.get('reason', None))
self._populate_info(data) | [
"def",
"_authenticate",
"(",
"self",
")",
":",
"data",
"=",
"{",
"'username'",
":",
"self",
".",
"username",
",",
"'password'",
":",
"self",
".",
"password",
"}",
"url",
"=",
"'{base}/client/login'",
".",
"format",
"(",
"base",
"=",
"self",
".",
"base_url",
")",
"response",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"url",
",",
"params",
"=",
"data",
")",
"print",
"(",
"response",
".",
"text",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
"if",
"not",
"data",
".",
"get",
"(",
"'success'",
")",
":",
"raise",
"InvalidCredentials",
"(",
"data",
".",
"get",
"(",
"'reason'",
",",
"None",
")",
")",
"self",
".",
"_populate_info",
"(",
"data",
")"
] | Authenticates to the api and sets up client information. | [
"Authenticates",
"to",
"the",
"api",
"and",
"sets",
"up",
"client",
"information",
"."
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L75-L85 | train |
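A hedged sketch of triggering authentication through the public constructor; the argument order is assumed from the parameter names used here:
from toonlib import Toon

# InvalidCredentials (raised above on a failed login) propagates out of the
# constructor, which is assumed to call _authenticate internally
toon = Toon('my_username', 'my_password')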
costastf/toonlib | toonlib/toonlib.py | Toon._logout | def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False | python | def _logout(self, reset=True):
"""Log out of the API."""
url = '{base}/client/auth/logout'.format(base=self.base_url)
response = self._session.get(url, params=self._parameters)
if response.ok:
if reset:
self._reset()
return True
else:
return False | [
"def",
"_logout",
"(",
"self",
",",
"reset",
"=",
"True",
")",
":",
"url",
"=",
"'{base}/client/auth/logout'",
".",
"format",
"(",
"base",
"=",
"self",
".",
"base_url",
")",
"response",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"url",
",",
"params",
"=",
"self",
".",
"_parameters",
")",
"if",
"response",
".",
"ok",
":",
"if",
"reset",
":",
"self",
".",
"_reset",
"(",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | Log out of the API. | [
"Log",
"out",
"of",
"the",
"API",
"."
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L127-L136 | train |
costastf/toonlib | toonlib/toonlib.py | Toon._state | def _state(self):
"""The internal state of the object.
The api responses are not consistent, so the call is retried several
times, each response updating the internally saved state and refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_ | python | def _state(self):
"""The internal state of the object.
The api responses are not consistent, so the call is retried several
times, each response updating the internally saved state and refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state.
"""
state = {}
required_keys = ('deviceStatusInfo',
'gasUsage',
'powerUsage',
'thermostatInfo',
'thermostatStates')
try:
for _ in range(self._state_retries):
state.update(self._get_data('/client/auth/retrieveToonState'))
except TypeError:
self._logger.exception('Could not get answer from service.')
message = ('Updating internal state with retrieved '
'state:{state}').format(state=state)
self._logger.debug(message)
self._state_.update(state)
if not all([key in self._state_.keys() for key in required_keys]):
raise IncompleteResponse(state)
return self._state_ | [
"def",
"_state",
"(",
"self",
")",
":",
"state",
"=",
"{",
"}",
"required_keys",
"=",
"(",
"'deviceStatusInfo'",
",",
"'gasUsage'",
",",
"'powerUsage'",
",",
"'thermostatInfo'",
",",
"'thermostatStates'",
")",
"try",
":",
"for",
"_",
"in",
"range",
"(",
"self",
".",
"_state_retries",
")",
":",
"state",
".",
"update",
"(",
"self",
".",
"_get_data",
"(",
"'/client/auth/retrieveToonState'",
")",
")",
"except",
"TypeError",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"'Could not get answer from service.'",
")",
"message",
"=",
"(",
"'Updating internal state with retrieved '",
"'state:{state}'",
")",
".",
"format",
"(",
"state",
"=",
"state",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"message",
")",
"self",
".",
"_state_",
".",
"update",
"(",
"state",
")",
"if",
"not",
"all",
"(",
"[",
"key",
"in",
"self",
".",
"_state_",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"required_keys",
"]",
")",
":",
"raise",
"IncompleteResponse",
"(",
"state",
")",
"return",
"self",
".",
"_state_"
] | The internal state of the object.
The api responses are not consistent, so the call is retried several
times, each response updating the internally saved state and refreshing
the data. The info is cached for STATE_CACHING_SECONDS.
:return: The current state of the toons' information state. | [
"The",
"internal",
"state",
"of",
"the",
"object",
"."
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L162-L188 | train |
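Continuing that sketch, the cached state backs the public collections and info objects; the mapping of state keys to properties below is an assumption drawn from the required_keys list:
from toonlib import Toon

toon = Toon('my_username', 'my_password')   # constructor args assumed
for plug in toon.smartplugs:                # likely backed by deviceStatusInfo
    print(plug.name)
print(toon.thermostat_info.active_state)    # backed by thermostatInfo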
costastf/toonlib | toonlib/toonlib.py | Toon.get_smokedetector_by_name | def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None) | python | def get_smokedetector_by_name(self, name):
"""Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object
"""
return next((smokedetector for smokedetector in self.smokedetectors
if smokedetector.name.lower() == name.lower()), None) | [
"def",
"get_smokedetector_by_name",
"(",
"self",
",",
"name",
")",
":",
"return",
"next",
"(",
"(",
"smokedetector",
"for",
"smokedetector",
"in",
"self",
".",
"smokedetectors",
"if",
"smokedetector",
".",
"name",
".",
"lower",
"(",
")",
"==",
"name",
".",
"lower",
"(",
")",
")",
",",
"None",
")"
] | Retrieves a smokedetector object by its name
:param name: The name of the smokedetector to return
:return: A smokedetector object | [
"Retrieves",
"a",
"smokedetector",
"object",
"by",
"its",
"name"
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L236-L243 | train |
costastf/toonlib | toonlib/toonlib.py | Toon.get_light_by_name | def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None) | python | def get_light_by_name(self, name):
"""Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object
"""
return next((light for light in self.lights
if light.name.lower() == name.lower()), None) | [
"def",
"get_light_by_name",
"(",
"self",
",",
"name",
")",
":",
"return",
"next",
"(",
"(",
"light",
"for",
"light",
"in",
"self",
".",
"lights",
"if",
"light",
".",
"name",
".",
"lower",
"(",
")",
"==",
"name",
".",
"lower",
"(",
")",
")",
",",
"None",
")"
] | Retrieves a light object by its name
:param name: The name of the light to return
:return: A light object | [
"Retrieves",
"a",
"light",
"object",
"by",
"its",
"name"
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L253-L260 | train |
costastf/toonlib | toonlib/toonlib.py | Toon.get_smartplug_by_name | def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None) | python | def get_smartplug_by_name(self, name):
"""Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object
"""
return next((plug for plug in self.smartplugs
if plug.name.lower() == name.lower()), None) | [
"def",
"get_smartplug_by_name",
"(",
"self",
",",
"name",
")",
":",
"return",
"next",
"(",
"(",
"plug",
"for",
"plug",
"in",
"self",
".",
"smartplugs",
"if",
"plug",
".",
"name",
".",
"lower",
"(",
")",
"==",
"name",
".",
"lower",
"(",
")",
")",
",",
"None",
")"
] | Retrieves a smartplug object by its name
:param name: The name of the smartplug to return
:return: A smartplug object | [
"Retrieves",
"a",
"smartplug",
"object",
"by",
"its",
"name"
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L270-L277 | train |
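The three lookup helpers above share the same case-insensitive, None-on-miss contract; device names are illustrative:
from toonlib import Toon

toon = Toon('my_username', 'my_password')   # constructor args assumed
plug = toon.get_smartplug_by_name('TV')
light = toon.get_light_by_name('Kitchen')
alarm = toon.get_smokedetector_by_name('Hallway')
for device in (plug, light, alarm):
    if device is not None:
        print(device.name)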
costastf/toonlib | toonlib/toonlib.py | Toon.get_thermostat_state_by_name | def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None) | python | def get_thermostat_state_by_name(self, name):
"""Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
"""
self._validate_thermostat_state_name(name)
return next((state for state in self.thermostat_states
if state.name.lower() == name.lower()), None) | [
"def",
"get_thermostat_state_by_name",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"_validate_thermostat_state_name",
"(",
"name",
")",
"return",
"next",
"(",
"(",
"state",
"for",
"state",
"in",
"self",
".",
"thermostat_states",
"if",
"state",
".",
"name",
".",
"lower",
"(",
")",
"==",
"name",
".",
"lower",
"(",
")",
")",
",",
"None",
")"
] | Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object | [
"Retrieves",
"a",
"thermostat",
"state",
"object",
"by",
"its",
"assigned",
"name"
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L347-L355 | train |
costastf/toonlib | toonlib/toonlib.py | Toon.get_thermostat_state_by_id | def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None) | python | def get_thermostat_state_by_id(self, id_):
"""Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object
"""
return next((state for state in self.thermostat_states
if state.id == id_), None) | [
"def",
"get_thermostat_state_by_id",
"(",
"self",
",",
"id_",
")",
":",
"return",
"next",
"(",
"(",
"state",
"for",
"state",
"in",
"self",
".",
"thermostat_states",
"if",
"state",
".",
"id",
"==",
"id_",
")",
",",
"None",
")"
] | Retrieves a thermostat state object by its id
:param id_: The id of the thermostat state
:return: The thermostat state object | [
"Retrieves",
"a",
"thermostat",
"state",
"object",
"by",
"its",
"id"
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L357-L364 | train |
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat_state | def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state | python | def thermostat_state(self):
"""The state of the thermostat programming
:return: A thermostat state object of the current setting
"""
current_state = self.thermostat_info.active_state
state = self.get_thermostat_state_by_id(current_state)
if not state:
self._logger.debug('Manually set temperature, no Thermostat '
'State chosen!')
return state | [
"def",
"thermostat_state",
"(",
"self",
")",
":",
"current_state",
"=",
"self",
".",
"thermostat_info",
".",
"active_state",
"state",
"=",
"self",
".",
"get_thermostat_state_by_id",
"(",
"current_state",
")",
"if",
"not",
"state",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Manually set temperature, no Thermostat '",
"'State chosen!'",
")",
"return",
"state"
] | The state of the thermostat programming
:return: A thermostat state object of the current setting | [
"The",
"state",
"of",
"the",
"thermostat",
"programming"
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L377-L387 | train |
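A short sketch of reading the property above; `toon` stands for an authenticated Toon instance, as in the earlier sketch:

state = toon.thermostat_state
if state is None:
    # active_state matched no known state id: the temperature was set manually
    print('manual temperature, no named state active')
else:
    print(state.name)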
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat_state | def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache() | python | def thermostat_state(self, name):
"""Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to.
"""
self._validate_thermostat_state_name(name)
id_ = next((key for key in STATES.keys()
if STATES[key].lower() == name.lower()), None)
data = copy.copy(self._parameters)
data.update({'state': 2,
'temperatureState': id_})
response = self._get_data('/client/auth/schemeState', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache() | [
"def",
"thermostat_state",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"_validate_thermostat_state_name",
"(",
"name",
")",
"id_",
"=",
"next",
"(",
"(",
"key",
"for",
"key",
"in",
"STATES",
".",
"keys",
"(",
")",
"if",
"STATES",
"[",
"key",
"]",
".",
"lower",
"(",
")",
"==",
"name",
".",
"lower",
"(",
")",
")",
",",
"None",
")",
"data",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_parameters",
")",
"data",
".",
"update",
"(",
"{",
"'state'",
":",
"2",
",",
"'temperatureState'",
":",
"id_",
"}",
")",
"response",
"=",
"self",
".",
"_get_data",
"(",
"'/client/auth/schemeState'",
",",
"data",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Response received {}'",
".",
"format",
"(",
"response",
")",
")",
"self",
".",
"_clear_cache",
"(",
")"
] | Changes the thermostat state to the one passed as an argument as name
:param name: The name of the thermostat state to change to. | [
"Changes",
"the",
"thermostat",
"state",
"to",
"the",
"one",
"passed",
"as",
"an",
"argument",
"as",
"name"
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L390-L403 | train |
costastf/toonlib | toonlib/toonlib.py | Toon.thermostat | def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache() | python | def thermostat(self, temperature):
"""A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to.
"""
target = int(temperature * 100)
data = copy.copy(self._parameters)
data.update({'value': target})
response = self._get_data('/client/auth/setPoint', data)
self._logger.debug('Response received {}'.format(response))
self._clear_cache() | [
"def",
"thermostat",
"(",
"self",
",",
"temperature",
")",
":",
"target",
"=",
"int",
"(",
"temperature",
"*",
"100",
")",
"data",
"=",
"copy",
".",
"copy",
"(",
"self",
".",
"_parameters",
")",
"data",
".",
"update",
"(",
"{",
"'value'",
":",
"target",
"}",
")",
"response",
"=",
"self",
".",
"_get_data",
"(",
"'/client/auth/setPoint'",
",",
"data",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"'Response received {}'",
".",
"format",
"(",
"response",
")",
")",
"self",
".",
"_clear_cache",
"(",
")"
] | A temperature to set the thermostat to. Requires a float.
:param temperature: A float of the desired temperature to change to. | [
"A",
"temperature",
"to",
"set",
"the",
"thermostat",
"to",
".",
"Requires",
"a",
"float",
"."
] | 2fa95430240d1a1c2a85a8827aecfcb1ca41c18c | https://github.com/costastf/toonlib/blob/2fa95430240d1a1c2a85a8827aecfcb1ca41c18c/toonlib/toonlib.py#L415-L425 | train |
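A sketch combining the two setters above; the state name 'Comfort' is assumed to be a valid entry in the STATES mapping:

toon.thermostat_state = 'Comfort'  # validated, mapped to its id, posted to /client/auth/schemeState
toon.thermostat = 20.5             # posted to /client/auth/setPoint as int(20.5 * 100) == 2050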
Erotemic/utool | utool/experimental/dynamic_connectivity.py | euler_tour_dfs | def euler_tour_dfs(G, source=None):
""" adaptation of networkx dfs """
if source is None:
# produce edges for all components
nodes = G
else:
# produce edges for components with source
nodes = [source]
yielder = []
visited = set()
for start in nodes:
if start in visited:
continue
visited.add(start)
stack = [(start, iter(G[start]))]
while stack:
parent, children = stack[-1]
try:
child = next(children)
if child not in visited:
# yielder += [[parent, child]]
yielder += [parent]
visited.add(child)
stack.append((child, iter(G[child])))
except StopIteration:
if stack:
last = stack[-1]
yielder += [last[0]]
stack.pop()
return yielder | python | def euler_tour_dfs(G, source=None):
""" adaptation of networkx dfs """
if source is None:
# produce edges for all components
nodes = G
else:
# produce edges for components with source
nodes = [source]
yielder = []
visited = set()
for start in nodes:
if start in visited:
continue
visited.add(start)
stack = [(start, iter(G[start]))]
while stack:
parent, children = stack[-1]
try:
child = next(children)
if child not in visited:
# yielder += [[parent, child]]
yielder += [parent]
visited.add(child)
stack.append((child, iter(G[child])))
except StopIteration:
if stack:
last = stack[-1]
yielder += [last[0]]
stack.pop()
return yielder | [
"def",
"euler_tour_dfs",
"(",
"G",
",",
"source",
"=",
"None",
")",
":",
"if",
"source",
"is",
"None",
":",
"# produce edges for all components",
"nodes",
"=",
"G",
"else",
":",
"# produce edges for components with source",
"nodes",
"=",
"[",
"source",
"]",
"yielder",
"=",
"[",
"]",
"visited",
"=",
"set",
"(",
")",
"for",
"start",
"in",
"nodes",
":",
"if",
"start",
"in",
"visited",
":",
"continue",
"visited",
".",
"add",
"(",
"start",
")",
"stack",
"=",
"[",
"(",
"start",
",",
"iter",
"(",
"G",
"[",
"start",
"]",
")",
")",
"]",
"while",
"stack",
":",
"parent",
",",
"children",
"=",
"stack",
"[",
"-",
"1",
"]",
"try",
":",
"child",
"=",
"next",
"(",
"children",
")",
"if",
"child",
"not",
"in",
"visited",
":",
"# yielder += [[parent, child]]",
"yielder",
"+=",
"[",
"parent",
"]",
"visited",
".",
"add",
"(",
"child",
")",
"stack",
".",
"append",
"(",
"(",
"child",
",",
"iter",
"(",
"G",
"[",
"child",
"]",
")",
")",
")",
"except",
"StopIteration",
":",
"if",
"stack",
":",
"last",
"=",
"stack",
"[",
"-",
"1",
"]",
"yielder",
"+=",
"[",
"last",
"[",
"0",
"]",
"]",
"stack",
".",
"pop",
"(",
")",
"return",
"yielder"
] | adaptation of networkx dfs | [
"adaptation",
"of",
"networkx",
"dfs"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/dynamic_connectivity.py#L12-L41 | train |
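A minimal, self-contained sketch of the tour produced above (networkx assumed installed):

import networkx as nx

G = nx.Graph([(1, 2), (1, 3)])
print(euler_tour_dfs(G, source=1))
# Each tree edge is walked down and back up, giving [1, 2, 1, 3, 1]
# (the exact order depends on neighbor iteration order)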
Erotemic/utool | utool/experimental/dynamic_connectivity.py | EulerTourTree.reroot | def reroot(self, s):
"""
s = 3
s = 'B'
Let o_s denote any occurrence of s.
Splice out the first part of the sequence ending with the occurrence before o_s,
remove its first occurrence (o_r),
and tack this on to the end of the sequence, which now begins with o_s.
Add a new occurrence o_s to the end.
"""
# Splice out the first part of the sequence ending with the occurrence before os
# remove its first occurrence (or),
o_s1 = self.first_lookup[s]
splice1 = self.tour[1:o_s1]
rest = self.tour[o_s1 + 1:]
new_tour = [s] + rest + splice1 + [s]
new_tree = TestETT.from_tour(new_tour, fast=self.fast)
return new_tree | python | def reroot(self, s):
"""
s = 3
s = 'B'
Let o_s denote any occurrence of s.
Splice out the first part of the sequence ending with the occurrence before o_s,
remove its first occurrence (o_r),
and tack this on to the end of the sequence, which now begins with o_s.
Add a new occurrence o_s to the end.
"""
# Splice out the first part of the sequence ending with the occurrence before os
# remove its first occurrence (or),
o_s1 = self.first_lookup[s]
splice1 = self.tour[1:o_s1]
rest = self.tour[o_s1 + 1:]
new_tour = [s] + rest + splice1 + [s]
new_tree = TestETT.from_tour(new_tour, fast=self.fast)
return new_tree | [
"def",
"reroot",
"(",
"self",
",",
"s",
")",
":",
"# Splice out the first part of the sequence ending with the occurrence before os",
"# remove its first occurrence (or),",
"o_s1",
"=",
"self",
".",
"first_lookup",
"[",
"s",
"]",
"splice1",
"=",
"self",
".",
"tour",
"[",
"1",
":",
"o_s1",
"]",
"rest",
"=",
"self",
".",
"tour",
"[",
"o_s1",
"+",
"1",
":",
"]",
"new_tour",
"=",
"[",
"s",
"]",
"+",
"rest",
"+",
"splice1",
"+",
"[",
"s",
"]",
"new_tree",
"=",
"TestETT",
".",
"from_tour",
"(",
"new_tour",
",",
"fast",
"=",
"self",
".",
"fast",
")",
"return",
"new_tree"
] | s = 3
s = 'B'
Let o_s denote any occurrence of s.
Splice out the first part of the sequence ending with the occurrence before o_s,
remove its first occurrence (o_r),
and tack this on to the end of the sequence, which now begins with o_s.
Add a new occurrence o_s to the end. | [
"s",
"=",
"3",
"s",
"=",
"B"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/dynamic_connectivity.py#L521-L539 | train |
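The splice described in the docstring above can be checked on a plain list; the tour below is an assumed example rooted at 'A':

tour = ['A', 'B', 'C', 'B', 'A']
s = 'B'
o_s1 = tour.index(s)  # first occurrence of the new root
print([s] + tour[o_s1 + 1:] + tour[1:o_s1] + [s])  # ['B', 'C', 'B', 'A', 'B']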
Erotemic/utool | utool/experimental/dynamic_connectivity.py | DynConnGraph.remove_edge | def remove_edge(self, u, v):
"""
Using notation where 0 is top level
Intuitively speaking, when the level of a nontree edge is increased, it
is because we have discovered that its end points are close enough in F
to fit in a smaller tree on a higher level.
"""
# Remove (u, v) from represented graph
print('Dynamically removing uv=(%r, %r)' % (u, v))
self.graph.remove_edge(u, v)
e = (u, v)
# Remove edge e = (u, v) from all graphs.
if not self.forests[0].has_edge(u, v):
# If (u, v) is a non-tree edge, simply delete it.
# Nothing else to do.
return
# If (u, v) is a tree edge we delete it and search for a replacement.
# Delete from all higher levels
for i in reversed(range(0, self.level[e] + 1)):
self.forests[i].remove_edge(u, v)
# Determine if another edge that connects u and v exists.
# (This must be an edge r, level[r] <= level[e])
# (Find max possible level[r] <= level[e])
for i in reversed(range(0, self.level[e] + 1)):
# Tu != Tv b/c (u, v) was just deleted from all forests
Tu = self.forests[i].subtree(u)
print('Tu = %r' % (list(Tu.nodes()),))
Tv = self.forests[i].subtree(v)
print('Tv = %r' % (list(Tv.nodes()),))
# Relabel so len(Tu) <= len(Tv)
# This ensures len(Tu) < 2 ** (floor(log(n)) - i)
if len(Tu) > len(Tv):
Tu, Tv = Tv, Tu
# Note len(Tu) <= 2 * (len(Tu) + len(Tv) + 1)
# We can afford to push all of Tu's edges to the next level and
# still preserve invariant 1.
seen_ = set([])
for x in Tu.nodes():
# Visit all edges INCIDENT (in real graph) to nodes in Tu.
# This lets us find non-tree edges to make a tree edge
seen_.add(x)
for y in self.graph.neighbors(x):
if y in seen_:
continue
# print('Check replacement edge xy=(%r, %r)' % (x, y))
if y in Tv:
print('* Found replacement xy=(%r, %r)' % (x, y))
# edge (x, y) is a replacement edge.
# add (x, y) to prev forests F[0:i+1]
# This is the only place edges are added to forests of
# higher levels.
if len(self.forests) == i + 1:
self.forests.append(DummyEulerTourForest(self.graph.nodes()))
for j in range(0, i + 2):
print('* Add replacement to F[j=%r]' % (j,))
# Need euler tree augmentation for outgoing level edges
self.forests[j].add_edge(x, y)
return
else:
print('* Charging xy=(%r, %r)' % (x, y))
# charge --- add (x, y) to next level
# this pays for our search in an amortized sense
# (i.e., the next search at this level won't consider this)
if len(self.forests) == i + 1:
self.forests.append(DummyEulerTourForest(self.graph.nodes()))
if self.forests[i].has_edge(x, y):
self.forests[i + 1].add_edge(x, y)
# # assert False, 'we got it, should add it?'
self.level[(x, y)] = i + 1 | python | def remove_edge(self, u, v):
"""
Using notation where 0 is top level
Intuitively speaking, when the level of a nontree edge is increased, it
is because we have discovered that its end points are close enough in F
to fit in a smaller tree on a higher level.
"""
# Remove (u, v) from represented graph
print('Dynamically removing uv=(%r, %r)' % (u, v))
self.graph.remove_edge(u, v)
e = (u, v)
# Remove edge e = (u, v) from all graphs.
if not self.forests[0].has_edge(u, v):
# If (u, v) is a non-tree edge, simply delete it.
# Nothing else to do.
return
# If (u, v) is a tree edge we delete it and search for a replacement.
# Delete from all higher levels
for i in reversed(range(0, self.level[e] + 1)):
self.forests[i].remove_edge(u, v)
# Determine if another edge that connects u and v exists.
# (This must be an edge r, level[r] <= level[e])
# (Find max possible level[r] <= level[e])
for i in reversed(range(0, self.level[e] + 1)):
# Tu != Tv b/c (u, v) was just deleted from all forests
Tu = self.forests[i].subtree(u)
print('Tu = %r' % (list(Tu.nodes()),))
Tv = self.forests[i].subtree(v)
print('Tv = %r' % (list(Tv.nodes()),))
# Relabel so len(Tu) <= len(Tv)
# This ensures len(Tu) < 2 ** (floor(log(n)) - i)
if len(Tu) > len(Tv):
Tu, Tv = Tv, Tu
# Note len(Tu) <= 2 * (len(Tu) + len(Tv) + 1)
# We can afford to push all of Tu's edges to the next level and
# still preserve invariant 1.
seen_ = set([])
for x in Tu.nodes():
# Visit all edges INCIDENT (in real graph) to nodes in Tu.
# This lets us find non-tree edges to make a tree edge
seen_.add(x)
for y in self.graph.neighbors(x):
if y in seen_:
continue
# print('Check replacement edge xy=(%r, %r)' % (x, y))
if y in Tv:
print('* Found replacement xy=(%r, %r)' % (x, y))
# edge (x, y) is a replacement edge.
# add (x, y) to prev forests F[0:i+1]
# This is the only place edges are added to forests of
# higher levels.
if len(self.forests) == i + 1:
self.forests.append(DummyEulerTourForest(self.graph.nodes()))
for j in range(0, i + 2):
print('* Add replacement to F[j=%r]' % (j,))
# Need euler tree augmentation for outgoing level edges
self.forests[j].add_edge(x, y)
return
else:
print('* Charging xy=(%r, %r)' % (x, y))
# charge --- add (x, y) to next level
# this pays for our search in an amortized sense
# (i.e., the next search at this level won't consider this)
if len(self.forests) == i + 1:
self.forests.append(DummyEulerTourForest(self.graph.nodes()))
if self.forests[i].has_edge(x, y):
self.forests[i + 1].add_edge(x, y)
# # assert False, 'we got it, should add it?'
self.level[(x, y)] = i + 1 | [
"def",
"remove_edge",
"(",
"self",
",",
"u",
",",
"v",
")",
":",
"# Remove (u, v) from represented graph",
"print",
"(",
"'Dynamically removing uv=(%r, %r)'",
"%",
"(",
"u",
",",
"v",
")",
")",
"self",
".",
"graph",
".",
"remove_edge",
"(",
"u",
",",
"v",
")",
"e",
"=",
"(",
"u",
",",
"v",
")",
"# Remove edge e = (u, v) from all graphs.",
"if",
"not",
"self",
".",
"forests",
"[",
"0",
"]",
".",
"has_edge",
"(",
"u",
",",
"v",
")",
":",
"# If (u, v) is a non-tree edge, simply delete it.",
"# Nothing else to do.",
"return",
"# If (u, v) is a tree edge we delete it and search for a replacement.",
"# Delete from all higher levels",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"0",
",",
"self",
".",
"level",
"[",
"e",
"]",
"+",
"1",
")",
")",
":",
"self",
".",
"forests",
"[",
"i",
"]",
".",
"remove_edge",
"(",
"u",
",",
"v",
")",
"# Determine if another edge that connects u and v exists.",
"# (This must be an edge r, level[r] <= level[e])",
"# (Find max possible level[r] <= level[e])",
"for",
"i",
"in",
"reversed",
"(",
"range",
"(",
"0",
",",
"self",
".",
"level",
"[",
"e",
"]",
"+",
"1",
")",
")",
":",
"# Tu != Tw b/c (u, v) was just deleted from all forests",
"Tu",
"=",
"self",
".",
"forests",
"[",
"i",
"]",
".",
"subtree",
"(",
"u",
")",
"print",
"(",
"'Tu = %r'",
"%",
"(",
"list",
"(",
"Tu",
".",
"nodes",
"(",
")",
")",
",",
")",
")",
"Tv",
"=",
"self",
".",
"forests",
"[",
"i",
"]",
".",
"subtree",
"(",
"v",
")",
"print",
"(",
"'Tv = %r'",
"%",
"(",
"list",
"(",
"Tv",
".",
"nodes",
"(",
")",
")",
",",
")",
")",
"# Relabel so len(Tu) <= len(Tv)",
"# This ensures len(Tu) < 2 ** (floor(log(n)) - i)",
"if",
"len",
"(",
"Tu",
")",
">",
"len",
"(",
"Tv",
")",
":",
"Tu",
",",
"Tv",
"=",
"Tv",
",",
"Tu",
"# Note len(Tu) <= 2 * (len(Tu) + len(Tv) + 1)",
"# We can afford to push all of Tu's edges to the next level and",
"# still preserve invariant 1.",
"seen_",
"=",
"set",
"(",
"[",
"]",
")",
"for",
"x",
"in",
"Tu",
".",
"nodes",
"(",
")",
":",
"# Visit all edges INCIDENT (in real graph) to nodes in Tu.",
"# This lets us find non-tree edges to make a tree edge",
"seen_",
".",
"add",
"(",
"x",
")",
"for",
"y",
"in",
"self",
".",
"graph",
".",
"neighbors",
"(",
"x",
")",
":",
"if",
"y",
"in",
"seen_",
":",
"continue",
"# print('Check replacement edge xy=(%r, %r)' % (x, y))",
"if",
"y",
"in",
"Tv",
":",
"print",
"(",
"'* Found replacement xy=(%r, %r)'",
"%",
"(",
"x",
",",
"y",
")",
")",
"# edge (x, y) is a replacement edge.",
"# add (x, y) to prev forests F[0:i+1]",
"# This is the only place edges are added to forets of",
"# higher levels.",
"if",
"len",
"(",
"self",
".",
"forests",
")",
"==",
"i",
"+",
"1",
":",
"self",
".",
"forests",
".",
"append",
"(",
"DummyEulerTourForest",
"(",
"self",
".",
"graph",
".",
"nodes",
"(",
")",
")",
")",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"i",
"+",
"2",
")",
":",
"print",
"(",
"'* Add replacment to F[j=%r]'",
"%",
"(",
"j",
",",
")",
")",
"# Need euler tree augmentation for outgoing level edges",
"self",
".",
"forests",
"[",
"j",
"]",
".",
"add_edge",
"(",
"x",
",",
"y",
")",
"return",
"else",
":",
"print",
"(",
"'* Charging xy=(%r, %r)'",
"%",
"(",
"x",
",",
"y",
")",
")",
"# charge --- add (x, y) to next level",
"# this pays for our search in an amortized sense",
"# (ie, the next search at this level wont consider this)",
"if",
"len",
"(",
"self",
".",
"forests",
")",
"==",
"i",
"+",
"1",
":",
"self",
".",
"forests",
".",
"append",
"(",
"DummyEulerTourForest",
"(",
"self",
".",
"graph",
".",
"nodes",
"(",
")",
")",
")",
"if",
"self",
".",
"forests",
"[",
"i",
"]",
".",
"has_edge",
"(",
"x",
",",
"y",
")",
":",
"self",
".",
"forests",
"[",
"i",
"+",
"1",
"]",
".",
"add_edge",
"(",
"x",
",",
"y",
")",
"# # assert False, 'we got it, should add it?'",
"self",
".",
"level",
"[",
"(",
"x",
",",
"y",
")",
"]",
"=",
"i",
"+",
"1"
] | Using notation where 0 is top level
Intuitively speaking, when the level of a nontree edge is increased, it
is because we have discovered that its end points are close enough in F
to fit in a smaller tree on a higher level. | [
"Using",
"notation",
"where",
"0",
"is",
"top",
"level"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/experimental/dynamic_connectivity.py#L853-L923 | train |
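A hedged usage sketch for the structure above; the DynConnGraph constructor shown is an assumption about the surrounding class, not confirmed by this record:

import networkx as nx

dc = DynConnGraph(nx.Graph([(1, 2), (2, 3), (1, 3)]))  # assumed to wrap a represented graph
dc.remove_edge(1, 2)  # if (1, 2) was a tree edge, the level search above finds a
                      # replacement path through node 3, so 1 and 2 stay connected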
Erotemic/utool | utool/util_regex.py | extend_regex2 | def extend_regex2(regexpr, reflags=0):
"""
also preprocesses flags
"""
regexpr = extend_regex(regexpr)
IGNORE_CASE_PREF = '\\c'
if regexpr.startswith(IGNORE_CASE_PREF):
# hack for vim-like ignore case
regexpr = regexpr[len(IGNORE_CASE_PREF):]
reflags = reflags | re.IGNORECASE
return regexpr, reflags | python | def extend_regex2(regexpr, reflags=0):
"""
also preprocesses flags
"""
regexpr = extend_regex(regexpr)
IGNORE_CASE_PREF = '\\c'
if regexpr.startswith(IGNORE_CASE_PREF):
# hack for vim-like ignore case
regexpr = regexpr[len(IGNORE_CASE_PREF):]
reflags = reflags | re.IGNORECASE
return regexpr, reflags | [
"def",
"extend_regex2",
"(",
"regexpr",
",",
"reflags",
"=",
"0",
")",
":",
"regexpr",
"=",
"extend_regex",
"(",
"regexpr",
")",
"IGNORE_CASE_PREF",
"=",
"'\\\\c'",
"if",
"regexpr",
".",
"startswith",
"(",
"IGNORE_CASE_PREF",
")",
":",
"# hack for vim-like ignore case",
"regexpr",
"=",
"regexpr",
"[",
"len",
"(",
"IGNORE_CASE_PREF",
")",
":",
"]",
"reflags",
"=",
"reflags",
"|",
"re",
".",
"IGNORECASE",
"return",
"regexpr",
",",
"reflags"
] | also preprocesses flags | [
"also",
"preprocesses",
"flags"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_regex.py#L91-L101 | train |
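A small sketch of the vim-style '\c' prefix handled above, assuming extend_regex passes the prefix through unchanged:

import re

pattern, flags = extend_regex2(r'\cfoo')
print(bool(flags & re.IGNORECASE))                   # True -- the '\c' prefix was stripped
print(re.search(pattern, 'FOO', flags) is not None)  # True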
Erotemic/utool | utool/util_regex.py | named_field | def named_field(key, regex, vim=False):
"""
Creates a named regex group that can be referenced via a backref.
If key is None the backref is referenced by number.
References:
https://docs.python.org/2/library/re.html#regular-expression-syntax
"""
if key is None:
#return regex
return r'(%s)' % (regex,)
if vim:
return r'\(%s\)' % (regex)
else:
return r'(?P<%s>%s)' % (key, regex) | python | def named_field(key, regex, vim=False):
"""
Creates a named regex group that can be referenced via a backref.
If key is None the backref is referenced by number.
References:
https://docs.python.org/2/library/re.html#regular-expression-syntax
"""
if key is None:
#return regex
return r'(%s)' % (regex,)
if vim:
return r'\(%s\)' % (regex)
else:
return r'(?P<%s>%s)' % (key, regex) | [
"def",
"named_field",
"(",
"key",
",",
"regex",
",",
"vim",
"=",
"False",
")",
":",
"if",
"key",
"is",
"None",
":",
"#return regex",
"return",
"r'(%s)'",
"%",
"(",
"regex",
",",
")",
"if",
"vim",
":",
"return",
"r'\\(%s\\)'",
"%",
"(",
"regex",
")",
"else",
":",
"return",
"r'(?P<%s>%s)'",
"%",
"(",
"key",
",",
"regex",
")"
] | Creates a named regex group that can be referenced via a backref.
If key is None the backref is referenced by number.
References:
https://docs.python.org/2/library/re.html#regular-expression-syntax | [
"Creates",
"a",
"named",
"regex",
"group",
"that",
"can",
"be",
"referend",
"via",
"a",
"backref",
".",
"If",
"key",
"is",
"None",
"the",
"backref",
"is",
"referenced",
"by",
"number",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_regex.py#L124-L138 | train |
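A quick sketch of composing patterns with named_field:

import re

pattern = named_field('word', r'\w+') + r'\s+' + named_field('num', r'\d+')
m = re.match(pattern, 'answer 42')
print(m.group('word'), m.group('num'))  # answer 42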
Erotemic/utool | utool/util_regex.py | regex_replace | def regex_replace(regex, repl, text):
r"""
thin wrapper around re.sub
regex_replace
MULTILINE and DOTALL are on by default in all util_regex functions
Args:
regex (str): pattern to find
repl (str): replace pattern with this
text (str): text to modify
Returns:
str: modified text
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> regex = r'\(.*\):'
>>> repl = '(*args)'
>>> text = '''def foo(param1,
... param2,
... param3):'''
>>> result = regex_replace(regex, repl, text)
>>> print(result)
def foo(*args)
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> import utool as ut
>>> regex = ut.named_field_regex([('keyword', 'def'), ' ', ('funcname', '.*'), '\(.*\):'])
>>> repl = ut.named_field_repl([('funcname',), ('keyword',)])
>>> text = '''def foo(param1,
... param2,
... param3):'''
>>> result = regex_replace(regex, repl, text)
>>> print(result)
foodef
"""
return re.sub(regex, repl, text, **RE_KWARGS) | python | def regex_replace(regex, repl, text):
r"""
thin wrapper around re.sub
regex_replace
MULTILINE and DOTALL are on by default in all util_regex functions
Args:
regex (str): pattern to find
repl (str): replace pattern with this
text (str): text to modify
Returns:
str: modified text
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> regex = r'\(.*\):'
>>> repl = '(*args)'
>>> text = '''def foo(param1,
... param2,
... param3):'''
>>> result = regex_replace(regex, repl, text)
>>> print(result)
def foo(*args)
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> import utool as ut
>>> regex = ut.named_field_regex([('keyword', 'def'), ' ', ('funcname', '.*'), '\(.*\):'])
>>> repl = ut.named_field_repl([('funcname',), ('keyword',)])
>>> text = '''def foo(param1,
... param2,
... param3):'''
>>> result = regex_replace(regex, repl, text)
>>> print(result)
foodef
"""
return re.sub(regex, repl, text, **RE_KWARGS) | [
"def",
"regex_replace",
"(",
"regex",
",",
"repl",
",",
"text",
")",
":",
"return",
"re",
".",
"sub",
"(",
"regex",
",",
"repl",
",",
"text",
",",
"*",
"*",
"RE_KWARGS",
")"
] | r"""
thin wrapper around re.sub
regex_replace
MULTILINE and DOTALL are on by default in all util_regex functions
Args:
regex (str): pattern to find
repl (str): replace pattern with this
text (str): text to modify
Returns:
str: modified text
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> regex = r'\(.*\):'
>>> repl = '(*args)'
>>> text = '''def foo(param1,
... param2,
... param3):'''
>>> result = regex_replace(regex, repl, text)
>>> print(result)
def foo(*args)
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_regex import * # NOQA
>>> import utool as ut
>>> regex = ut.named_field_regex([('keyword', 'def'), ' ', ('funcname', '.*'), '\(.*\):'])
>>> repl = ut.named_field_repl([('funcname',), ('keyword',)])
>>> text = '''def foo(param1,
... param2,
... param3):'''
>>> result = regex_replace(regex, repl, text)
>>> print(result)
foodef | [
"r",
"thin",
"wrapper",
"around",
"re",
".",
"sub",
"regex_replace"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_regex.py#L203-L243 | train |
timedata-org/loady | loady/library.py | clear | def clear(prompt=True, cache=None):
"""Clear loady's cache."""
cache = cache or config.cache()
if prompt:
answer = input(
'Clear library cache files in %s/? (yN) ' % cache)
if not answer.startswith('y'):
return False
shutil.rmtree(cache, ignore_errors=True)
return True | python | def clear(prompt=True, cache=None):
"""Clear loady's cache."""
cache = cache or config.cache()
if prompt:
answer = input(
'Clear library cache files in %s/? (yN) ' % cache)
if not answer.startswith('y'):
return False
shutil.rmtree(cache, ignore_errors=True)
return True | [
"def",
"clear",
"(",
"prompt",
"=",
"True",
",",
"cache",
"=",
"None",
")",
":",
"cache",
"=",
"cache",
"or",
"config",
".",
"cache",
"(",
")",
"if",
"prompt",
":",
"answer",
"=",
"input",
"(",
"'Clear library cache files in %s/? (yN) '",
"%",
"cache",
")",
"if",
"not",
"answer",
".",
"startswith",
"(",
"'y'",
")",
":",
"return",
"False",
"shutil",
".",
"rmtree",
"(",
"cache",
",",
"ignore_errors",
"=",
"True",
")",
"return",
"True"
] | Clear loady's cache. | [
"Clear",
"loady",
"s",
"cache",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/library.py#L10-L19 | train |
timedata-org/loady | loady/library.py | create | def create(gitpath, cache=None):
"""
Create a Library from a git path.
"""
if gitpath.startswith(config.LIBRARY_PREFIX):
path = gitpath[len(config.LIBRARY_PREFIX):]
return Library(*path.split('/'), cache=cache) | python | def create(gitpath, cache=None):
"""
Create a Library from a git path.
"""
if gitpath.startswith(config.LIBRARY_PREFIX):
path = gitpath[len(config.LIBRARY_PREFIX):]
return Library(*path.split('/'), cache=cache) | [
"def",
"create",
"(",
"gitpath",
",",
"cache",
"=",
"None",
")",
":",
"if",
"gitpath",
".",
"startswith",
"(",
"config",
".",
"LIBRARY_PREFIX",
")",
":",
"path",
"=",
"gitpath",
"[",
"len",
"(",
"config",
".",
"LIBRARY_PREFIX",
")",
":",
"]",
"return",
"Library",
"(",
"*",
"path",
".",
"split",
"(",
"'/'",
")",
",",
"cache",
"=",
"cache",
")"
] | Create a Library from a git path. | [
"Create",
"a",
"Library",
"from",
"a",
"git",
"path",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/library.py#L67-L74 | train |
timedata-org/loady | loady/library.py | Library.load | def load(self):
"""Load the library."""
if not git:
raise EnvironmentError(MISSING_GIT_ERROR)
if os.path.exists(self.path):
if not config.CACHE_DISABLE:
return
shutil.rmtree(self.path, ignore_errors=True)
with files.remove_on_exception(self.path):
url = self.GIT_URL.format(**vars(self))
repo = git.Repo.clone_from(
url=url, to_path=self.path, b=self.branch)
if self.commit:
repo.head.reset(self.commit, index=True, working_tree=True) | python | def load(self):
"""Load the library."""
if not git:
raise EnvironmentError(MISSING_GIT_ERROR)
if os.path.exists(self.path):
if not config.CACHE_DISABLE:
return
shutil.rmtree(self.path, ignore_errors=True)
with files.remove_on_exception(self.path):
url = self.GIT_URL.format(**vars(self))
repo = git.Repo.clone_from(
url=url, to_path=self.path, b=self.branch)
if self.commit:
repo.head.reset(self.commit, index=True, working_tree=True) | [
"def",
"load",
"(",
"self",
")",
":",
"if",
"not",
"git",
":",
"raise",
"EnvironmentError",
"(",
"MISSING_GIT_ERROR",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"path",
")",
":",
"if",
"not",
"config",
".",
"CACHE_DISABLE",
":",
"return",
"shutil",
".",
"rmtree",
"(",
"self",
".",
"path",
",",
"ignore_errors",
"=",
"True",
")",
"with",
"files",
".",
"remove_on_exception",
"(",
"self",
".",
"path",
")",
":",
"url",
"=",
"self",
".",
"GIT_URL",
".",
"format",
"(",
"*",
"*",
"vars",
"(",
"self",
")",
")",
"repo",
"=",
"git",
".",
"Repo",
".",
"clone_from",
"(",
"url",
"=",
"url",
",",
"to_path",
"=",
"self",
".",
"path",
",",
"b",
"=",
"self",
".",
"branch",
")",
"if",
"self",
".",
"commit",
":",
"repo",
".",
"head",
".",
"reset",
"(",
"self",
".",
"commit",
",",
"index",
"=",
"True",
",",
"working_tree",
"=",
"True",
")"
] | Load the library. | [
"Load",
"the",
"library",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/library.py#L49-L64 | train |
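A sketch tying the three loady helpers together; the 'user/project/branch' layout after LIBRARY_PREFIX is an assumption based on Library's *path.split('/') call:

from loady import config

lib = create(config.LIBRARY_PREFIX + 'someuser/somelib/master')  # assumed path layout
if lib is not None:   # create() returns None for non-library paths
    lib.load()        # clone into the cache, or reuse an existing checkout
clear(prompt=False)   # later: wipe the whole cache without confirmation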
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/git/git.py | Git.check_existens_of_staging_tag_in_remote_repo | def check_existens_of_staging_tag_in_remote_repo():
"""
This method will check if the given tag exists as a staging tag in the remote repository.
The intention is that every tag which should be deployed on a production environment
has to be deployed on a staging environment first.
"""
staging_tag = Git.create_git_version_tag(APISettings.GIT_STAGING_PRE_TAG)
command_git = 'git ls-remote -t'
command_awk = 'awk \'{print $2}\''
command_cut_1 = 'cut -d \'/\' -f 3'
command_cut_2 = 'cut -d \'^\' -f 1'
command_sort = 'sort -b -t . -k 1,1nr -k 2,2nr -k 3,3r -k 4,4r -k 5,5r'
command_uniq = 'uniq'
command = command_git + ' | ' + command_awk + ' | ' + command_cut_1 + ' | ' + \
command_cut_2 + ' | ' + command_sort + ' | ' + command_uniq
list_of_tags = str(check_output(command, shell=True))
if staging_tag in list_of_tags:
return True
return False | python | def check_existens_of_staging_tag_in_remote_repo():
"""
This method will check if the given tag exists as a staging tag in the remote repository.
The intention is that every tag which should be deployed on a production environment
has to be deployed on a staging environment first.
"""
staging_tag = Git.create_git_version_tag(APISettings.GIT_STAGING_PRE_TAG)
command_git = 'git ls-remote -t'
command_awk = 'awk \'{print $2}\''
command_cut_1 = 'cut -d \'/\' -f 3'
command_cut_2 = 'cut -d \'^\' -f 1'
command_sort = 'sort -b -t . -k 1,1nr -k 2,2nr -k 3,3r -k 4,4r -k 5,5r'
command_uniq = 'uniq'
command = command_git + ' | ' + command_awk + ' | ' + command_cut_1 + ' | ' + \
command_cut_2 + ' | ' + command_sort + ' | ' + command_uniq
list_of_tags = str(check_output(command, shell=True))
if staging_tag in list_of_tags:
return True
return False | [
"def",
"check_existens_of_staging_tag_in_remote_repo",
"(",
")",
":",
"staging_tag",
"=",
"Git",
".",
"create_git_version_tag",
"(",
"APISettings",
".",
"GIT_STAGING_PRE_TAG",
")",
"command_git",
"=",
"'git ls-remote -t'",
"command_awk",
"=",
"'awk \\'{print $2}\\''",
"command_cut_1",
"=",
"'cut -d \\'/\\' -f 3'",
"command_cut_2",
"=",
"'cut -d \\'^\\' -f 1'",
"command_sort",
"=",
"'sort -b -t . -k 1,1nr -k 2,2nr -k 3,3r -k 4,4r -k 5,5r'",
"command_uniq",
"=",
"'uniq'",
"command",
"=",
"command_git",
"+",
"' | '",
"+",
"command_awk",
"+",
"' | '",
"+",
"command_cut_1",
"+",
"' | '",
"+",
"command_cut_2",
"+",
"' | '",
"+",
"command_sort",
"+",
"' | '",
"+",
"command_uniq",
"list_of_tags",
"=",
"str",
"(",
"check_output",
"(",
"command",
",",
"shell",
"=",
"True",
")",
")",
"if",
"staging_tag",
"in",
"list_of_tags",
":",
"return",
"True",
"return",
"False"
] | This method will check if the given tag exists as a staging tag in the remote repository.
The intention is that every tag which should be deployed on a production environment
has to be deployed on a staging environment first. | [
"This",
"method",
"will",
"check",
"if",
"the",
"given",
"tag",
"exists",
"as",
"a",
"staging",
"tag",
"in",
"the",
"remote",
"repository",
"."
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/git/git.py#L40-L63 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/git/git.py | Git.__debug | def __debug(command, dry=False):
"""
This method will be called if the debug mode
is on.
"""
if dry:
command.append('--dry-run')
Shell.debug(command)
if dry:
call(command)
exit(1) | python | def __debug(command, dry=False):
"""
This method will be called if the debug mode
is on.
"""
if dry:
command.append('--dry-run')
Shell.debug(command)
if dry:
call(command)
exit(1) | [
"def",
"__debug",
"(",
"command",
",",
"dry",
"=",
"False",
")",
":",
"if",
"dry",
":",
"command",
".",
"append",
"(",
"'--dry-run'",
")",
"Shell",
".",
"debug",
"(",
"command",
")",
"if",
"dry",
":",
"call",
"(",
"command",
")",
"exit",
"(",
"1",
")"
] | This method will be called if the debug mode
is on. | [
"This",
"method",
"will",
"be",
"called",
"if",
"the",
"debug",
"mode",
"is",
"on",
"."
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/git/git.py#L67-L78 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/git/git.py | Git.__git_add | def __git_add(args=''):
"""
Add files to staging.
The function call will return True if the command succeeds.
"""
command = ['git', 'add', '.']
Shell.msg('Adding files...')
if APISettings.DEBUG:
Git.__debug(command, True)
for key in args:
command.append(key)
if not call(command):
return True
return False | python | def __git_add(args=''):
"""
Add files to staging.
The function call will return True if the command succeeds.
"""
command = ['git', 'add', '.']
Shell.msg('Adding files...')
if APISettings.DEBUG:
Git.__debug(command, True)
for key in args:
command.append(key)
if not call(command):
return True
return False | [
"def",
"__git_add",
"(",
"args",
"=",
"''",
")",
":",
"command",
"=",
"[",
"'git'",
",",
"'add'",
",",
"'.'",
"]",
"Shell",
".",
"msg",
"(",
"'Adding files...'",
")",
"if",
"APISettings",
".",
"DEBUG",
":",
"Git",
".",
"__debug",
"(",
"command",
",",
"True",
")",
"for",
"key",
"in",
"args",
":",
"command",
".",
"append",
"(",
"key",
")",
"if",
"not",
"call",
"(",
"command",
")",
":",
"pass",
"return",
"False"
] | Add files to staging.
The function call will return True if the command succeeds. | [
"Add",
"files",
"to",
"staging",
".",
"The",
"function",
"call",
"will",
"return",
"0",
"if",
"the",
"command",
"success",
"."
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/git/git.py#L83-L98 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/git/git.py | Git.__git_commit | def __git_commit(git_tag):
"""
Commit files to branch.
The function call will return True if the command succeeds.
"""
Shell.msg('Commit changes.')
if APISettings.DEBUG:
Shell.debug('Execute "git commit" in dry mode.')
if not call(['git', 'commit', '-m', '\'' + git_tag + '\'', '--dry-run']):
pass
return True
if not call(['git', 'commit', '-m', '\'' + git_tag + '\'']):
return True
return False | python | def __git_commit(git_tag):
"""
Commit files to branch.
The function call will return True if the command succeeds.
"""
Shell.msg('Commit changes.')
if APISettings.DEBUG:
Shell.debug('Execute "git commit" in dry mode.')
if not call(['git', 'commit', '-m', '\'' + git_tag + '\'', '--dry-run']):
pass
return True
if not call(['git', 'commit', '-m', '\'' + git_tag + '\'']):
return True
return False | [
"def",
"__git_commit",
"(",
"git_tag",
")",
":",
"Shell",
".",
"msg",
"(",
"'Commit changes.'",
")",
"if",
"APISettings",
".",
"DEBUG",
":",
"Shell",
".",
"debug",
"(",
"'Execute \"git commit\" in dry mode.'",
")",
"if",
"not",
"call",
"(",
"[",
"'git'",
",",
"'commit'",
",",
"'-m'",
",",
"'\\''",
"+",
"git_tag",
"+",
"'\\''",
",",
"'--dry-run'",
"]",
")",
":",
"pass",
"return",
"True",
"if",
"not",
"call",
"(",
"[",
"'git'",
",",
"'commit'",
",",
"'-m'",
",",
"'\\''",
"+",
"git_tag",
"+",
"'\\''",
"]",
")",
":",
"return",
"True",
"return",
"False"
] | Commit files to branch.
The function call will return True if the command succeeds. | [
"Commit",
"files",
"to",
"branch",
".",
"The",
"function",
"call",
"will",
"return",
"0",
"if",
"the",
"command",
"success",
"."
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/git/git.py#L117-L131 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/git/git.py | Git.__git_tag | def __git_tag(git_tag):
"""
Create new tag.
The function call will return True if the command succeeds.
"""
command = ['git', 'tag', '-a', git_tag, '-m', '\'' + git_tag + '\'']
Shell.msg('Create tag from version ' + git_tag)
if APISettings.DEBUG:
Git.__debug(command, False)
if not call(command):
return True
return False | python | def __git_tag(git_tag):
"""
Create new tag.
The function call will return True if the command succeeds.
"""
command = ['git', 'tag', '-a', git_tag, '-m', '\'' + git_tag + '\'']
Shell.msg('Create tag from version ' + git_tag)
if APISettings.DEBUG:
Git.__debug(command, False)
if not call(command):
return True
return False | [
"def",
"__git_tag",
"(",
"git_tag",
")",
":",
"command",
"=",
"[",
"'git'",
",",
"'tag'",
",",
"'-a'",
",",
"git_tag",
",",
"'-m'",
",",
"'\\''",
"+",
"git_tag",
"+",
"'\\''",
"]",
"Shell",
".",
"msg",
"(",
"'Create tag from version '",
"+",
"git_tag",
")",
"if",
"APISettings",
".",
"DEBUG",
":",
"Git",
".",
"__debug",
"(",
"command",
",",
"False",
")",
"if",
"not",
"call",
"(",
"command",
")",
":",
"return",
"True",
"return",
"False"
] | Create new tag.
The function call will return True if the command succeeds. | [
"Create",
"new",
"tag",
".",
"The",
"function",
"call",
"will",
"return",
"0",
"if",
"the",
"command",
"success",
"."
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/git/git.py#L135-L148 | train |
rhazdon/django-sonic-screwdriver | django_sonic_screwdriver/git/git.py | Git.__git_tag_push | def __git_tag_push():
"""
Push all tags.
The function call will return True if the command succeeds.
"""
command = ['git', 'push', 'origin', '--tags']
Shell.msg('Pushing tags...')
if APISettings.DEBUG:
Git.__debug(command, True)
if not call(command):
return True
return False | python | def __git_tag_push():
"""
Push all tags.
The function call will return True if the command succeeds.
"""
command = ['git', 'push', 'origin', '--tags']
Shell.msg('Pushing tags...')
if APISettings.DEBUG:
Git.__debug(command, True)
if not call(command):
return True
return False | [
"def",
"__git_tag_push",
"(",
")",
":",
"command",
"=",
"[",
"'git'",
",",
"'push'",
",",
"'origin'",
",",
"'--tags'",
"]",
"Shell",
".",
"msg",
"(",
"'Pushing tags...'",
")",
"if",
"APISettings",
".",
"DEBUG",
":",
"Git",
".",
"__debug",
"(",
"command",
",",
"True",
")",
"if",
"not",
"call",
"(",
"command",
")",
":",
"return",
"True",
"return",
"False"
] | Push all tags.
The function call will return True if the command succeeds. | [
"Push",
"all",
"tags",
".",
"The",
"function",
"call",
"will",
"return",
"0",
"if",
"the",
"command",
"success",
"."
] | 89e885e8c1322fc5c3e0f79b03a55acdc6e63972 | https://github.com/rhazdon/django-sonic-screwdriver/blob/89e885e8c1322fc5c3e0f79b03a55acdc6e63972/django_sonic_screwdriver/git/git.py#L169-L182 | train |
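A sketch of how the private helpers above compose into one tagging flow; the name-mangled _Git__ access is shown only for illustration -- a real caller would use a public wrapper:

tag = Git.create_git_version_tag(APISettings.GIT_STAGING_PRE_TAG)
if Git._Git__git_commit(tag) and Git._Git__git_tag(tag):
    Git._Git__git_tag_push()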
YuriyGuts/pygoose | pygoose/kg/jobs.py | split_into_batches | def split_into_batches(input_list, batch_size, batch_storage_dir, checkpoint=False):
"""
Break the input data into smaller batches, optionally saving each one to disk.
Args:
input_list: An input object that has a list-like interface (indexing and slicing).
batch_size: The maximum number of input items in each batch.
batch_storage_dir: The directory to save the checkpoints to.
checkpoint: Whether to save each batch to a file.
Returns:
A list of batch objects with the following structure:
{'index', 'data', 'input_filename', 'result_filename'}
"""
if checkpoint and not os.path.exists(batch_storage_dir):
os.mkdir(batch_storage_dir)
batches = [
{
'index': batch_index,
'data': input_list[start_index:start_index + batch_size],
'input_filename': os.path.join(batch_storage_dir, 'batch-{:05d}-input.pickle'.format(batch_index)),
'result_filename': os.path.join(batch_storage_dir, 'batch-{:05d}-output.pickle'.format(batch_index)),
}
for batch_index, start_index in enumerate(range(0, len(input_list), batch_size))
]
if checkpoint:
for batch in batches:
save(batch['data'], batch['input_filename'])
return batches | python | def split_into_batches(input_list, batch_size, batch_storage_dir, checkpoint=False):
"""
Break the input data into smaller batches, optionally saving each one to disk.
Args:
input_list: An input object that has a list-like interface (indexing and slicing).
batch_size: The maximum number of input items in each batch.
batch_storage_dir: The directory to save the checkpoints to.
checkpoint: Whether to save each batch to a file.
Returns:
A list of batch objects with the following structure:
{'index', 'data', 'input_filename', 'result_filename'}
"""
if checkpoint and not os.path.exists(batch_storage_dir):
os.mkdir(batch_storage_dir)
batches = [
{
'index': batch_index,
'data': input_list[start_index:start_index + batch_size],
'input_filename': os.path.join(batch_storage_dir, 'batch-{:05d}-input.pickle'.format(batch_index)),
'result_filename': os.path.join(batch_storage_dir, 'batch-{:05d}-output.pickle'.format(batch_index)),
}
for batch_index, start_index in enumerate(range(0, len(input_list), batch_size))
]
if checkpoint:
for batch in batches:
save(batch['data'], batch['input_filename'])
return batches | [
"def",
"split_into_batches",
"(",
"input_list",
",",
"batch_size",
",",
"batch_storage_dir",
",",
"checkpoint",
"=",
"False",
")",
":",
"if",
"checkpoint",
"and",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"batch_storage_dir",
")",
":",
"os",
".",
"mkdir",
"(",
"batch_storage_dir",
")",
"batches",
"=",
"[",
"{",
"'index'",
":",
"batch_index",
",",
"'data'",
":",
"input_list",
"[",
"start_index",
":",
"start_index",
"+",
"batch_size",
"]",
",",
"'input_filename'",
":",
"os",
".",
"path",
".",
"join",
"(",
"batch_storage_dir",
",",
"'batch-{:05d}-input.pickle'",
".",
"format",
"(",
"batch_index",
")",
")",
",",
"'result_filename'",
":",
"os",
".",
"path",
".",
"join",
"(",
"batch_storage_dir",
",",
"'batch-{:05d}-output.pickle'",
".",
"format",
"(",
"batch_index",
")",
")",
",",
"}",
"for",
"batch_index",
",",
"start_index",
"in",
"enumerate",
"(",
"range",
"(",
"0",
",",
"len",
"(",
"input_list",
")",
",",
"batch_size",
")",
")",
"]",
"if",
"checkpoint",
":",
"for",
"batch",
"in",
"batches",
":",
"save",
"(",
"batch",
"[",
"'data'",
"]",
",",
"batch",
"[",
"'input_filename'",
"]",
")",
"return",
"batches"
] | Break the input data into smaller batches, optionally saving each one to disk.
Args:
input_list: An input object that has a list-like interface (indexing and slicing).
batch_size: The maximum number of input items in each batch.
batch_storage_dir: The directory to save the checkpoints to.
checkpoint: Whether to save each batch to a file.
Returns:
A list of batch objects with the following structure:
{'index', 'data', 'input_filename', 'result_filename'} | [
"Break",
"the",
"input",
"data",
"into",
"smaller",
"batches",
"optionally",
"saving",
"each",
"one",
"to",
"disk",
"."
] | 4d9b8827c6d6c4b79949d1cd653393498c0bb3c2 | https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/jobs.py#L16-L48 | train |
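A minimal sketch of the batching helper above:

batches = split_into_batches(list(range(10)), batch_size=4,
                             batch_storage_dir='/tmp/batches', checkpoint=True)
print(len(batches))                  # 3 -- batches of 4, 4 and 2 items
print(batches[0]['input_filename'])  # /tmp/batches/batch-00000-input.pickle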
YuriyGuts/pygoose | pygoose/kg/jobs.py | map_batch_parallel | def map_batch_parallel(input_list, batch_size, item_mapper=None, batch_mapper=None, flatten=True, n_jobs=-1, **kwargs):
"""
Split the data into batches and process each batch in its own thread.
Args:
input_list: An input object that has a list-like interface (indexing and slicing).
item_mapper: (optional) A function to apply to each item in the batch.
batch_mapper: (optional) A function to apply to each batch. Either item_mapper or batch_mapper must be set.
flatten: Whether to unwrap individual batch results or keep them grouped by batch.
n_jobs: The number of parallel processing jobs. -1 will use the number of CPUs on the system.
batch_size: The maximum number of input items in each batch. -1 will store all data as a single batch.
**kwargs: Additional keyword arguments to joblib.Parallel.
Returns:
A list representing the combined output from the mapper function called on all input items of each batch.
"""
# We must specify either how to process each batch or how to process each item.
if item_mapper is None and batch_mapper is None:
raise ValueError('You should specify either batch_mapper or item_mapper.')
if batch_mapper is None:
batch_mapper = _default_batch_mapper
batches = split_into_batches(input_list, batch_size, batch_storage_dir='')
all_batch_results = Parallel(n_jobs=n_jobs, **kwargs)(
delayed(batch_mapper)(batch['data'], item_mapper)
for batch in progressbar(
batches,
desc='Batches',
total=len(batches),
file=sys.stdout,
)
)
# Unwrap the individual batch results if necessary.
if flatten:
final_result = []
for batch_result in all_batch_results:
final_result.extend(batch_result)
else:
final_result = all_batch_results
return final_result | python | def map_batch_parallel(input_list, batch_size, item_mapper=None, batch_mapper=None, flatten=True, n_jobs=-1, **kwargs):
"""
Split the data into batches and process each batch in its own thread.
Args:
input_list: An input object that has a list-like interface (indexing and slicing).
item_mapper: (optional) A function to apply to each item in the batch.
batch_mapper: (optional) A function to apply to each batch. Either item_mapper or batch_mapper must be set.
flatten: Whether to unwrap individual batch results or keep them grouped by batch.
n_jobs: The number of parallel processing jobs. -1 will use the number of CPUs on the system.
batch_size: The maximum number of input items in each batch. -1 will store all data as a single batch.
**kwargs: Additional keyword arguments to joblib.Parallel.
Returns:
A list representing the combined output from the mapper function called on all input items of each batch.
"""
# We must specify either how to process each batch or how to process each item.
if item_mapper is None and batch_mapper is None:
raise ValueError('You should specify either batch_mapper or item_mapper.')
if batch_mapper is None:
batch_mapper = _default_batch_mapper
batches = split_into_batches(input_list, batch_size, batch_storage_dir='')
all_batch_results = Parallel(n_jobs=n_jobs, **kwargs)(
delayed(batch_mapper)(batch['data'], item_mapper)
for batch in progressbar(
batches,
desc='Batches',
total=len(batches),
file=sys.stdout,
)
)
# Unwrap the individual batch results if necessary.
if flatten:
final_result = []
for batch_result in all_batch_results:
final_result.extend(batch_result)
else:
final_result = all_batch_results
return final_result | [
"def",
"map_batch_parallel",
"(",
"input_list",
",",
"batch_size",
",",
"item_mapper",
"=",
"None",
",",
"batch_mapper",
"=",
"None",
",",
"flatten",
"=",
"True",
",",
"n_jobs",
"=",
"-",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"# We must specify either how to process each batch or how to process each item.",
"if",
"item_mapper",
"is",
"None",
"and",
"batch_mapper",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'You should specify either batch_mapper or item_mapper.'",
")",
"if",
"batch_mapper",
"is",
"None",
":",
"batch_mapper",
"=",
"_default_batch_mapper",
"batches",
"=",
"split_into_batches",
"(",
"input_list",
",",
"batch_size",
",",
"batch_storage_dir",
"=",
"''",
")",
"all_batch_results",
"=",
"Parallel",
"(",
"n_jobs",
"=",
"n_jobs",
",",
"*",
"*",
"kwargs",
")",
"(",
"delayed",
"(",
"batch_mapper",
")",
"(",
"batch",
"[",
"'data'",
"]",
",",
"item_mapper",
")",
"for",
"batch",
"in",
"progressbar",
"(",
"batches",
",",
"desc",
"=",
"'Batches'",
",",
"total",
"=",
"len",
"(",
"batches",
")",
",",
"file",
"=",
"sys",
".",
"stdout",
",",
")",
")",
"# Unwrap the individual batch results if necessary.",
"if",
"flatten",
":",
"final_result",
"=",
"[",
"]",
"for",
"batch_result",
"in",
"all_batch_results",
":",
"final_result",
".",
"extend",
"(",
"batch_result",
")",
"else",
":",
"final_result",
"=",
"all_batch_results",
"return",
"final_result"
] | Split the data into batches and process each batch in its own thread.
Args:
input_list: An input object that has a list-like interface (indexing and slicing).
item_mapper: (optional) A function to apply to each item in the batch.
batch_mapper: (optional) A function to apply to each batch. Either item_mapper or batch_mapper must be set.
flatten: Whether to unwrap individual batch results or keep them grouped by batch.
n_jobs: The number of parallel processing jobs. -1 will use the number of CPUs on the system.
batch_size: The maximum number of input items in each batch. -1 will store all data as a single batch.
**kwargs: Additional keyword arguments to joblib.Parallel.
Returns:
A list representing the combined output from the mapper function called on all input items of each batch. | [
"Split",
"the",
"data",
"into",
"batches",
"and",
"process",
"each",
"batch",
"in",
"its",
"own",
"thread",
"."
] | 4d9b8827c6d6c4b79949d1cd653393498c0bb3c2 | https://github.com/YuriyGuts/pygoose/blob/4d9b8827c6d6c4b79949d1cd653393498c0bb3c2/pygoose/kg/jobs.py#L110-L153 | train |
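A minimal sketch of the parallel map above, using a per-item mapper with the default batch mapper:

def square(x):
    return x * x

results = map_batch_parallel(list(range(100)), batch_size=25, item_mapper=square)
print(results[:5])  # [0, 1, 4, 9, 16]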
anayjoshi/platypus | platypus/cfg/ast_to_cfg.py | get_cfg | def get_cfg(ast_func):
"""
Traverses the AST and returns the corresponding CFG
:param ast_func: The AST representation of function
:type ast_func: ast.Function
:returns: The CFG representation of the function
:rtype: cfg.Function
"""
cfg_func = cfg.Function()
for ast_var in ast_func.input_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_input_variable(cfg_var)
for ast_var in ast_func.output_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_output_variable(cfg_var)
bb_start = cfg.BasicBlock()
cfg_func.add_basic_block(bb_start)
for stmt in ast_func.body:
bb_temp = bb_start
bb_temp = process_cfg(stmt, bb_temp, cfg_func)
cfg_func.clean_up()
cfg_func.add_summary(ast_func.summary)
return cfg_func | python | def get_cfg(ast_func):
"""
Traverses the AST and returns the corresponding CFG
:param ast_func: The AST representation of function
:type ast_func: ast.Function
:returns: The CFG representation of the function
:rtype: cfg.Function
"""
cfg_func = cfg.Function()
for ast_var in ast_func.input_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_input_variable(cfg_var)
for ast_var in ast_func.output_variable_list:
cfg_var = cfg_func.get_variable(ast_var.name)
cfg_func.add_output_variable(cfg_var)
bb_start = cfg.BasicBlock()
cfg_func.add_basic_block(bb_start)
for stmt in ast_func.body:
bb_temp = bb_start
bb_temp = process_cfg(stmt, bb_temp, cfg_func)
cfg_func.clean_up()
cfg_func.add_summary(ast_func.summary)
return cfg_func | [
"def",
"get_cfg",
"(",
"ast_func",
")",
":",
"cfg_func",
"=",
"cfg",
".",
"Function",
"(",
")",
"for",
"ast_var",
"in",
"ast_func",
".",
"input_variable_list",
":",
"cfg_var",
"=",
"cfg_func",
".",
"get_variable",
"(",
"ast_var",
".",
"name",
")",
"cfg_func",
".",
"add_input_variable",
"(",
"cfg_var",
")",
"for",
"ast_var",
"in",
"ast_func",
".",
"output_variable_list",
":",
"cfg_var",
"=",
"cfg_func",
".",
"get_variable",
"(",
"ast_var",
".",
"name",
")",
"cfg_func",
".",
"add_output_variable",
"(",
"cfg_var",
")",
"bb_start",
"=",
"cfg",
".",
"BasicBlock",
"(",
")",
"cfg_func",
".",
"add_basic_block",
"(",
"bb_start",
")",
"for",
"stmt",
"in",
"ast_func",
".",
"body",
":",
"bb_temp",
"=",
"bb_start",
"bb_temp",
"=",
"process_cfg",
"(",
"stmt",
",",
"bb_temp",
",",
"cfg_func",
")",
"cfg_func",
".",
"clean_up",
"(",
")",
"cfg_func",
".",
"add_summary",
"(",
"ast_func",
".",
"summary",
")",
"return",
"cfg_func"
] | Traverses the AST and returns the corresponding CFG
:param ast_func: The AST representation of function
:type ast_func: ast.Function
:returns: The CFG representation of the function
:rtype: cfg.Function | [
"Traverses",
"the",
"AST",
"and",
"returns",
"the",
"corresponding",
"CFG"
] | 71712f58c99651efbd2e6dfd75a9b1228d42e9ef | https://github.com/anayjoshi/platypus/blob/71712f58c99651efbd2e6dfd75a9b1228d42e9ef/platypus/cfg/ast_to_cfg.py#L4-L28 | train |
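A self-contained analogue of the traversal pattern get_cfg relies on: thread a current basic block through the statement list and let each statement hand back the block that control falls through to (platypus types are not used here):

class BB:
    def __init__(self):
        self.stmts = []

def process(stmt, bb):
    bb.stmts.append(stmt)  # a branching statement would create and return new blocks
    return bb

bb = BB()
for stmt in ['x = 1', 'y = x + 2']:
    bb = process(stmt, bb)
print(bb.stmts)  # ['x = 1', 'y = x + 2']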
Erotemic/utool | utool/util_dev.py | overrideable_partial | def overrideable_partial(func, *args, **default_kwargs):
""" like partial, but given kwargs can be overrideden at calltime """
import functools
@functools.wraps(func)
def partial_wrapper(*given_args, **given_kwargs):
kwargs = default_kwargs.copy()
kwargs.update(given_kwargs)
return func(*(args + given_args), **kwargs)
return partial_wrapper | python | def overrideable_partial(func, *args, **default_kwargs):
""" like partial, but given kwargs can be overrideden at calltime """
import functools
@functools.wraps(func)
def partial_wrapper(*given_args, **given_kwargs):
kwargs = default_kwargs.copy()
kwargs.update(given_kwargs)
return func(*(args + given_args), **kwargs)
return partial_wrapper | [
"def",
"overrideable_partial",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"default_kwargs",
")",
":",
"import",
"functools",
"@",
"functools",
".",
"wraps",
"(",
"func",
")",
"def",
"partial_wrapper",
"(",
"*",
"given_args",
",",
"*",
"*",
"given_kwargs",
")",
":",
"kwargs",
"=",
"default_kwargs",
".",
"copy",
"(",
")",
"kwargs",
".",
"update",
"(",
"given_kwargs",
")",
"return",
"func",
"(",
"*",
"(",
"args",
"+",
"given_args",
")",
",",
"*",
"*",
"kwargs",
")",
"return",
"partial_wrapper"
] | like partial, but given kwargs can be overridden at calltime | [
"like",
"partial",
"but",
"given",
"kwargs",
"can",
"be",
"overrideden",
"at",
"calltime"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L40-L48 | train |
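A usage sketch for the entry above, assuming the function is importable from the pinned revision; `greet` is a made-up example function, not part of the quoted source.

from utool.util_dev import overrideable_partial  # pinned-revision assumption

def greet(name, punct='.', greeting='hello'):
    return '%s %s%s' % (greeting, name, punct)

wrapped = overrideable_partial(greet, greeting='hi', punct='!')
print(wrapped('ana'))             # hi ana!
print(wrapped('ana', punct='?'))  # hi ana? -- the baked-in kwarg is overridden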
Erotemic/utool | utool/util_dev.py | get_nonconflicting_string | def get_nonconflicting_string(base_fmtstr, conflict_set, offset=0):
"""
gets a new string that won't conflict with something that already exists
Args:
base_fmtstr (str):
conflict_set (set):
CommandLine:
python -m utool.util_dev --test-get_nonconflicting_string
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> # build test data
>>> base_fmtstr = 'somestring%d'
>>> conflict_set = ['somestring0']
>>> # execute function
>>> result = get_nonconflicting_string(base_fmtstr, conflict_set)
>>> # verify results
>>> print(result)
somestring1
"""
# Infinite loop until we find a non-conflict
conflict_set_ = set(conflict_set)
for count in it.count(offset):
base_str = base_fmtstr % count
if base_str not in conflict_set_:
return base_str | python | def get_nonconflicting_string(base_fmtstr, conflict_set, offset=0):
"""
gets a new string that won't conflict with something that already exists
Args:
base_fmtstr (str):
conflict_set (set):
CommandLine:
python -m utool.util_dev --test-get_nonconflicting_string
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> # build test data
>>> base_fmtstr = 'somestring%d'
>>> conflict_set = ['somestring0']
>>> # execute function
>>> result = get_nonconflicting_string(base_fmtstr, conflict_set)
>>> # verify results
>>> print(result)
somestring1
"""
# Infinite loop until we find a non-conflict
conflict_set_ = set(conflict_set)
for count in it.count(offset):
base_str = base_fmtstr % count
if base_str not in conflict_set_:
return base_str | [
"def",
"get_nonconflicting_string",
"(",
"base_fmtstr",
",",
"conflict_set",
",",
"offset",
"=",
"0",
")",
":",
"# Infinite loop until we find a non-conflict",
"conflict_set_",
"=",
"set",
"(",
"conflict_set",
")",
"for",
"count",
"in",
"it",
".",
"count",
"(",
"offset",
")",
":",
"base_str",
"=",
"base_fmtstr",
"%",
"count",
"if",
"base_str",
"not",
"in",
"conflict_set_",
":",
"return",
"base_str"
] | gets a new string that won't conflict with something that already exists
Args:
base_fmtstr (str):
conflict_set (set):
CommandLine:
python -m utool.util_dev --test-get_nonconflicting_string
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> # build test data
>>> base_fmtstr = 'somestring%d'
>>> conflict_set = ['somestring0']
>>> # execute function
>>> result = get_nonconflicting_string(base_fmtstr, conflict_set)
>>> # verify results
>>> print(result)
somestring1 | [
"gets",
"a",
"new",
"string",
"that",
"wont",
"conflict",
"with",
"something",
"that",
"already",
"exists"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L147-L175 | train |
Erotemic/utool | utool/util_dev.py | get_nonconflicting_path_old | def get_nonconflicting_path_old(base_fmtstr, dpath, offset=0):
r"""
base_fmtstr must have a %d in it
"""
import utool as ut
from os.path import basename
pattern = '*'
dname_list = ut.glob(dpath, pattern, recursive=False,
with_files=True, with_dirs=True)
conflict_set = set([basename(dname) for dname in dname_list])
newname = ut.get_nonconflicting_string(base_fmtstr, conflict_set,
offset=offset)
newpath = join(dpath, newname)
return newpath | python | def get_nonconflicting_path_old(base_fmtstr, dpath, offset=0):
r"""
base_fmtstr must have a %d in it
"""
import utool as ut
from os.path import basename
pattern = '*'
dname_list = ut.glob(dpath, pattern, recursive=False,
with_files=True, with_dirs=True)
conflict_set = set([basename(dname) for dname in dname_list])
newname = ut.get_nonconflicting_string(base_fmtstr, conflict_set,
offset=offset)
newpath = join(dpath, newname)
return newpath | [
"def",
"get_nonconflicting_path_old",
"(",
"base_fmtstr",
",",
"dpath",
",",
"offset",
"=",
"0",
")",
":",
"import",
"utool",
"as",
"ut",
"from",
"os",
".",
"path",
"import",
"basename",
"pattern",
"=",
"'*'",
"dname_list",
"=",
"ut",
".",
"glob",
"(",
"dpath",
",",
"pattern",
",",
"recursive",
"=",
"False",
",",
"with_files",
"=",
"True",
",",
"with_dirs",
"=",
"True",
")",
"conflict_set",
"=",
"set",
"(",
"[",
"basename",
"(",
"dname",
")",
"for",
"dname",
"in",
"dname_list",
"]",
")",
"newname",
"=",
"ut",
".",
"get_nonconflicting_string",
"(",
"base_fmtstr",
",",
"conflict_set",
",",
"offset",
"=",
"offset",
")",
"newpath",
"=",
"join",
"(",
"dpath",
",",
"newname",
")",
"return",
"newpath"
] | r"""
base_fmtstr must have a %d in it | [
"r",
"base_fmtstr",
"must",
"have",
"a",
"%d",
"in",
"it"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L178-L193 | train |
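A usage sketch for the entry above; it runs in a throwaway directory so the glob-based conflict scan sees known contents. The import assumes the pinned utool revision.

import os
import tempfile
from utool.util_dev import get_nonconflicting_path_old  # pinned-revision assumption

dpath = tempfile.mkdtemp()
open(os.path.join(dpath, 'backup_0'), 'w').close()  # occupy the first slot
newpath = get_nonconflicting_path_old('backup_%d', dpath)
print(newpath)  # <dpath>/backup_1 -- the first name not already present in dpath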
Erotemic/utool | utool/util_dev.py | are_you_sure | def are_you_sure(msg=''):
r"""
Prompts user to accept or checks command line for -y
Args:
msg (str):
Returns:
bool: accept or not
"""
print(msg)
from utool import util_arg
from utool import util_str
override = util_arg.get_argflag(('--yes', '--y', '-y'))
if override:
print('accepting based on command line flag')
return True
valid_ans = ['yes', 'y']
valid_prompt = util_str.conj_phrase(valid_ans, 'or')
ans = input('Are you sure?\n Enter %s to accept\n' % valid_prompt)
return ans.lower() in valid_ans | python | def are_you_sure(msg=''):
r"""
Prompts user to accept or checks command line for -y
Args:
msg (str):
Returns:
bool: accept or not
"""
print(msg)
from utool import util_arg
from utool import util_str
override = util_arg.get_argflag(('--yes', '--y', '-y'))
if override:
print('accepting based on command line flag')
return True
valid_ans = ['yes', 'y']
valid_prompt = util_str.conj_phrase(valid_ans, 'or')
ans = input('Are you sure?\n Enter %s to accept\n' % valid_prompt)
return ans.lower() in valid_ans | [
"def",
"are_you_sure",
"(",
"msg",
"=",
"''",
")",
":",
"print",
"(",
"msg",
")",
"from",
"utool",
"import",
"util_arg",
"from",
"utool",
"import",
"util_str",
"override",
"=",
"util_arg",
".",
"get_argflag",
"(",
"(",
"'--yes'",
",",
"'--y'",
",",
"'-y'",
")",
")",
"if",
"override",
":",
"print",
"(",
"'accepting based on command line flag'",
")",
"return",
"True",
"valid_ans",
"=",
"[",
"'yes'",
",",
"'y'",
"]",
"valid_prompt",
"=",
"util_str",
".",
"conj_phrase",
"(",
"valid_ans",
",",
"'or'",
")",
"ans",
"=",
"input",
"(",
"'Are you sure?\\n Enter %s to accept\\n'",
"%",
"valid_prompt",
")",
"return",
"ans",
".",
"lower",
"(",
")",
"in",
"valid_ans"
] | r"""
Prompts user to accept or checks command line for -y
Args:
msg (str):
Returns:
bool: accept or not | [
"r",
"Prompts",
"user",
"to",
"accept",
"or",
"checks",
"command",
"line",
"for",
"-",
"y"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1170-L1190 | train |
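A usage sketch for the entry above; the prompt is interactive unless one of the documented flags (`-y`, `--y`, `--yes`) is on the command line.

from utool.util_dev import are_you_sure  # pinned-revision assumption

if are_you_sure('About to delete the cache.'):
    print('user accepted')   # also reached when the script is run with -y / --yes
else:
    print('user declined')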
Erotemic/utool | utool/util_dev.py | grace_period | def grace_period(msg='', seconds=10):
"""
Gives user a window to stop a process before it happens
"""
import time
print(msg)
override = util_arg.get_argflag(('--yes', '--y', '-y'))
print('starting grace period')
if override:
print('ending based on command line flag')
return True
for count in reversed(range(1, seconds + 1)):
time.sleep(1)
print('%d' % (count,))
print('%d' % (0,))
print('grace period is over')
return True | python | def grace_period(msg='', seconds=10):
"""
Gives user a window to stop a process before it happens
"""
import time
print(msg)
override = util_arg.get_argflag(('--yes', '--y', '-y'))
print('starting grace period')
if override:
print('ending based on command line flag')
return True
for count in reversed(range(1, seconds + 1)):
time.sleep(1)
print('%d' % (count,))
print('%d' % (0,))
print('grace period is over')
return True | [
"def",
"grace_period",
"(",
"msg",
"=",
"''",
",",
"seconds",
"=",
"10",
")",
":",
"import",
"time",
"print",
"(",
"msg",
")",
"override",
"=",
"util_arg",
".",
"get_argflag",
"(",
"(",
"'--yes'",
",",
"'--y'",
",",
"'-y'",
")",
")",
"print",
"(",
"'starting grace period'",
")",
"if",
"override",
":",
"print",
"(",
"'ending based on command line flag'",
")",
"return",
"True",
"for",
"count",
"in",
"reversed",
"(",
"range",
"(",
"1",
",",
"seconds",
"+",
"1",
")",
")",
":",
"time",
".",
"sleep",
"(",
"1",
")",
"print",
"(",
"'%d'",
"%",
"(",
"count",
",",
")",
")",
"print",
"(",
"'%d'",
"%",
"(",
"0",
",",
")",
")",
"print",
"(",
"'grace period is over'",
")",
"return",
"True"
] | Gives user a window to stop a process before it happens | [
"Gives",
"user",
"a",
"window",
"to",
"stop",
"a",
"process",
"before",
"it",
"happens"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1193-L1209 | train |
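A usage sketch for the entry above; the countdown gives the user a chance to Ctrl-C before the follow-up action runs. `rebuild_index` is a hypothetical stand-in, not part of the quoted source.

from utool.util_dev import grace_period  # pinned-revision assumption

def rebuild_index():
    print('rebuilding...')  # hypothetical follow-up action

grace_period('Rebuilding the search index in:', seconds=3)
rebuild_index()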
Erotemic/utool | utool/util_dev.py | delayed_retry_gen | def delayed_retry_gen(delay_schedule=[.1, 1, 10], msg=None, timeout=None, raise_=True):
""" template code for a infinte retry loop """
import utool as ut
import time
if not ut.isiterable(delay_schedule):
delay_schedule = [delay_schedule]
tt = ut.tic()
# First attempt is immediate
yield 0
for count in it.count(0):
#print('count = %r' % (count,))
if timeout is not None and ut.toc(tt) > timeout:
if raise_:
raise Exception('Retry loop timed out')
else:
raise StopIteration('Retry loop timed out')
index = min(count, len(delay_schedule) - 1)
delay = delay_schedule[index]
time.sleep(delay)
yield count + 1 | python | def delayed_retry_gen(delay_schedule=[.1, 1, 10], msg=None, timeout=None, raise_=True):
""" template code for a infinte retry loop """
import utool as ut
import time
if not ut.isiterable(delay_schedule):
delay_schedule = [delay_schedule]
tt = ut.tic()
# First attempt is immediate
yield 0
for count in it.count(0):
#print('count = %r' % (count,))
if timeout is not None and ut.toc(tt) > timeout:
if raise_:
raise Exception('Retry loop timed out')
else:
raise StopIteration('Retry loop timed out')
index = min(count, len(delay_schedule) - 1)
delay = delay_schedule[index]
time.sleep(delay)
yield count + 1 | [
"def",
"delayed_retry_gen",
"(",
"delay_schedule",
"=",
"[",
".1",
",",
"1",
",",
"10",
"]",
",",
"msg",
"=",
"None",
",",
"timeout",
"=",
"None",
",",
"raise_",
"=",
"True",
")",
":",
"import",
"utool",
"as",
"ut",
"import",
"time",
"if",
"not",
"ut",
".",
"isiterable",
"(",
"delay_schedule",
")",
":",
"delay_schedule",
"=",
"[",
"delay_schedule",
"]",
"tt",
"=",
"ut",
".",
"tic",
"(",
")",
"# First attempt is immediate",
"yield",
"0",
"for",
"count",
"in",
"it",
".",
"count",
"(",
"0",
")",
":",
"#print('count = %r' % (count,))",
"if",
"timeout",
"is",
"not",
"None",
"and",
"ut",
".",
"toc",
"(",
"tt",
")",
">",
"timeout",
":",
"if",
"raise_",
":",
"raise",
"Exception",
"(",
"'Retry loop timed out'",
")",
"else",
":",
"raise",
"StopIteration",
"(",
"'Retry loop timed out'",
")",
"index",
"=",
"min",
"(",
"count",
",",
"len",
"(",
"delay_schedule",
")",
"-",
"1",
")",
"delay",
"=",
"delay_schedule",
"[",
"index",
"]",
"time",
".",
"sleep",
"(",
"delay",
")",
"yield",
"count",
"+",
"1"
] | template code for an infinite retry loop | [
"template",
"code",
"for",
"a",
"infinte",
"retry",
"loop"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1212-L1233 | train |
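A usage sketch showing the intended retry-loop shape for the entry above, with the documented 0.1s / 1s / 10s backoff schedule. `flaky_fetch` is a made-up stand-in for an operation that sometimes fails.

import random
from utool.util_dev import delayed_retry_gen  # pinned-revision assumption

def flaky_fetch():
    # made-up stand-in for an operation that fails transiently
    if random.random() < 0.5:
        raise IOError('transient failure')
    return 'payload'

for count in delayed_retry_gen([.1, 1, 10], timeout=30):
    try:
        data = flaky_fetch()
        break  # success: stop retrying
    except IOError:
        print('attempt %d failed, backing off' % count)
print(data)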
Erotemic/utool | utool/util_dev.py | get_stats_str | def get_stats_str(list_=None, newlines=False, keys=None, exclude_keys=[], lbl=None,
precision=None, axis=0, stat_dict=None, use_nan=False,
align=False, use_median=False, **kwargs):
"""
Returns the string version of get_stats
DEPRECATE in favor of ut.repr3(ut.get_stats(...))
if keys is not None then it only displays chosen keys
excluded keys are always removed
CommandLine:
python -m utool.util_dev --test-get_stats_str
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> list_ = [1, 2, 3, 4, 5]
>>> newlines = False
>>> keys = None
>>> exclude_keys = []
>>> lbl = None
>>> precision = 2
>>> stat_str = get_stats_str(list_, newlines, keys, exclude_keys, lbl, precision)
>>> result = str(stat_str)
>>> print(result)
{'mean': 3, 'std': 1.41, 'max': 5, 'min': 1, 'nMin': 1, 'nMax': 1, 'shape': (5,)}
SeeAlso:
repr2
get_stats
"""
from utool.util_str import repr4
import utool as ut
# Get stats dict
if stat_dict is None:
stat_dict = get_stats(list_, axis=axis, use_nan=use_nan, use_median=use_median)
else:
stat_dict = stat_dict.copy()
# Keep only included keys if specified
if keys is not None:
for key in list(six.iterkeys(stat_dict)):
if key not in keys:
del stat_dict[key]
# Remove excluded keys
for key in exclude_keys:
if key in stat_dict:
del stat_dict[key]
# apply precision
statstr_dict = stat_dict.copy()
#precisionless_types = (bool,) + six.string_types
if precision is not None:
assert ut.is_int(precision), 'precision must be an integer'
float_fmtstr = '%.' + str(precision) + 'f'
for key in list(six.iterkeys(statstr_dict)):
val = statstr_dict[key]
isfloat = ut.is_float(val)
if not isfloat and isinstance(val, list):
type_list = list(map(type, val))
if len(type_list) > 0 and ut.allsame(type_list):
if ut.is_float(val[0]):
isfloat = True
val = np.array(val)
if isfloat:
if isinstance(val, np.ndarray):
strval = str([float_fmtstr % v for v in val]).replace('\'', '').lstrip('u')
#np.array_str((val), precision=precision)
else:
strval = float_fmtstr % val
if not strval.startswith('0'):
strval = strval.rstrip('0')
strval = strval.rstrip('.')
statstr_dict[key] = strval
else:
if isinstance(val, np.ndarray):
strval = repr(val.tolist())
else:
strval = str(val)
statstr_dict[key] = strval
# format the dictionary string
stat_str = repr4(statstr_dict, strvals=True, newlines=newlines)
# add a label if requested
if lbl is True:
lbl = ut.get_varname_from_stack(list_, N=1) # fancy
if lbl is not None:
stat_str = 'stats_' + lbl + ' = ' + stat_str
if align:
stat_str = ut.align(stat_str, ':')
return stat_str | python | def get_stats_str(list_=None, newlines=False, keys=None, exclude_keys=[], lbl=None,
precision=None, axis=0, stat_dict=None, use_nan=False,
align=False, use_median=False, **kwargs):
"""
Returns the string version of get_stats
DEPRECATE in favor of ut.repr3(ut.get_stats(...))
if keys is not None then it only displays chosen keys
excluded keys are always removed
CommandLine:
python -m utool.util_dev --test-get_stats_str
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> list_ = [1, 2, 3, 4, 5]
>>> newlines = False
>>> keys = None
>>> exclude_keys = []
>>> lbl = None
>>> precision = 2
>>> stat_str = get_stats_str(list_, newlines, keys, exclude_keys, lbl, precision)
>>> result = str(stat_str)
>>> print(result)
{'mean': 3, 'std': 1.41, 'max': 5, 'min': 1, 'nMin': 1, 'nMax': 1, 'shape': (5,)}
SeeAlso:
repr2
get_stats
"""
from utool.util_str import repr4
import utool as ut
# Get stats dict
if stat_dict is None:
stat_dict = get_stats(list_, axis=axis, use_nan=use_nan, use_median=use_median)
else:
stat_dict = stat_dict.copy()
# Keep only included keys if specified
if keys is not None:
for key in list(six.iterkeys(stat_dict)):
if key not in keys:
del stat_dict[key]
# Remove excluded keys
for key in exclude_keys:
if key in stat_dict:
del stat_dict[key]
# apply precision
statstr_dict = stat_dict.copy()
#precisionless_types = (bool,) + six.string_types
if precision is not None:
assert ut.is_int(precision), 'precision must be an integer'
float_fmtstr = '%.' + str(precision) + 'f'
for key in list(six.iterkeys(statstr_dict)):
val = statstr_dict[key]
isfloat = ut.is_float(val)
if not isfloat and isinstance(val, list):
type_list = list(map(type, val))
if len(type_list) > 0 and ut.allsame(type_list):
if ut.is_float(val[0]):
isfloat = True
val = np.array(val)
if isfloat:
if isinstance(val, np.ndarray):
strval = str([float_fmtstr % v for v in val]).replace('\'', '').lstrip('u')
#np.array_str((val), precision=precision)
else:
strval = float_fmtstr % val
if not strval.startswith('0'):
strval = strval.rstrip('0')
strval = strval.rstrip('.')
statstr_dict[key] = strval
else:
if isinstance(val, np.ndarray):
strval = repr(val.tolist())
else:
strval = str(val)
statstr_dict[key] = strval
# format the dictionary string
stat_str = repr4(statstr_dict, strvals=True, newlines=newlines)
# add a label if requested
if lbl is True:
lbl = ut.get_varname_from_stack(list_, N=1) # fancy
if lbl is not None:
stat_str = 'stats_' + lbl + ' = ' + stat_str
if align:
stat_str = ut.align(stat_str, ':')
return stat_str | [
"def",
"get_stats_str",
"(",
"list_",
"=",
"None",
",",
"newlines",
"=",
"False",
",",
"keys",
"=",
"None",
",",
"exclude_keys",
"=",
"[",
"]",
",",
"lbl",
"=",
"None",
",",
"precision",
"=",
"None",
",",
"axis",
"=",
"0",
",",
"stat_dict",
"=",
"None",
",",
"use_nan",
"=",
"False",
",",
"align",
"=",
"False",
",",
"use_median",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"utool",
".",
"util_str",
"import",
"repr4",
"import",
"utool",
"as",
"ut",
"# Get stats dict",
"if",
"stat_dict",
"is",
"None",
":",
"stat_dict",
"=",
"get_stats",
"(",
"list_",
",",
"axis",
"=",
"axis",
",",
"use_nan",
"=",
"use_nan",
",",
"use_median",
"=",
"use_median",
")",
"else",
":",
"stat_dict",
"=",
"stat_dict",
".",
"copy",
"(",
")",
"# Keep only included keys if specified",
"if",
"keys",
"is",
"not",
"None",
":",
"for",
"key",
"in",
"list",
"(",
"six",
".",
"iterkeys",
"(",
"stat_dict",
")",
")",
":",
"if",
"key",
"not",
"in",
"keys",
":",
"del",
"stat_dict",
"[",
"key",
"]",
"# Remove excluded keys",
"for",
"key",
"in",
"exclude_keys",
":",
"if",
"key",
"in",
"stat_dict",
":",
"del",
"stat_dict",
"[",
"key",
"]",
"# apply precision",
"statstr_dict",
"=",
"stat_dict",
".",
"copy",
"(",
")",
"#precisionless_types = (bool,) + six.string_types",
"if",
"precision",
"is",
"not",
"None",
":",
"assert",
"ut",
".",
"is_int",
"(",
"precision",
")",
",",
"'precision must be an integer'",
"float_fmtstr",
"=",
"'%.'",
"+",
"str",
"(",
"precision",
")",
"+",
"'f'",
"for",
"key",
"in",
"list",
"(",
"six",
".",
"iterkeys",
"(",
"statstr_dict",
")",
")",
":",
"val",
"=",
"statstr_dict",
"[",
"key",
"]",
"isfloat",
"=",
"ut",
".",
"is_float",
"(",
"val",
")",
"if",
"not",
"isfloat",
"and",
"isinstance",
"(",
"val",
",",
"list",
")",
":",
"type_list",
"=",
"list",
"(",
"map",
"(",
"type",
",",
"val",
")",
")",
"if",
"len",
"(",
"type_list",
")",
">",
"0",
"and",
"ut",
".",
"allsame",
"(",
"type_list",
")",
":",
"if",
"ut",
".",
"is_float",
"(",
"val",
"[",
"0",
"]",
")",
":",
"isfloat",
"=",
"True",
"val",
"=",
"np",
".",
"array",
"(",
"val",
")",
"if",
"isfloat",
":",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"ndarray",
")",
":",
"strval",
"=",
"str",
"(",
"[",
"float_fmtstr",
"%",
"v",
"for",
"v",
"in",
"val",
"]",
")",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
".",
"lstrip",
"(",
"'u'",
")",
"#np.array_str((val), precision=precision)",
"else",
":",
"strval",
"=",
"float_fmtstr",
"%",
"val",
"if",
"not",
"strval",
".",
"startswith",
"(",
"'0'",
")",
":",
"strval",
"=",
"strval",
".",
"rstrip",
"(",
"'0'",
")",
"strval",
"=",
"strval",
".",
"rstrip",
"(",
"'.'",
")",
"statstr_dict",
"[",
"key",
"]",
"=",
"strval",
"else",
":",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"ndarray",
")",
":",
"strval",
"=",
"repr",
"(",
"val",
".",
"tolist",
"(",
")",
")",
"else",
":",
"strval",
"=",
"str",
"(",
"val",
")",
"statstr_dict",
"[",
"key",
"]",
"=",
"strval",
"# format the dictionary string",
"stat_str",
"=",
"repr4",
"(",
"statstr_dict",
",",
"strvals",
"=",
"True",
",",
"newlines",
"=",
"newlines",
")",
"# add a label if requested",
"if",
"lbl",
"is",
"True",
":",
"lbl",
"=",
"ut",
".",
"get_varname_from_stack",
"(",
"list_",
",",
"N",
"=",
"1",
")",
"# fancy",
"if",
"lbl",
"is",
"not",
"None",
":",
"stat_str",
"=",
"'stats_'",
"+",
"lbl",
"+",
"' = '",
"+",
"stat_str",
"if",
"align",
":",
"stat_str",
"=",
"ut",
".",
"align",
"(",
"stat_str",
",",
"':'",
")",
"return",
"stat_str"
] | Returns the string version of get_stats
DEPRECATE in favor of ut.repr3(ut.get_stats(...))
if keys is not None then it only displays chosen keys
excluded keys are always removed
CommandLine:
python -m utool.util_dev --test-get_stats_str
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> list_ = [1, 2, 3, 4, 5]
>>> newlines = False
>>> keys = None
>>> exclude_keys = []
>>> lbl = None
>>> precision = 2
>>> stat_str = get_stats_str(list_, newlines, keys, exclude_keys, lbl, precision)
>>> result = str(stat_str)
>>> print(result)
{'mean': 3, 'std': 1.41, 'max': 5, 'min': 1, 'nMin': 1, 'nMax': 1, 'shape': (5,)}
SeeAlso:
repr2
get_stats | [
"Returns",
"the",
"string",
"version",
"of",
"get_stats"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1430-L1520 | train |
Erotemic/utool | utool/util_dev.py | make_call_graph | def make_call_graph(func, *args, **kwargs):
""" profile with pycallgraph
Example:
pycallgraph graphviz -- ./mypythonscript.py
References:
http://pycallgraph.slowchop.com/en/master/
"""
from pycallgraph import PyCallGraph
from pycallgraph.output import GraphvizOutput
with PyCallGraph(output=GraphvizOutput):
func(*args, **kwargs) | python | def make_call_graph(func, *args, **kwargs):
""" profile with pycallgraph
Example:
pycallgraph graphviz -- ./mypythonscript.py
References:
http://pycallgraph.slowchop.com/en/master/
"""
from pycallgraph import PyCallGraph
from pycallgraph.output import GraphvizOutput
with PyCallGraph(output=GraphvizOutput):
func(*args, **kwargs) | [
"def",
"make_call_graph",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"pycallgraph",
"import",
"PyCallGraph",
"from",
"pycallgraph",
".",
"output",
"import",
"GraphvizOutput",
"with",
"PyCallGraph",
"(",
"output",
"=",
"GraphvizOutput",
")",
":",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | profile with pycallgraph
Example:
pycallgraph graphviz -- ./mypythonscript.py
References:
http://pycallgraph.slowchop.com/en/master/ | [
"profile",
"with",
"pycallgraph"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1549-L1561 | train |
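A usage sketch for the entry above; it needs the `pycallgraph` package plus a graphviz install, and GraphvizOutput writes `pycallgraph.png` in the working directory by default.

from utool.util_dev import make_call_graph  # pinned-revision assumption

def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

make_call_graph(fib, 10)  # traces the recursive call tree into pycallgraph.png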
Erotemic/utool | utool/util_dev.py | _memory_profile | def _memory_profile(with_gc=False):
"""
Helper for memory debugging. Mostly just a namespace where I experiment with
guppy and heapy.
References:
http://stackoverflow.com/questions/2629680/deciding-between-subprocess-multiprocessing-and-thread-in-python
Reset Numpy Memory::
%reset out
%reset array
"""
import utool as ut
if with_gc:
garbage_collect()
import guppy
hp = guppy.hpy()
print('[hpy] Waiting for heap output...')
heap_output = hp.heap()
print(heap_output)
print('[hpy] total heap size: ' + ut.byte_str2(heap_output.size))
ut.util_resources.memstats() | python | def _memory_profile(with_gc=False):
"""
Helper for memory debugging. Mostly just a namespace where I experiment with
guppy and heapy.
References:
http://stackoverflow.com/questions/2629680/deciding-between-subprocess-multiprocessing-and-thread-in-python
Reset Numpy Memory::
%reset out
%reset array
"""
import utool as ut
if with_gc:
garbage_collect()
import guppy
hp = guppy.hpy()
print('[hpy] Waiting for heap output...')
heap_output = hp.heap()
print(heap_output)
print('[hpy] total heap size: ' + ut.byte_str2(heap_output.size))
ut.util_resources.memstats() | [
"def",
"_memory_profile",
"(",
"with_gc",
"=",
"False",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"with_gc",
":",
"garbage_collect",
"(",
")",
"import",
"guppy",
"hp",
"=",
"guppy",
".",
"hpy",
"(",
")",
"print",
"(",
"'[hpy] Waiting for heap output...'",
")",
"heap_output",
"=",
"hp",
".",
"heap",
"(",
")",
"print",
"(",
"heap_output",
")",
"print",
"(",
"'[hpy] total heap size: '",
"+",
"ut",
".",
"byte_str2",
"(",
"heap_output",
".",
"size",
")",
")",
"ut",
".",
"util_resources",
".",
"memstats",
"(",
")"
] | Helper for memory debugging. Mostly just a namespace where I experiment with
guppy and heapy.
References:
http://stackoverflow.com/questions/2629680/deciding-between-subprocess-multiprocessing-and-thread-in-python
Reset Numpy Memory::
%reset out
%reset array | [
"Helper",
"for",
"memory",
"debugging",
".",
"Mostly",
"just",
"a",
"namespace",
"where",
"I",
"experiment",
"with",
"guppy",
"and",
"heapy",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1586-L1607 | train |
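A usage sketch for the entry above; it requires the `guppy` package and simply prints heap statistics to stdout, optionally after a forced garbage-collection pass.

from utool.util_dev import _memory_profile  # pinned-revision assumption

_memory_profile(with_gc=True)  # collect garbage first so dead objects do not inflate the report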
Erotemic/utool | utool/util_dev.py | make_object_graph | def make_object_graph(obj, fpath='sample_graph.png'):
""" memoryprofile with objgraph
Examples:
#import objgraph
#objgraph.show_most_common_types()
#objgraph.show_growth()
#memtrack.report()
#memtrack.report()
#objgraph.show_growth()
#import gc
#gc.collect()
#memtrack.report()
#y = 0
#objgraph.show_growth()
#memtrack.report()
#utool.embed()
References:
http://mg.pov.lt/objgraph/
"""
import objgraph
objgraph.show_most_common_types()
#print(objgraph.by_type('ndarray'))
#objgraph.find_backref_chain(
# random.choice(objgraph.by_type('ndarray')),
# objgraph.is_proper_module)
objgraph.show_refs([obj], filename='ref_graph.png')
objgraph.show_backrefs([obj], filename='backref_graph.png') | python | def make_object_graph(obj, fpath='sample_graph.png'):
""" memoryprofile with objgraph
Examples:
#import objgraph
#objgraph.show_most_common_types()
#objgraph.show_growth()
#memtrack.report()
#memtrack.report()
#objgraph.show_growth()
#import gc
#gc.collect()
#memtrack.report()
#y = 0
#objgraph.show_growth()
#memtrack.report()
#utool.embed()
References:
http://mg.pov.lt/objgraph/
"""
import objgraph
objgraph.show_most_common_types()
#print(objgraph.by_type('ndarray'))
#objgraph.find_backref_chain(
# random.choice(objgraph.by_type('ndarray')),
# objgraph.is_proper_module)
objgraph.show_refs([obj], filename='ref_graph.png')
objgraph.show_backrefs([obj], filename='backref_graph.png') | [
"def",
"make_object_graph",
"(",
"obj",
",",
"fpath",
"=",
"'sample_graph.png'",
")",
":",
"import",
"objgraph",
"objgraph",
".",
"show_most_common_types",
"(",
")",
"#print(objgraph.by_type('ndarray'))",
"#objgraph.find_backref_chain(",
"# random.choice(objgraph.by_type('ndarray')),",
"# objgraph.is_proper_module)",
"objgraph",
".",
"show_refs",
"(",
"[",
"obj",
"]",
",",
"filename",
"=",
"'ref_graph.png'",
")",
"objgraph",
".",
"show_backrefs",
"(",
"[",
"obj",
"]",
",",
"filename",
"=",
"'backref_graph.png'",
")"
] | memoryprofile with objgraph
Examples:
#import objgraph
#objgraph.show_most_common_types()
#objgraph.show_growth()
#memtrack.report()
#memtrack.report()
#objgraph.show_growth()
#import gc
#gc.collect()
#memtrack.report()
#y = 0
#objgraph.show_growth()
#memtrack.report()
#utool.embed()
References:
http://mg.pov.lt/objgraph/ | [
"memoryprofile",
"with",
"objgraph"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L1612-L1640 | train |
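A usage sketch for the entry above (requires `objgraph` and graphviz). Note that the quoted implementation always writes `ref_graph.png` and `backref_graph.png`; its `fpath` argument is currently unused.

from utool.util_dev import make_object_graph  # pinned-revision assumption

obj = {'a': [1, 2, 3]}
make_object_graph(obj)  # writes ref_graph.png and backref_graph.png in the working directory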
Erotemic/utool | utool/util_dev.py | inverable_unique_two_lists | def inverable_unique_two_lists(item1_list, item2_list):
"""
item1_list = aid1_list
item2_list = aid2_list
"""
import utool as ut
unique_list1, inverse1 = np.unique(item1_list, return_inverse=True)
unique_list2, inverse2 = np.unique(item2_list, return_inverse=True)
flat_stacked, cumsum = ut.invertible_flatten2((unique_list1, unique_list2))
flat_unique, inverse3 = np.unique(flat_stacked, return_inverse=True)
reconstruct_tup = (inverse3, cumsum, inverse2, inverse1)
return flat_unique, reconstruct_tup | python | def inverable_unique_two_lists(item1_list, item2_list):
"""
item1_list = aid1_list
item2_list = aid2_list
"""
import utool as ut
unique_list1, inverse1 = np.unique(item1_list, return_inverse=True)
unique_list2, inverse2 = np.unique(item2_list, return_inverse=True)
flat_stacked, cumsum = ut.invertible_flatten2((unique_list1, unique_list2))
flat_unique, inverse3 = np.unique(flat_stacked, return_inverse=True)
reconstruct_tup = (inverse3, cumsum, inverse2, inverse1)
return flat_unique, reconstruct_tup | [
"def",
"inverable_unique_two_lists",
"(",
"item1_list",
",",
"item2_list",
")",
":",
"import",
"utool",
"as",
"ut",
"unique_list1",
",",
"inverse1",
"=",
"np",
".",
"unique",
"(",
"item1_list",
",",
"return_inverse",
"=",
"True",
")",
"unique_list2",
",",
"inverse2",
"=",
"np",
".",
"unique",
"(",
"item2_list",
",",
"return_inverse",
"=",
"True",
")",
"flat_stacked",
",",
"cumsum",
"=",
"ut",
".",
"invertible_flatten2",
"(",
"(",
"unique_list1",
",",
"unique_list2",
")",
")",
"flat_unique",
",",
"inverse3",
"=",
"np",
".",
"unique",
"(",
"flat_stacked",
",",
"return_inverse",
"=",
"True",
")",
"reconstruct_tup",
"=",
"(",
"inverse3",
",",
"cumsum",
",",
"inverse2",
",",
"inverse1",
")",
"return",
"flat_unique",
",",
"reconstruct_tup"
] | item1_list = aid1_list
item2_list = aid2_list | [
"item1_list",
"=",
"aid1_list",
"item2_list",
"=",
"aid2_list"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2151-L2163 | train |
Erotemic/utool | utool/util_dev.py | uninvert_unique_two_lists | def uninvert_unique_two_lists(flat_list, reconstruct_tup):
"""
flat_list = thumb_list
"""
import utool as ut
(inverse3, cumsum, inverse2, inverse1) = reconstruct_tup
flat_stacked_ = ut.take(flat_list, inverse3)
unique_list1_, unique_list2_ = ut.unflatten2(flat_stacked_, cumsum)
res_list1_ = ut.take(unique_list1_, inverse1)
res_list2_ = ut.take(unique_list2_, inverse2)
return res_list1_, res_list2_ | python | def uninvert_unique_two_lists(flat_list, reconstruct_tup):
"""
flat_list = thumb_list
"""
import utool as ut
(inverse3, cumsum, inverse2, inverse1) = reconstruct_tup
flat_stacked_ = ut.take(flat_list, inverse3)
unique_list1_, unique_list2_ = ut.unflatten2(flat_stacked_, cumsum)
res_list1_ = ut.take(unique_list1_, inverse1)
res_list2_ = ut.take(unique_list2_, inverse2)
return res_list1_, res_list2_ | [
"def",
"uninvert_unique_two_lists",
"(",
"flat_list",
",",
"reconstruct_tup",
")",
":",
"import",
"utool",
"as",
"ut",
"(",
"inverse3",
",",
"cumsum",
",",
"inverse2",
",",
"inverse1",
")",
"=",
"reconstruct_tup",
"flat_stacked_",
"=",
"ut",
".",
"take",
"(",
"flat_list",
",",
"inverse3",
")",
"unique_list1_",
",",
"unique_list2_",
"=",
"ut",
".",
"unflatten2",
"(",
"flat_stacked_",
",",
"cumsum",
")",
"res_list1_",
"=",
"ut",
".",
"take",
"(",
"unique_list1_",
",",
"inverse1",
")",
"res_list2_",
"=",
"ut",
".",
"take",
"(",
"unique_list2_",
",",
"inverse2",
")",
"return",
"res_list1_",
",",
"res_list2_"
] | flat_list = thumb_list | [
"flat_list",
"=",
"thumb_list"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2166-L2176 | train |
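A round-trip sketch for the two entries above: deduplicate a pair of id lists, compute one result per unique id, then fan the results back out. It assumes both functions are importable from the pinned revision.

import numpy as np
from utool.util_dev import (inverable_unique_two_lists,
                            uninvert_unique_two_lists)  # pinned-revision assumption

aid1_list = np.array([1, 2, 2, 3])
aid2_list = np.array([2, 3, 4, 4])
flat_unique, reconstruct_tup = inverable_unique_two_lists(aid1_list, aid2_list)
thumb_list = ['thumb%d' % aid for aid in flat_unique]  # one result per unique id
res1, res2 = uninvert_unique_two_lists(thumb_list, reconstruct_tup)
print(res1)  # ['thumb1', 'thumb2', 'thumb2', 'thumb3']
print(res2)  # ['thumb2', 'thumb3', 'thumb4', 'thumb4']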
Erotemic/utool | utool/util_dev.py | search_module | def search_module(mod, pat, ignore_case=True, recursive=False, _seen=None):
r"""
Searches module functions, classes, and constants for members matching a
pattern.
Args:
mod (module): live python module
pat (str): regular expression
Returns:
list: found_list
CommandLine:
python -m utool.util_dev --exec-search_module --mod=utool --pat=module
python -m utool.util_dev --exec-search_module --mod=opengm --pat=cut
python -m utool.util_dev --exec-search_module --mod=opengm --pat=multi
python -m utool.util_dev --exec-search_module --mod=plottool --pat=networkx
python -m utool.util_dev --exec-search_module --mod=utool --pat=Levenshtein
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> recursive = True
>>> ignore_case = True
>>> modname = ut.get_argval('--mod', type_=str, default='utool')
>>> pat = ut.get_argval('--pat', type_=str, default='search')
>>> mod = ut.import_modname(modname)
>>> print('pat = %r' % (pat,))
>>> print('mod = %r' % (mod,))
>>> found_list = search_module(mod, pat, recursive=recursive)
>>> result = ('found_list = %s' % (ut.repr2(found_list),))
>>> print(result)
Ignore:
mod = cv2
pat = 'freak'
"""
if _seen is not None and mod in _seen:
return []
import utool as ut
reflags = re.IGNORECASE * ignore_case
found_list = [name for name in dir(mod) if re.search(pat, name, flags=reflags)]
if recursive:
if _seen is None:
_seen = set()
_seen.add(mod)
module_attrs = [getattr(mod, name) for name in dir(mod)]
submodules = [
submod for submod in module_attrs
if isinstance(submod, types.ModuleType) and submod not in _seen and
ut.is_defined_by_module(submod, mod)
]
for submod in submodules:
found_list += search_module(submod, pat, ignore_case=ignore_case, recursive=recursive, _seen=_seen)
# found_list = [name for name in dir(mod) if name.find(pat) >= 0]
found_list = ut.unique_ordered(found_list)
return found_list | python | def search_module(mod, pat, ignore_case=True, recursive=False, _seen=None):
r"""
Searches module functions, classes, and constants for members matching a
pattern.
Args:
mod (module): live python module
pat (str): regular expression
Returns:
list: found_list
CommandLine:
python -m utool.util_dev --exec-search_module --mod=utool --pat=module
python -m utool.util_dev --exec-search_module --mod=opengm --pat=cut
python -m utool.util_dev --exec-search_module --mod=opengm --pat=multi
python -m utool.util_dev --exec-search_module --mod=plottool --pat=networkx
python -m utool.util_dev --exec-search_module --mod=utool --pat=Levenshtein
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> recursive = True
>>> ignore_case = True
>>> modname = ut.get_argval('--mod', type_=str, default='utool')
>>> pat = ut.get_argval('--pat', type_=str, default='search')
>>> mod = ut.import_modname(modname)
>>> print('pat = %r' % (pat,))
>>> print('mod = %r' % (mod,))
>>> found_list = search_module(mod, pat, recursive=recursive)
>>> result = ('found_list = %s' % (ut.repr2(found_list),))
>>> print(result)
Ignore:
mod = cv2
pat = 'freak'
"""
if _seen is not None and mod in _seen:
return []
import utool as ut
reflags = re.IGNORECASE * ignore_case
found_list = [name for name in dir(mod) if re.search(pat, name, flags=reflags)]
if recursive:
if _seen is None:
_seen = set()
_seen.add(mod)
module_attrs = [getattr(mod, name) for name in dir(mod)]
submodules = [
submod for submod in module_attrs
if isinstance(submod, types.ModuleType) and submod not in _seen and
ut.is_defined_by_module(submod, mod)
]
for submod in submodules:
found_list += search_module(submod, pat, ignore_case=ignore_case, recursive=recursive, _seen=_seen)
# found_list = [name for name in dir(mod) if name.find(pat) >= 0]
found_list = ut.unique_ordered(found_list)
return found_list | [
"def",
"search_module",
"(",
"mod",
",",
"pat",
",",
"ignore_case",
"=",
"True",
",",
"recursive",
"=",
"False",
",",
"_seen",
"=",
"None",
")",
":",
"if",
"_seen",
"is",
"not",
"None",
"and",
"mod",
"in",
"_seen",
":",
"return",
"[",
"]",
"import",
"utool",
"as",
"ut",
"reflags",
"=",
"re",
".",
"IGNORECASE",
"*",
"ignore_case",
"found_list",
"=",
"[",
"name",
"for",
"name",
"in",
"dir",
"(",
"mod",
")",
"if",
"re",
".",
"search",
"(",
"pat",
",",
"name",
",",
"flags",
"=",
"reflags",
")",
"]",
"if",
"recursive",
":",
"if",
"_seen",
"is",
"None",
":",
"_seen",
"=",
"set",
"(",
")",
"_seen",
".",
"add",
"(",
"mod",
")",
"module_attrs",
"=",
"[",
"getattr",
"(",
"mod",
",",
"name",
")",
"for",
"name",
"in",
"dir",
"(",
"mod",
")",
"]",
"submodules",
"=",
"[",
"submod",
"for",
"submod",
"in",
"module_attrs",
"if",
"isinstance",
"(",
"submod",
",",
"types",
".",
"ModuleType",
")",
"and",
"submod",
"not",
"in",
"_seen",
"and",
"ut",
".",
"is_defined_by_module",
"(",
"submod",
",",
"mod",
")",
"]",
"for",
"submod",
"in",
"submodules",
":",
"found_list",
"+=",
"search_module",
"(",
"submod",
",",
"pat",
",",
"ignore_case",
"=",
"ignore_case",
",",
"recursive",
"=",
"recursive",
",",
"_seen",
"=",
"_seen",
")",
"# found_list = [name for name in dir(mod) if name.find(pat) >= 0]",
"found_list",
"=",
"ut",
".",
"unique_ordered",
"(",
"found_list",
")",
"return",
"found_list"
] | r"""
Searches module functions, classes, and constants for members matching a
pattern.
Args:
mod (module): live python module
pat (str): regular expression
Returns:
list: found_list
CommandLine:
python -m utool.util_dev --exec-search_module --mod=utool --pat=module
python -m utool.util_dev --exec-search_module --mod=opengm --pat=cut
python -m utool.util_dev --exec-search_module --mod=opengm --pat=multi
python -m utool.util_dev --exec-search_module --mod=plottool --pat=networkx
python -m utool.util_dev --exec-search_module --mod=utool --pat=Levenshtein
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> recursive = True
>>> ignore_case = True
>>> modname = ut.get_argval('--mod', type_=str, default='utool')
>>> pat = ut.get_argval('--pat', type_=str, default='search')
>>> mod = ut.import_modname(modname)
>>> print('pat = %r' % (pat,))
>>> print('mod = %r' % (mod,))
>>> found_list = search_module(mod, pat, recursive=recursive)
>>> result = ('found_list = %s' % (ut.repr2(found_list),))
>>> print(result)
Ignore:
mod = cv2
pat = 'freak' | [
"r",
"Searches",
"module",
"functions",
"classes",
"and",
"constants",
"for",
"members",
"matching",
"a",
"pattern",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2268-L2325 | train |
Erotemic/utool | utool/util_dev.py | instancelist | def instancelist(obj_list, check=False, shared_attrs=None):
"""
Executes methods and attribute calls on a list of objects of the same type
Bundles a list of objects of the same type into a single object.
The new object contains the same functions as each original object
but applies them to each element of the list independently when called.
CommandLine:
python -m utool.util_dev instancelist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> obj_list = ['hi', 'bye', 'foo']
>>> self = ut.instancelist(obj_list, check=False)
>>> print(self)
>>> print(self.upper())
>>> print(self.isalpha())
"""
class InstanceList_(object):
def __init__(self, obj_list, shared_attrs=None):
self._obj_list = []
self._shared_public_attrs = []
self._example_type = None
if len(obj_list) > 0:
import utool as ut
self._obj_list = obj_list
example_obj = obj_list[0]
example_type = type(example_obj)
self._example_type = example_type
if shared_attrs is None:
if check:
attrsgen = [set(dir(obj)) for obj in obj_list]
shared_attrs = list(reduce(set.intersection, attrsgen))
else:
shared_attrs = dir(example_obj)
#allowed = ['__getitem__'] # TODO, put in metaclass
allowed = []
self._shared_public_attrs = [
a for a in shared_attrs
if a in allowed or not a.startswith('_')
]
for attrname in self._shared_public_attrs:
attrtype = getattr(example_type, attrname, None)
if attrtype is not None and isinstance(attrtype, property):
# need to do this as metaclass
setattr(InstanceList_, attrname,
property(self._define_prop(attrname)))
else:
func = self._define_func(attrname)
ut.inject_func_as_method(self, func, attrname)
def __nice__(self):
if self._example_type is None:
typename = 'object'
else:
typename = self._example_type.__name__
return 'of %d %s(s)' % (len(self._obj_list), typename)
def __repr__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return '<%s(%s) at %s>' % (classname, devnice, hex(id(self)))
def __str__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return '<%s(%s)>' % (classname, devnice)
def __getitem__(self, key):
# TODO, put in metaclass
return self._map_method('__getitem__', key)
def _define_func(self, attrname):
import utool as ut
def _wrapper(self, *args, **kwargs):
return self._map_method(attrname, *args, **kwargs)
ut.set_funcname(_wrapper, attrname)
return _wrapper
def _map_method(self, attrname, *args, **kwargs):
mapped_vals = [getattr(obj, attrname)(*args, **kwargs)
for obj in self._obj_list]
return mapped_vals
def _define_prop(self, attrname):
import utool as ut
def _getter(self):
return self._map_property(attrname)
ut.set_funcname(_getter, 'get_' + attrname)
return _getter
def _map_property(self, attrname):
mapped_vals = [getattr(obj, attrname) for obj in self._obj_list]
return mapped_vals
return InstanceList_(obj_list, shared_attrs) | python | def instancelist(obj_list, check=False, shared_attrs=None):
"""
Executes methods and attribute calls on a list of objects of the same type
Bundles a list of objects of the same type into a single object.
The new object contains the same functions as each original object
but applies them to each element of the list independently when called.
CommandLine:
python -m utool.util_dev instancelist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> obj_list = ['hi', 'bye', 'foo']
>>> self = ut.instancelist(obj_list, check=False)
>>> print(self)
>>> print(self.upper())
>>> print(self.isalpha())
"""
class InstanceList_(object):
def __init__(self, obj_list, shared_attrs=None):
self._obj_list = []
self._shared_public_attrs = []
self._example_type = None
if len(obj_list) > 0:
import utool as ut
self._obj_list = obj_list
example_obj = obj_list[0]
example_type = type(example_obj)
self._example_type = example_type
if shared_attrs is None:
if check:
attrsgen = [set(dir(obj)) for obj in obj_list]
shared_attrs = list(reduce(set.intersection, attrsgen))
else:
shared_attrs = dir(example_obj)
#allowed = ['__getitem__'] # TODO, put in metaclass
allowed = []
self._shared_public_attrs = [
a for a in shared_attrs
if a in allowed or not a.startswith('_')
]
for attrname in self._shared_public_attrs:
attrtype = getattr(example_type, attrname, None)
if attrtype is not None and isinstance(attrtype, property):
# need to do this as metaclass
setattr(InstanceList_, attrname,
property(self._define_prop(attrname)))
else:
func = self._define_func(attrname)
ut.inject_func_as_method(self, func, attrname)
def __nice__(self):
if self._example_type is None:
typename = 'object'
else:
typename = self._example_type.__name__
return 'of %d %s(s)' % (len(self._obj_list), typename)
def __repr__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return '<%s(%s) at %s>' % (classname, devnice, hex(id(self)))
def __str__(self):
classname = self.__class__.__name__
devnice = self.__nice__()
return '<%s(%s)>' % (classname, devnice)
def __getitem__(self, key):
# TODO, put in metaclass
return self._map_method('__getitem__', key)
def _define_func(self, attrname):
import utool as ut
def _wrapper(self, *args, **kwargs):
return self._map_method(attrname, *args, **kwargs)
ut.set_funcname(_wrapper, attrname)
return _wrapper
def _map_method(self, attrname, *args, **kwargs):
mapped_vals = [getattr(obj, attrname)(*args, **kwargs)
for obj in self._obj_list]
return mapped_vals
def _define_prop(self, attrname):
import utool as ut
def _getter(self):
return self._map_property(attrname)
ut.set_funcname(_getter, 'get_' + attrname)
return _getter
def _map_property(self, attrname):
mapped_vals = [getattr(obj, attrname) for obj in self._obj_list]
return mapped_vals
return InstanceList_(obj_list, shared_attrs) | [
"def",
"instancelist",
"(",
"obj_list",
",",
"check",
"=",
"False",
",",
"shared_attrs",
"=",
"None",
")",
":",
"class",
"InstanceList_",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"obj_list",
",",
"shared_attrs",
"=",
"None",
")",
":",
"self",
".",
"_obj_list",
"=",
"[",
"]",
"self",
".",
"_shared_public_attrs",
"=",
"[",
"]",
"self",
".",
"_example_type",
"=",
"None",
"if",
"len",
"(",
"obj_list",
")",
">",
"0",
":",
"import",
"utool",
"as",
"ut",
"self",
".",
"_obj_list",
"=",
"obj_list",
"example_obj",
"=",
"obj_list",
"[",
"0",
"]",
"example_type",
"=",
"type",
"(",
"example_obj",
")",
"self",
".",
"_example_type",
"=",
"example_type",
"if",
"shared_attrs",
"is",
"None",
":",
"if",
"check",
":",
"attrsgen",
"=",
"[",
"set",
"(",
"dir",
"(",
"obj",
")",
")",
"for",
"obj",
"in",
"obj_list",
"]",
"shared_attrs",
"=",
"list",
"(",
"reduce",
"(",
"set",
".",
"intersection",
",",
"attrsgen",
")",
")",
"else",
":",
"shared_attrs",
"=",
"dir",
"(",
"example_obj",
")",
"#allowed = ['__getitem__'] # TODO, put in metaclass",
"allowed",
"=",
"[",
"]",
"self",
".",
"_shared_public_attrs",
"=",
"[",
"a",
"for",
"a",
"in",
"shared_attrs",
"if",
"a",
"in",
"allowed",
"or",
"not",
"a",
".",
"startswith",
"(",
"'_'",
")",
"]",
"for",
"attrname",
"in",
"self",
".",
"_shared_public_attrs",
":",
"attrtype",
"=",
"getattr",
"(",
"example_type",
",",
"attrname",
",",
"None",
")",
"if",
"attrtype",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"attrtype",
",",
"property",
")",
":",
"# need to do this as metaclass",
"setattr",
"(",
"InstanceList_",
",",
"attrname",
",",
"property",
"(",
"self",
".",
"_define_prop",
"(",
"attrname",
")",
")",
")",
"else",
":",
"func",
"=",
"self",
".",
"_define_func",
"(",
"attrname",
")",
"ut",
".",
"inject_func_as_method",
"(",
"self",
",",
"func",
",",
"attrname",
")",
"def",
"__nice__",
"(",
"self",
")",
":",
"if",
"self",
".",
"_example_type",
"is",
"None",
":",
"typename",
"=",
"'object'",
"else",
":",
"typename",
"=",
"self",
".",
"_example_type",
".",
"__name__",
"return",
"'of %d %s(s)'",
"%",
"(",
"len",
"(",
"self",
".",
"_obj_list",
")",
",",
"typename",
")",
"def",
"__repr__",
"(",
"self",
")",
":",
"classname",
"=",
"self",
".",
"__class__",
".",
"__name__",
"devnice",
"=",
"self",
".",
"__nice__",
"(",
")",
"return",
"'<%s(%s) at %s>'",
"%",
"(",
"classname",
",",
"devnice",
",",
"hex",
"(",
"id",
"(",
"self",
")",
")",
")",
"def",
"__str__",
"(",
"self",
")",
":",
"classname",
"=",
"self",
".",
"__class__",
".",
"__name__",
"devnice",
"=",
"self",
".",
"__nice__",
"(",
")",
"return",
"'<%s(%s)>'",
"%",
"(",
"classname",
",",
"devnice",
")",
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"# TODO, put in metaclass",
"return",
"self",
".",
"_map_method",
"(",
"'__getitem__'",
",",
"key",
")",
"def",
"_define_func",
"(",
"self",
",",
"attrname",
")",
":",
"import",
"utool",
"as",
"ut",
"def",
"_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_map_method",
"(",
"attrname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"ut",
".",
"set_funcname",
"(",
"_wrapper",
",",
"attrname",
")",
"return",
"_wrapper",
"def",
"_map_method",
"(",
"self",
",",
"attrname",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"mapped_vals",
"=",
"[",
"getattr",
"(",
"obj",
",",
"attrname",
")",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"for",
"obj",
"in",
"self",
".",
"_obj_list",
"]",
"return",
"mapped_vals",
"def",
"_define_prop",
"(",
"self",
",",
"attrname",
")",
":",
"import",
"utool",
"as",
"ut",
"def",
"_getter",
"(",
"self",
")",
":",
"return",
"self",
".",
"_map_property",
"(",
"attrname",
")",
"ut",
".",
"set_funcname",
"(",
"_getter",
",",
"'get_'",
"+",
"attrname",
")",
"return",
"_getter",
"def",
"_map_property",
"(",
"self",
",",
"attrname",
")",
":",
"mapped_vals",
"=",
"[",
"getattr",
"(",
"obj",
",",
"attrname",
")",
"for",
"obj",
"in",
"self",
".",
"_obj_list",
"]",
"return",
"mapped_vals",
"return",
"InstanceList_",
"(",
"obj_list",
",",
"shared_attrs",
")"
] | Executes methods and attribute calls on a list of objects of the same type
Bundles a list of objects of the same type into a single object.
The new object contains the same functions as each original object
but applies them to each element of the list independently when called.
CommandLine:
python -m utool.util_dev instancelist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> obj_list = ['hi', 'bye', 'foo']
>>> self = ut.instancelist(obj_list, check=False)
>>> print(self)
>>> print(self.upper())
>>> print(self.isalpha()) | [
"Executes",
"methods",
"and",
"attribute",
"calls",
"on",
"a",
"list",
"of",
"objects",
"of",
"the",
"same",
"type"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2570-L2672 | train |
Erotemic/utool | utool/util_dev.py | _heappush_max | def _heappush_max(heap, item):
""" why is this not in heapq """
heap.append(item)
heapq._siftdown_max(heap, 0, len(heap) - 1) | python | def _heappush_max(heap, item):
""" why is this not in heapq """
heap.append(item)
heapq._siftdown_max(heap, 0, len(heap) - 1) | [
"def",
"_heappush_max",
"(",
"heap",
",",
"item",
")",
":",
"heap",
".",
"append",
"(",
"item",
")",
"heapq",
".",
"_siftdown_max",
"(",
"heap",
",",
"0",
",",
"len",
"(",
"heap",
")",
"-",
"1",
")"
] | why is this not in heapq | [
"why",
"is",
"this",
"not",
"in",
"heapq"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L3235-L3238 | train |
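A sketch of the max-heap trick the entry above relies on. `heapq` only ships the private `_heapify_max`/`_siftdown_max` helpers, so this leans on non-public API that can change between Python versions.

import heapq
from utool.util_dev import _heappush_max  # pinned-revision assumption

heap = [9, 5, 7, 1]
heapq._heapify_max(heap)  # private helper: arrange the list as a max-heap
_heappush_max(heap, 8)
print(heap[0])  # 9 -- the maximum stays at the root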
Erotemic/utool | utool/util_dev.py | ColumnLists.take_column | def take_column(self, keys, *extra_keys):
""" Takes a subset of columns """
import utool as ut
keys = ut.ensure_iterable(keys) + list(extra_keys)
key_to_list = ut.dict_subset(self._key_to_list, keys)
newself = self.__class__(key_to_list, self._meta.copy())
return newself | python | def take_column(self, keys, *extra_keys):
""" Takes a subset of columns """
import utool as ut
keys = ut.ensure_iterable(keys) + list(extra_keys)
key_to_list = ut.dict_subset(self._key_to_list, keys)
newself = self.__class__(key_to_list, self._meta.copy())
return newself | [
"def",
"take_column",
"(",
"self",
",",
"keys",
",",
"*",
"extra_keys",
")",
":",
"import",
"utool",
"as",
"ut",
"keys",
"=",
"ut",
".",
"ensure_iterable",
"(",
"keys",
")",
"+",
"list",
"(",
"extra_keys",
")",
"key_to_list",
"=",
"ut",
".",
"dict_subset",
"(",
"self",
".",
"_key_to_list",
",",
"keys",
")",
"newself",
"=",
"self",
".",
"__class__",
"(",
"key_to_list",
",",
"self",
".",
"_meta",
".",
"copy",
"(",
")",
")",
"return",
"newself"
] | Takes a subset of columns | [
"Takes",
"a",
"subset",
"of",
"columns"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2830-L2836 | train |
Erotemic/utool | utool/util_dev.py | ColumnLists.take | def take(self, idxs):
""" Takes a subset of rows """
import utool as ut
if False:
key_to_list = ut.odict([
(key, ut.take(val, idxs))
for key, val in six.iteritems(self._key_to_list)
])
else:
import numpy as np
key_to_list = ut.odict([
(key, ut.take(val, idxs))
if not isinstance(val, np.ndarray)
else val.take(idxs, axis=0)
for key, val in six.iteritems(self._key_to_list)
])
newself = self.__class__(key_to_list, self._meta.copy())
return newself | python | def take(self, idxs):
""" Takes a subset of rows """
import utool as ut
if False:
key_to_list = ut.odict([
(key, ut.take(val, idxs))
for key, val in six.iteritems(self._key_to_list)
])
else:
import numpy as np
key_to_list = ut.odict([
(key, ut.take(val, idxs))
if not isinstance(val, np.ndarray)
else val.take(idxs, axis=0)
for key, val in six.iteritems(self._key_to_list)
])
newself = self.__class__(key_to_list, self._meta.copy())
return newself | [
"def",
"take",
"(",
"self",
",",
"idxs",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"False",
":",
"key_to_list",
"=",
"ut",
".",
"odict",
"(",
"[",
"(",
"key",
",",
"ut",
".",
"take",
"(",
"val",
",",
"idxs",
")",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_key_to_list",
")",
"]",
")",
"else",
":",
"import",
"numpy",
"as",
"np",
"key_to_list",
"=",
"ut",
".",
"odict",
"(",
"[",
"(",
"key",
",",
"ut",
".",
"take",
"(",
"val",
",",
"idxs",
")",
")",
"if",
"not",
"isinstance",
"(",
"val",
",",
"np",
".",
"ndarray",
")",
"else",
"val",
".",
"take",
"(",
"idxs",
",",
"axis",
"=",
"0",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"_key_to_list",
")",
"]",
")",
"newself",
"=",
"self",
".",
"__class__",
"(",
"key_to_list",
",",
"self",
".",
"_meta",
".",
"copy",
"(",
")",
")",
"return",
"newself"
] | Takes a subset of rows | [
"Takes",
"a",
"subset",
"of",
"rows"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2838-L2855 | train |
Erotemic/utool | utool/util_dev.py | ColumnLists.remove | def remove(self, idxs):
""" Returns a copy with idxs removed """
import utool as ut
keep_idxs = ut.index_complement(idxs, len(self))
return self.take(keep_idxs) | python | def remove(self, idxs):
""" Returns a copy with idxs removed """
import utool as ut
keep_idxs = ut.index_complement(idxs, len(self))
return self.take(keep_idxs) | [
"def",
"remove",
"(",
"self",
",",
"idxs",
")",
":",
"import",
"utool",
"as",
"ut",
"keep_idxs",
"=",
"ut",
".",
"index_complement",
"(",
"idxs",
",",
"len",
"(",
"self",
")",
")",
"return",
"self",
".",
"take",
"(",
"keep_idxs",
")"
] | Returns a copy with idxs removed | [
"Returns",
"a",
"copy",
"with",
"idxs",
"removed"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2857-L2861 | train |
Erotemic/utool | utool/util_dev.py | ColumnLists.group_items | def group_items(self, labels):
""" group as dict """
import utool as ut
unique_labels, groups = self.group(labels)
label_to_group = ut.odict(zip(unique_labels, groups))
return label_to_group | python | def group_items(self, labels):
""" group as dict """
import utool as ut
unique_labels, groups = self.group(labels)
label_to_group = ut.odict(zip(unique_labels, groups))
return label_to_group | [
"def",
"group_items",
"(",
"self",
",",
"labels",
")",
":",
"import",
"utool",
"as",
"ut",
"unique_labels",
",",
"groups",
"=",
"self",
".",
"group",
"(",
"labels",
")",
"label_to_group",
"=",
"ut",
".",
"odict",
"(",
"zip",
"(",
"unique_labels",
",",
"groups",
")",
")",
"return",
"label_to_group"
] | group as dict | [
"group",
"as",
"dict"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2880-L2885 | train |
Erotemic/utool | utool/util_dev.py | ColumnLists.group | def group(self, labels):
""" group as list """
unique_labels, groupxs = self.group_indicies(labels)
groups = [self.take(idxs) for idxs in groupxs]
return unique_labels, groups | python | def group(self, labels):
""" group as list """
unique_labels, groupxs = self.group_indicies(labels)
groups = [self.take(idxs) for idxs in groupxs]
return unique_labels, groups | [
"def",
"group",
"(",
"self",
",",
"labels",
")",
":",
"unique_labels",
",",
"groupxs",
"=",
"self",
".",
"group_indicies",
"(",
"labels",
")",
"groups",
"=",
"[",
"self",
".",
"take",
"(",
"idxs",
")",
"for",
"idxs",
"in",
"groupxs",
"]",
"return",
"unique_labels",
",",
"groups"
] | group as list | [
"group",
"as",
"list"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2887-L2891 | train |
Erotemic/utool | utool/util_dev.py | ColumnLists.cast_column | def cast_column(self, keys, func):
""" like map column but applies values inplace """
import utool as ut
for key in ut.ensure_iterable(keys):
self[key] = [func(v) for v in self[key]] | python | def cast_column(self, keys, func):
""" like map column but applies values inplace """
import utool as ut
for key in ut.ensure_iterable(keys):
self[key] = [func(v) for v in self[key]] | [
"def",
"cast_column",
"(",
"self",
",",
"keys",
",",
"func",
")",
":",
"import",
"utool",
"as",
"ut",
"for",
"key",
"in",
"ut",
".",
"ensure_iterable",
"(",
"keys",
")",
":",
"self",
"[",
"key",
"]",
"=",
"[",
"func",
"(",
"v",
")",
"for",
"v",
"in",
"self",
"[",
"key",
"]",
"]"
] | like map column but applies values inplace | [
"like",
"map",
"column",
"but",
"applies",
"values",
"inplace"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2919-L2923 | train |
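A combined sketch for the ColumnLists entries above (`take_column`, `take`, `remove`, `group_items`, `group`, `cast_column`), assuming the class is reachable as `ut.ColumnLists` in the pinned revision; the data is made up.

import utool as ut  # pinned-revision assumption

self = ut.ColumnLists({
    'name': ['a', 'b', 'a', 'c'],
    'size': ['1', '2', '3', '4'],
})
self.cast_column('size', int)        # in-place: '1' -> 1, ...
sizes = self.take_column('size')     # column subset, still a ColumnLists
first_two = self.take([0, 1])        # row subset by index
rest = self.remove([0, 1])           # complement of the same rows
name_to_group = self.group_items(self['name'])  # {'a': 2 rows, 'b': 1 row, 'c': 1 row}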
Erotemic/utool | utool/util_dev.py | ColumnLists.merge_rows | def merge_rows(self, key, merge_scalars=True):
"""
Uses key as a unique index and merges all duplicate rows. Use
cast_column to modify types of columns before merging to affect
behavior of duplicate rectification.
Args:
key: row to merge on
merge_scalars: if True, scalar values become lists
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> key_to_list = {
>>> 'uuid': [1, 1, 2, 3, 4, 2, 1],
>>> 'a': [1, 2, 3, 4, 5, 6, 7],
>>> 'b': [[1], [2], [3], [4], [5], [6], [7]],
>>> 'c': [[1], [1], [2], [3], [4], [2], [1]],
>>> }
>>> self = ColumnLists(key_to_list)
>>> key = 'uuid'
>>> newself = self.merge_rows('uuid')
>>> print(newself.to_csv())
# a, c, b, uuid
4, [3], [4], 3
5, [4], [5], 4
"[1, 2, 7]", "[1, 1, 1]", "[1, 2, 7]", "[1, 1, 1]"
"[3, 6]", "[2, 2]", "[3, 6]", "[2, 2]"
"""
import utool as ut
unique_labels, groupxs = self.group_indicies(key)
single_xs = [xs for xs in groupxs if len(xs) == 1]
multi_xs = [xs for xs in groupxs if len(xs) > 1]
singles = self.take(ut.flatten(single_xs))
multis = [self.take(idxs) for idxs in multi_xs]
merged_groups = []
for group in multis:
newgroup = {}
for key_ in group.keys():
val = group[key_]
if key_ == key:
# key_ was guaranteed unique
val_ = val[0]
elif hasattr(val[0].__class__, 'union'):
# HACK
# Sets are unioned
val_ = ut.oset.union(*val)
elif isinstance(val[0], (ut.oset,)):
# Sets are unioned
val_ = ut.oset.union(*val)
elif isinstance(val[0], (set)):
# Sets are unioned
val_ = set.union(*val)
elif isinstance(val[0], (tuple, list)):
# Lists are merged together
val_ = ut.flatten(val)
#val_ = ut.unique(ut.flatten(val))
else:
if ut.allsame(val):
# Merge items that are the same
val_ = val[0]
else:
if merge_scalars:
# If merging scalars is ok, then
# Values become lists if they are different
val_ = val
else:
if True:
# If there is only one non-none value then use that.
other_vals = ut.filter_Nones(val)
if len(other_vals) == 1:
val_ = other_vals[0]
else:
raise ValueError(
'tried to merge a scalar in %r, val=%r' % (
key_, val))
else:
# If merging scalars is not ok, then
# we must raise an error
raise ValueError(
'tried to merge a scalar in %r, val=%r' % (
key_, val))
newgroup[key_] = [val_]
merged_groups.append(ut.ColumnLists(newgroup))
merged_multi = self.__class__.flatten(merged_groups)
merged = singles + merged_multi
return merged | python | def merge_rows(self, key, merge_scalars=True):
"""
Uses key as a unique index and merges all duplicate rows. Use
cast_column to modify types of columns before merging to affect
behavior of duplicate rectification.
Args:
key: row to merge on
merge_scalars: if True, scalar values become lists
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> key_to_list = {
>>> 'uuid': [1, 1, 2, 3, 4, 2, 1],
>>> 'a': [1, 2, 3, 4, 5, 6, 7],
>>> 'b': [[1], [2], [3], [4], [5], [6], [7]],
>>> 'c': [[1], [1], [2], [3], [4], [2], [1]],
>>> }
>>> self = ColumnLists(key_to_list)
>>> key = 'uuid'
>>> newself = self.merge_rows('uuid')
>>> print(newself.to_csv())
# a, c, b, uuid
4, [3], [4], 3
5, [4], [5], 4
"[1, 2, 7]", "[1, 1, 1]", "[1, 2, 7]", "[1, 1, 1]"
"[3, 6]", "[2, 2]", "[3, 6]", "[2, 2]"
"""
import utool as ut
unique_labels, groupxs = self.group_indicies(key)
single_xs = [xs for xs in groupxs if len(xs) == 1]
multi_xs = [xs for xs in groupxs if len(xs) > 1]
singles = self.take(ut.flatten(single_xs))
multis = [self.take(idxs) for idxs in multi_xs]
merged_groups = []
for group in multis:
newgroup = {}
for key_ in group.keys():
val = group[key_]
if key_ == key:
# key_ was guaranteed unique
val_ = val[0]
elif hasattr(val[0].__class__, 'union'):
# HACK
# Sets are unioned
val_ = ut.oset.union(*val)
elif isinstance(val[0], (ut.oset,)):
# Sets are unioned
val_ = ut.oset.union(*val)
elif isinstance(val[0], (set)):
# Sets are unioned
val_ = set.union(*val)
elif isinstance(val[0], (tuple, list)):
# Lists are merged together
val_ = ut.flatten(val)
#val_ = ut.unique(ut.flatten(val))
else:
if ut.allsame(val):
# Merge items that are the same
val_ = val[0]
else:
if merge_scalars:
# If merging scalars is ok, then
# Values become lists if they are different
val_ = val
else:
if True:
# If there is only one non-none value then use that.
other_vals = ut.filter_Nones(val)
if len(other_vals) == 1:
val_ = other_vals[0]
else:
raise ValueError(
'tried to merge a scalar in %r, val=%r' % (
key_, val))
else:
# If merging scalars is not ok, then
# we must raise an error
raise ValueError(
'tried to merge a scalar in %r, val=%r' % (
key_, val))
newgroup[key_] = [val_]
merged_groups.append(ut.ColumnLists(newgroup))
merged_multi = self.__class__.flatten(merged_groups)
merged = singles + merged_multi
return merged | [
"def",
"merge_rows",
"(",
"self",
",",
"key",
",",
"merge_scalars",
"=",
"True",
")",
":",
"import",
"utool",
"as",
"ut",
"unique_labels",
",",
"groupxs",
"=",
"self",
".",
"group_indicies",
"(",
"key",
")",
"single_xs",
"=",
"[",
"xs",
"for",
"xs",
"in",
"groupxs",
"if",
"len",
"(",
"xs",
")",
"==",
"1",
"]",
"multi_xs",
"=",
"[",
"xs",
"for",
"xs",
"in",
"groupxs",
"if",
"len",
"(",
"xs",
")",
">",
"1",
"]",
"singles",
"=",
"self",
".",
"take",
"(",
"ut",
".",
"flatten",
"(",
"single_xs",
")",
")",
"multis",
"=",
"[",
"self",
".",
"take",
"(",
"idxs",
")",
"for",
"idxs",
"in",
"multi_xs",
"]",
"merged_groups",
"=",
"[",
"]",
"for",
"group",
"in",
"multis",
":",
"newgroup",
"=",
"{",
"}",
"for",
"key_",
"in",
"group",
".",
"keys",
"(",
")",
":",
"val",
"=",
"group",
"[",
"key_",
"]",
"if",
"key_",
"==",
"key",
":",
"# key_ was garuenteed unique",
"val_",
"=",
"val",
"[",
"0",
"]",
"elif",
"hasattr",
"(",
"val",
"[",
"0",
"]",
".",
"__class__",
",",
"'union'",
")",
":",
"# HACK",
"# Sets are unioned",
"val_",
"=",
"ut",
".",
"oset",
".",
"union",
"(",
"*",
"val",
")",
"elif",
"isinstance",
"(",
"val",
"[",
"0",
"]",
",",
"(",
"ut",
".",
"oset",
",",
")",
")",
":",
"# Sets are unioned",
"val_",
"=",
"ut",
".",
"oset",
".",
"union",
"(",
"*",
"val",
")",
"elif",
"isinstance",
"(",
"val",
"[",
"0",
"]",
",",
"(",
"set",
")",
")",
":",
"# Sets are unioned",
"val_",
"=",
"set",
".",
"union",
"(",
"*",
"val",
")",
"elif",
"isinstance",
"(",
"val",
"[",
"0",
"]",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"# Lists are merged together",
"val_",
"=",
"ut",
".",
"flatten",
"(",
"val",
")",
"#val_ = ut.unique(ut.flatten(val))",
"else",
":",
"if",
"ut",
".",
"allsame",
"(",
"val",
")",
":",
"# Merge items that are the same",
"val_",
"=",
"val",
"[",
"0",
"]",
"else",
":",
"if",
"merge_scalars",
":",
"# If mergeing scalars is ok, then",
"# Values become lists if they are different",
"val_",
"=",
"val",
"else",
":",
"if",
"True",
":",
"# If there is only one non-none value then use that.",
"other_vals",
"=",
"ut",
".",
"filter_Nones",
"(",
"val",
")",
"if",
"len",
"(",
"other_vals",
")",
"==",
"1",
":",
"val_",
"=",
"val",
"[",
"0",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'tried to merge a scalar in %r, val=%r'",
"%",
"(",
"key_",
",",
"val",
")",
")",
"else",
":",
"# If merging scalars is not ok, then",
"# we must raise an error",
"raise",
"ValueError",
"(",
"'tried to merge a scalar in %r, val=%r'",
"%",
"(",
"key_",
",",
"val",
")",
")",
"newgroup",
"[",
"key_",
"]",
"=",
"[",
"val_",
"]",
"merged_groups",
".",
"append",
"(",
"ut",
".",
"ColumnLists",
"(",
"newgroup",
")",
")",
"merged_multi",
"=",
"self",
".",
"__class__",
".",
"flatten",
"(",
"merged_groups",
")",
"merged",
"=",
"singles",
"+",
"merged_multi",
"return",
"merged"
] | Uses key as a unique index and merges all duplicate rows. Use
cast_column to modify types of columns before merging to affect
behavior of duplicate rectification.
Args:
key: row to merge on
merge_scalars: if True, scalar values become lists
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dev import * # NOQA
>>> import utool as ut
>>> key_to_list = {
>>> 'uuid': [1, 1, 2, 3, 4, 2, 1],
>>> 'a': [1, 2, 3, 4, 5, 6, 7],
>>> 'b': [[1], [2], [3], [4], [5], [6], [7]],
>>> 'c': [[1], [1], [2], [3], [4], [2], [1]],
>>> }
>>> self = ColumnLists(key_to_list)
>>> key = 'uuid'
>>> newself = self.merge_rows('uuid')
>>> print(newself.to_csv())
# a, c, b, uuid
4, [3], [4], 3
5, [4], [5], 4
"[1, 2, 7]", "[1, 1, 1]", "[1, 2, 7]", "[1, 1, 1]"
"[3, 6]", "[2, 2]", "[3, 6]", "[2, 2]" | [
"Uses",
"key",
"as",
"a",
"unique",
"index",
"an",
"merges",
"all",
"duplicates",
"rows",
".",
"Use",
"cast_column",
"to",
"modify",
"types",
"of",
"columns",
"before",
"merging",
"to",
"affect",
"behavior",
"of",
"duplicate",
"rectification",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2934-L3024 | train |
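The docstring above recommends cast_column before merging; a hedged sketch of that workflow (column names illustrative):

import utool as ut
self = ut.ColumnLists({
    'uuid': [1, 1, 2],
    'tags': ['x', 'y', 'z'],
})
# Wrap scalars in lists first so duplicate rows concatenate rather than
# falling into the scalar-merge branch.
self.cast_column('tags', lambda v: [v])
merged = self.merge_rows('uuid', merge_scalars=False)
# uuid 1 now has tags ['x', 'y']; uuid 2 keeps tags ['z']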
Erotemic/utool | utool/util_dev.py | PriorityQueue.peek | def peek(self):
"""
Peek at the next item in the queue
"""
# Amortized O(1)
_heap = self._heap
_dict = self._dict
val, key = _heap[0]
# Remove items marked for lazy deletion as they are encountered
while key not in _dict or _dict[key] != val:
self._heappop(_heap)
val, key = _heap[0]
return key, val | python | def peek(self):
"""
Peek at the next item in the queue
"""
# Amortized O(1)
_heap = self._heap
_dict = self._dict
val, key = _heap[0]
# Remove items marked for lazy deletion as they are encountered
while key not in _dict or _dict[key] != val:
self._heappop(_heap)
val, key = _heap[0]
return key, val | [
"def",
"peek",
"(",
"self",
")",
":",
"# Ammortized O(1)",
"_heap",
"=",
"self",
".",
"_heap",
"_dict",
"=",
"self",
".",
"_dict",
"val",
",",
"key",
"=",
"_heap",
"[",
"0",
"]",
"# Remove items marked for lazy deletion as they are encountered",
"while",
"key",
"not",
"in",
"_dict",
"or",
"_dict",
"[",
"key",
"]",
"!=",
"val",
":",
"self",
".",
"_heappop",
"(",
"_heap",
")",
"val",
",",
"key",
"=",
"_heap",
"[",
"0",
"]",
"return",
"key",
",",
"val"
] | Peek at the next item in the queue | [
"Peek",
"at",
"the",
"next",
"item",
"in",
"the",
"queue"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L3363-L3375 | train |
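A hedged sketch of peek semantics. The constructor call mirrors the peek_many doctest below; it assumes items are (key, priority) pairs and that the default queue is ascending (min-first), which the explicit ascending=False in that doctest suggests.

import utool as ut
pq = ut.PriorityQueue([('a', 3), ('b', 1), ('c', 2)])
key, val = pq.peek()            # ('b', 1): the lowest-priority item
assert pq.peek() == (key, val)  # peek never consumes the item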
Erotemic/utool | utool/util_dev.py | PriorityQueue.peek_many | def peek_many(self, n):
"""
Actually this can be quite inefficient
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> items = list(zip(range(256), range(256)))
>>> n = 32
>>> ut.shuffle(items)
>>> self = ut.PriorityQueue(items, ascending=False)
>>> self.peek_many(56)
"""
if n == 0:
return []
elif n == 1:
return [self.peek()]
else:
items = list(self.pop_many(n))
self.update(items)
return items | python | def peek_many(self, n):
"""
Actually this can be quite inefficient
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> items = list(zip(range(256), range(256)))
>>> n = 32
>>> ut.shuffle(items)
>>> self = ut.PriorityQueue(items, ascending=False)
>>> self.peek_many(56)
"""
if n == 0:
return []
elif n == 1:
return [self.peek()]
else:
items = list(self.pop_many(n))
self.update(items)
return items | [
"def",
"peek_many",
"(",
"self",
",",
"n",
")",
":",
"if",
"n",
"==",
"0",
":",
"return",
"[",
"]",
"elif",
"n",
"==",
"1",
":",
"return",
"[",
"self",
".",
"peek",
"(",
")",
"]",
"else",
":",
"items",
"=",
"list",
"(",
"self",
".",
"pop_many",
"(",
"n",
")",
")",
"self",
".",
"update",
"(",
"items",
")",
"return",
"items"
] | Actually this can be quite inefficient
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> items = list(zip(range(256), range(256)))
>>> n = 32
>>> ut.shuffle(items)
>>> self = ut.PriorityQueue(items, ascending=False)
>>> self.peek_many(56) | [
"Actually",
"this",
"can",
"be",
"quite",
"inefficient"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L3377-L3397 | train |
Erotemic/utool | utool/util_dev.py | PriorityQueue.pop | def pop(self, key=util_const.NoParam, default=util_const.NoParam):
"""
Pop the next item off the queue
"""
# Dictionary pop if key is specified
if key is not util_const.NoParam:
if default is util_const.NoParam:
return (key, self._dict.pop(key))
else:
return (key, self._dict.pop(key, default))
# Otherwise do a heap pop
try:
# Amortized O(1)
_heap = self._heap
_dict = self._dict
val, key = self._heappop(_heap)
# Remove items marked for lazy deletion as they are encountered
while key not in _dict or _dict[key] != val:
val, key = self._heappop(_heap)
except IndexError:
if len(_heap) == 0:
raise IndexError('queue is empty')
else:
raise
del _dict[key]
return key, val | python | def pop(self, key=util_const.NoParam, default=util_const.NoParam):
"""
Pop the next item off the queue
"""
# Dictionary pop if key is specified
if key is not util_const.NoParam:
if default is util_const.NoParam:
return (key, self._dict.pop(key))
else:
return (key, self._dict.pop(key, default))
# Otherwise do a heap pop
try:
# Amortized O(1)
_heap = self._heap
_dict = self._dict
val, key = self._heappop(_heap)
# Remove items marked for lazy deletion as they are encountered
while key not in _dict or _dict[key] != val:
val, key = self._heappop(_heap)
except IndexError:
if len(_heap) == 0:
raise IndexError('queue is empty')
else:
raise
del _dict[key]
return key, val | [
"def",
"pop",
"(",
"self",
",",
"key",
"=",
"util_const",
".",
"NoParam",
",",
"default",
"=",
"util_const",
".",
"NoParam",
")",
":",
"# Dictionary pop if key is specified",
"if",
"key",
"is",
"not",
"util_const",
".",
"NoParam",
":",
"if",
"default",
"is",
"util_const",
".",
"NoParam",
":",
"return",
"(",
"key",
",",
"self",
".",
"_dict",
".",
"pop",
"(",
"key",
")",
")",
"else",
":",
"return",
"(",
"key",
",",
"self",
".",
"_dict",
".",
"pop",
"(",
"key",
",",
"default",
")",
")",
"# Otherwise do a heap pop",
"try",
":",
"# Ammortized O(1)",
"_heap",
"=",
"self",
".",
"_heap",
"_dict",
"=",
"self",
".",
"_dict",
"val",
",",
"key",
"=",
"self",
".",
"_heappop",
"(",
"_heap",
")",
"# Remove items marked for lazy deletion as they are encountered",
"while",
"key",
"not",
"in",
"_dict",
"or",
"_dict",
"[",
"key",
"]",
"!=",
"val",
":",
"val",
",",
"key",
"=",
"self",
".",
"_heappop",
"(",
"_heap",
")",
"except",
"IndexError",
":",
"if",
"len",
"(",
"_heap",
")",
"==",
"0",
":",
"raise",
"IndexError",
"(",
"'queue is empty'",
")",
"else",
":",
"raise",
"del",
"_dict",
"[",
"key",
"]",
"return",
"key",
",",
"val"
] | Pop the next item off the queue | [
"Pop",
"the",
"next",
"item",
"off",
"the",
"queue"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L3405-L3430 | train |
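A sketch of the two pop modes above, under the same assumptions as the peek example: a dict-style pop when key is given, otherwise a heap pop that lazily discards stale entries.

import utool as ut
pq = ut.PriorityQueue([('a', 3), ('b', 1), ('c', 2)])
print(pq.pop('c'))        # ('c', 2): removed by key, priority ignored
print(pq.pop())           # ('b', 1): best remaining item off the heap
print(pq.pop('z', None))  # ('z', None): a default avoids the KeyError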
Erotemic/utool | utool/_internal/util_importer.py | __execute_fromimport | def __execute_fromimport(module, modname, import_tuples, verbose=False):
""" Module From Imports """
if verbose:
print('[UTIL_IMPORT] EXECUTING %d FROM IMPORT TUPLES' % (len(import_tuples),))
from_imports = __get_from_imports(import_tuples)
for name, fromlist in from_imports:
full_modname = '.'.join((modname, name))
tmp = __import__(full_modname, globals(), locals(), fromlist=fromlist, level=0)
for attrname in fromlist:
setattr(module, attrname, getattr(tmp, attrname))
return from_imports | python | def __execute_fromimport(module, modname, import_tuples, verbose=False):
""" Module From Imports """
if verbose:
print('[UTIL_IMPORT] EXECUTING %d FROM IMPORT TUPLES' % (len(import_tuples),))
from_imports = __get_from_imports(import_tuples)
for name, fromlist in from_imports:
full_modname = '.'.join((modname, name))
tmp = __import__(full_modname, globals(), locals(), fromlist=fromlist, level=0)
for attrname in fromlist:
setattr(module, attrname, getattr(tmp, attrname))
return from_imports | [
"def",
"__execute_fromimport",
"(",
"module",
",",
"modname",
",",
"import_tuples",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'[UTIL_IMPORT] EXECUTING %d FROM IMPORT TUPLES'",
"%",
"(",
"len",
"(",
"import_tuples",
")",
",",
")",
")",
"from_imports",
"=",
"__get_from_imports",
"(",
"import_tuples",
")",
"for",
"name",
",",
"fromlist",
"in",
"from_imports",
":",
"full_modname",
"=",
"'.'",
".",
"join",
"(",
"(",
"modname",
",",
"name",
")",
")",
"tmp",
"=",
"__import__",
"(",
"full_modname",
",",
"globals",
"(",
")",
",",
"locals",
"(",
")",
",",
"fromlist",
"=",
"fromlist",
",",
"level",
"=",
"0",
")",
"for",
"attrname",
"in",
"fromlist",
":",
"setattr",
"(",
"module",
",",
"attrname",
",",
"getattr",
"(",
"tmp",
",",
"attrname",
")",
")",
"return",
"from_imports"
] | Module From Imports | [
"Module",
"From",
"Imports"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L34-L44 | train |
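A self-contained illustration (not utool code) of the __import__/setattr pattern in the record above: each name in fromlist is imported from the submodule and copied onto the parent module object.

import types

parent = types.ModuleType('fakepkg')   # stand-in for the target module
fromlist = ['join', 'exists']
tmp = __import__('os.path', globals(), locals(), fromlist=fromlist, level=0)
for attrname in fromlist:
    setattr(parent, attrname, getattr(tmp, attrname))
assert parent.join('a', 'b')           # the attribute now lives on parent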
Erotemic/utool | utool/_internal/util_importer.py | _initstr | def _initstr(modname, imports, from_imports, inject_execstr, withheader=True):
""" Calls the other string makers """
header = _make_module_header() if withheader else ''
import_str = _make_imports_str(imports, modname)
fromimport_str = _make_fromimport_str(from_imports, modname)
initstr = '\n'.join([str_ for str_ in [
header,
import_str,
fromimport_str,
inject_execstr,
] if len(str_) > 0])
return initstr | python | def _initstr(modname, imports, from_imports, inject_execstr, withheader=True):
""" Calls the other string makers """
header = _make_module_header() if withheader else ''
import_str = _make_imports_str(imports, modname)
fromimport_str = _make_fromimport_str(from_imports, modname)
initstr = '\n'.join([str_ for str_ in [
header,
import_str,
fromimport_str,
inject_execstr,
] if len(str_) > 0])
return initstr | [
"def",
"_initstr",
"(",
"modname",
",",
"imports",
",",
"from_imports",
",",
"inject_execstr",
",",
"withheader",
"=",
"True",
")",
":",
"header",
"=",
"_make_module_header",
"(",
")",
"if",
"withheader",
"else",
"''",
"import_str",
"=",
"_make_imports_str",
"(",
"imports",
",",
"modname",
")",
"fromimport_str",
"=",
"_make_fromimport_str",
"(",
"from_imports",
",",
"modname",
")",
"initstr",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"str_",
"for",
"str_",
"in",
"[",
"header",
",",
"import_str",
",",
"fromimport_str",
",",
"inject_execstr",
",",
"]",
"if",
"len",
"(",
"str_",
")",
">",
"0",
"]",
")",
"return",
"initstr"
] | Calls the other string makers | [
"Calls",
"the",
"other",
"string",
"makers"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L152-L163 | train |
Erotemic/utool | utool/_internal/util_importer.py | _inject_execstr | def _inject_execstr(modname, import_tuples):
""" Injection and Reload String Defs """
if modname == 'utool':
# Special case import of the util_inject module
injecter = 'util_inject'
injecter_import = ''
else:
# Normal case implicit import of util_inject
injecter_import = 'import utool'
injecter = 'utool'
injectstr_fmt = textwrap.dedent(
r'''
# STARTBLOCK
{injecter_import}
print, rrr, profile = {injecter}.inject2(__name__, '[{modname}]')
def reassign_submodule_attributes(verbose=1):
"""
Updates attributes in the __init__ modules with updated attributes
in the submodules.
"""
import sys
if verbose and '--quiet' not in sys.argv:
print('dev reimport')
# Self import
import {modname}
# Implicit reassignment.
seen_ = set([])
for tup in IMPORT_TUPLES:
if len(tup) > 2 and tup[2]:
continue # dont import package names
submodname, fromimports = tup[0:2]
submod = getattr({modname}, submodname)
for attr in dir(submod):
if attr.startswith('_'):
continue
if attr in seen_:
# This just holds off bad behavior
# but it does mimic normal util_import behavior
# which is good
continue
seen_.add(attr)
setattr({modname}, attr, getattr(submod, attr))
def reload_subs(verbose=1):
""" Reloads {modname} and submodules """
if verbose:
print('Reloading {modname} submodules')
rrr(verbose > 1)
def wrap_fbrrr(mod):
def fbrrr(*args, **kwargs):
""" fallback reload """
if verbose > 0:
print('Auto-reload (using rrr) not setup for mod=%r' % (mod,))
return fbrrr
def get_rrr(mod):
if hasattr(mod, 'rrr'):
return mod.rrr
else:
return wrap_fbrrr(mod)
def get_reload_subs(mod):
return getattr(mod, 'reload_subs', wrap_fbrrr(mod))
{reload_body}
rrr(verbose > 1)
try:
# hackish way of propagating up the new reloaded submodule attributes
reassign_submodule_attributes(verbose=verbose)
except Exception as ex:
print(ex)
rrrr = reload_subs
# ENDBLOCK
''')
injectstr_fmt = injectstr_fmt.replace('# STARTBLOCK', '')
injectstr_fmt = injectstr_fmt.replace('# ENDBLOCK', '')
rrrdir_fmt = ' get_reload_subs({modname})(verbose=verbose)'
rrrfile_fmt = ' get_rrr({modname})(verbose > 1)'
def _reload_command(tup):
if len(tup) > 2 and tup[2] is True:
return rrrdir_fmt.format(modname=tup[0])
else:
return rrrfile_fmt.format(modname=tup[0])
reload_body = '\n'.join(map(_reload_command, import_tuples)).strip()
format_dict = {
'modname': modname,
'reload_body': reload_body,
'injecter': injecter,
'injecter_import': injecter_import,
}
inject_execstr = injectstr_fmt.format(**format_dict).strip()
return inject_execstr | python | def _inject_execstr(modname, import_tuples):
""" Injection and Reload String Defs """
if modname == 'utool':
# Special case import of the util_inject module
injecter = 'util_inject'
injecter_import = ''
else:
# Normal case implicit import of util_inject
injecter_import = 'import utool'
injecter = 'utool'
injectstr_fmt = textwrap.dedent(
r'''
# STARTBLOCK
{injecter_import}
print, rrr, profile = {injecter}.inject2(__name__, '[{modname}]')
def reassign_submodule_attributes(verbose=1):
"""
Updates attributes in the __init__ modules with updated attributes
in the submodules.
"""
import sys
if verbose and '--quiet' not in sys.argv:
print('dev reimport')
# Self import
import {modname}
# Implicit reassignment.
seen_ = set([])
for tup in IMPORT_TUPLES:
if len(tup) > 2 and tup[2]:
continue # dont import package names
submodname, fromimports = tup[0:2]
submod = getattr({modname}, submodname)
for attr in dir(submod):
if attr.startswith('_'):
continue
if attr in seen_:
# This just holds off bad behavior
# but it does mimic normal util_import behavior
# which is good
continue
seen_.add(attr)
setattr({modname}, attr, getattr(submod, attr))
def reload_subs(verbose=1):
""" Reloads {modname} and submodules """
if verbose:
print('Reloading {modname} submodules')
rrr(verbose > 1)
def wrap_fbrrr(mod):
def fbrrr(*args, **kwargs):
""" fallback reload """
if verbose > 0:
print('Auto-reload (using rrr) not setup for mod=%r' % (mod,))
return fbrrr
def get_rrr(mod):
if hasattr(mod, 'rrr'):
return mod.rrr
else:
return wrap_fbrrr(mod)
def get_reload_subs(mod):
return getattr(mod, 'reload_subs', wrap_fbrrr(mod))
{reload_body}
rrr(verbose > 1)
try:
# hackish way of propagating up the new reloaded submodule attributes
reassign_submodule_attributes(verbose=verbose)
except Exception as ex:
print(ex)
rrrr = reload_subs
# ENDBLOCK
''')
injectstr_fmt = injectstr_fmt.replace('# STARTBLOCK', '')
injectstr_fmt = injectstr_fmt.replace('# ENDBLOCK', '')
rrrdir_fmt = ' get_reload_subs({modname})(verbose=verbose)'
rrrfile_fmt = ' get_rrr({modname})(verbose > 1)'
def _reload_command(tup):
if len(tup) > 2 and tup[2] is True:
return rrrdir_fmt.format(modname=tup[0])
else:
return rrrfile_fmt.format(modname=tup[0])
reload_body = '\n'.join(map(_reload_command, import_tuples)).strip()
format_dict = {
'modname': modname,
'reload_body': reload_body,
'injecter': injecter,
'injecter_import': injecter_import,
}
inject_execstr = injectstr_fmt.format(**format_dict).strip()
return inject_execstr | [
"def",
"_inject_execstr",
"(",
"modname",
",",
"import_tuples",
")",
":",
"if",
"modname",
"==",
"'utool'",
":",
"# Special case import of the util_inject module",
"injecter",
"=",
"'util_inject'",
"injecter_import",
"=",
"''",
"else",
":",
"# Normal case implicit import of util_inject",
"injecter_import",
"=",
"'import utool'",
"injecter",
"=",
"'utool'",
"injectstr_fmt",
"=",
"textwrap",
".",
"dedent",
"(",
"r'''\n # STARTBLOCK\n {injecter_import}\n print, rrr, profile = {injecter}.inject2(__name__, '[{modname}]')\n\n\n def reassign_submodule_attributes(verbose=1):\n \"\"\"\n Updates attributes in the __init__ modules with updated attributes\n in the submodules.\n \"\"\"\n import sys\n if verbose and '--quiet' not in sys.argv:\n print('dev reimport')\n # Self import\n import {modname}\n # Implicit reassignment.\n seen_ = set([])\n for tup in IMPORT_TUPLES:\n if len(tup) > 2 and tup[2]:\n continue # dont import package names\n submodname, fromimports = tup[0:2]\n submod = getattr({modname}, submodname)\n for attr in dir(submod):\n if attr.startswith('_'):\n continue\n if attr in seen_:\n # This just holds off bad behavior\n # but it does mimic normal util_import behavior\n # which is good\n continue\n seen_.add(attr)\n setattr({modname}, attr, getattr(submod, attr))\n\n\n def reload_subs(verbose=1):\n \"\"\" Reloads {modname} and submodules \"\"\"\n if verbose:\n print('Reloading {modname} submodules')\n rrr(verbose > 1)\n def wrap_fbrrr(mod):\n def fbrrr(*args, **kwargs):\n \"\"\" fallback reload \"\"\"\n if verbose > 0:\n print('Auto-reload (using rrr) not setup for mod=%r' % (mod,))\n return fbrrr\n def get_rrr(mod):\n if hasattr(mod, 'rrr'):\n return mod.rrr\n else:\n return wrap_fbrrr(mod)\n def get_reload_subs(mod):\n return getattr(mod, 'reload_subs', wrap_fbrrr(mod))\n {reload_body}\n rrr(verbose > 1)\n try:\n # hackish way of propogating up the new reloaded submodule attributes\n reassign_submodule_attributes(verbose=verbose)\n except Exception as ex:\n print(ex)\n rrrr = reload_subs\n # ENDBLOCK\n '''",
")",
"injectstr_fmt",
"=",
"injectstr_fmt",
".",
"replace",
"(",
"'# STARTBLOCK'",
",",
"''",
")",
"injectstr_fmt",
"=",
"injectstr_fmt",
".",
"replace",
"(",
"'# ENDBLOCK'",
",",
"''",
")",
"rrrdir_fmt",
"=",
"' get_reload_subs({modname})(verbose=verbose)'",
"rrrfile_fmt",
"=",
"' get_rrr({modname})(verbose > 1)'",
"def",
"_reload_command",
"(",
"tup",
")",
":",
"if",
"len",
"(",
"tup",
")",
">",
"2",
"and",
"tup",
"[",
"2",
"]",
"is",
"True",
":",
"return",
"rrrdir_fmt",
".",
"format",
"(",
"modname",
"=",
"tup",
"[",
"0",
"]",
")",
"else",
":",
"return",
"rrrfile_fmt",
".",
"format",
"(",
"modname",
"=",
"tup",
"[",
"0",
"]",
")",
"reload_body",
"=",
"'\\n'",
".",
"join",
"(",
"map",
"(",
"_reload_command",
",",
"import_tuples",
")",
")",
".",
"strip",
"(",
")",
"format_dict",
"=",
"{",
"'modname'",
":",
"modname",
",",
"'reload_body'",
":",
"reload_body",
",",
"'injecter'",
":",
"injecter",
",",
"'injecter_import'",
":",
"injecter_import",
",",
"}",
"inject_execstr",
"=",
"injectstr_fmt",
".",
"format",
"(",
"*",
"*",
"format_dict",
")",
".",
"strip",
"(",
")",
"return",
"inject_execstr"
] | Injection and Reload String Defs | [
"Injection",
"and",
"Reload",
"String",
"Defs"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L196-L288 | train |
Erotemic/utool | utool/_internal/util_importer.py | dynamic_import | def dynamic_import(modname, import_tuples, developing=True, ignore_froms=[],
dump=False, ignore_startswith=[], ignore_endswith=[],
ignore_list=[], check_not_imported=True, return_initstr=False,
verbose=False):
"""
MAIN ENTRY POINT
Dynamically import listed util libraries and their attributes.
Create reload_subs function.
Using __import__ like this is typically not considered good style. However,
it is better than import * and this will generate the good file text that
can be used when the module is 'frozen'
Returns:
str: init_inject_str - by default all imports are executed in this
function and only the remaining code needed to be executed is
returned to define the reload logic.
str, str: init_inject_str, init_str - if return_initstr is True then
also returns init_str defining the from imports.
Ignore:
ignore_startswith = []
ignore_endswith = []
check_not_imported = True
verbose = True
"""
if verbose:
print('[UTIL_IMPORT] Running Dynamic Imports for modname=%r ' % modname)
# Get the module that will be imported into
try:
module = sys.modules[modname]
except:
module = __import__(modname)
# List of modules to be imported
imports = [tup[0] for tup in import_tuples]
# Import the modules
__excecute_imports(module, modname, imports, verbose=verbose)
# If developing do explicit import stars
if developing:
from_imports = __execute_fromimport_star(module, modname, import_tuples,
ignore_list=ignore_list,
ignore_startswith=ignore_startswith,
ignore_endswith=ignore_endswith,
check_not_imported=check_not_imported,
verbose=verbose)
else:
from_imports = __execute_fromimport(module, modname, import_tuples, verbose=verbose)
inject_execstr = _inject_execstr(modname, import_tuples)
# If requested: print what the __init__ module should look like
dump_requested = (('--dump-%s-init' % modname) in sys.argv or
('--print-%s-init' % modname) in sys.argv) or dump
overwrite_requested = ('--update-%s-init' % modname) in sys.argv
if verbose:
print('[UTIL_IMPORT] Finished Dynamic Imports for modname=%r ' % modname)
if dump_requested:
is_main_proc = multiprocessing.current_process().name == 'MainProcess'
if is_main_proc:
from utool import util_str
initstr = _initstr(modname, imports, from_imports, inject_execstr)
print(util_str.indent(initstr))
# Overwrite the __init__.py file with new explicit imports
if overwrite_requested:
"""
SeeAlso:
util_inject.inject_python_code
util_str.replace_between_tags
"""
is_main_proc = multiprocessing.current_process().name == 'MainProcess'
if is_main_proc:
from utool import util_str
from os.path import join, exists
initstr = _initstr(modname, imports, from_imports, inject_execstr, withheader=False)
new_else = util_str.indent(initstr)
#print(new_else)
# Get path to init file so we can overwrite it
init_fpath = join(module.__path__[0], '__init__.py')
print('attempting to update: %r' % init_fpath)
assert exists(init_fpath)
new_lines = []
editing = False
updated = False
#start_tag = '# <AUTOGEN_INIT>'
#end_tag = '# </AUTOGEN_INIT>'
with open(init_fpath, 'r') as file_:
#text = file_.read()
lines = file_.readlines()
for line in lines:
if not editing:
new_lines.append(line)
if line.strip().startswith('# <AUTOGEN_INIT>'):
new_lines.append('\n' + new_else + '\n # </AUTOGEN_INIT>\n')
editing = True
updated = True
if line.strip().startswith('# </AUTOGEN_INIT>'):
editing = False
# TODO:
#new_text = util_str.replace_between_tags(text, new_else, start_tag, end_tag)
if updated:
print('writing updated file: %r' % init_fpath)
new_text = ''.join(new_lines)
with open(init_fpath, 'w') as file_:
file_.write(new_text)
else:
print('no write hook for file: %r' % init_fpath)
if return_initstr:
initstr = _initstr(modname, imports, from_imports, '', withheader=False)
return inject_execstr, initstr
else:
return inject_execstr | python | def dynamic_import(modname, import_tuples, developing=True, ignore_froms=[],
dump=False, ignore_startswith=[], ignore_endswith=[],
ignore_list=[], check_not_imported=True, return_initstr=False,
verbose=False):
"""
MAIN ENTRY POINT
Dynamically import listed util libraries and their attributes.
Create reload_subs function.
Using __import__ like this is typically not considered good style. However,
it is better than import * and this will generate the good file text that
can be used when the module is 'frozen'
Returns:
str: init_inject_str - by default all imports are executed in this
function and only the remaining code needed to be executed is
returned to define the reload logic.
str, str: init_inject_str, init_str - if return_initstr is True then
also returns init_str defining the from imports.
Ignore:
ignore_startswith = []
ignore_endswith = []
check_not_imported = True
verbose = True
"""
if verbose:
print('[UTIL_IMPORT] Running Dynamic Imports for modname=%r ' % modname)
# Get the module that will be imported into
try:
module = sys.modules[modname]
except:
module = __import__(modname)
# List of modules to be imported
imports = [tup[0] for tup in import_tuples]
# Import the modules
__excecute_imports(module, modname, imports, verbose=verbose)
# If developing do explicit import stars
if developing:
from_imports = __execute_fromimport_star(module, modname, import_tuples,
ignore_list=ignore_list,
ignore_startswith=ignore_startswith,
ignore_endswith=ignore_endswith,
check_not_imported=check_not_imported,
verbose=verbose)
else:
from_imports = __execute_fromimport(module, modname, import_tuples, verbose=verbose)
inject_execstr = _inject_execstr(modname, import_tuples)
# If requested: print what the __init__ module should look like
dump_requested = (('--dump-%s-init' % modname) in sys.argv or
('--print-%s-init' % modname) in sys.argv) or dump
overwrite_requested = ('--update-%s-init' % modname) in sys.argv
if verbose:
print('[UTIL_IMPORT] Finished Dynamic Imports for modname=%r ' % modname)
if dump_requested:
is_main_proc = multiprocessing.current_process().name == 'MainProcess'
if is_main_proc:
from utool import util_str
initstr = _initstr(modname, imports, from_imports, inject_execstr)
print(util_str.indent(initstr))
# Overwrite the __init__.py file with new explicit imports
if overwrite_requested:
"""
SeeAlso:
util_inject.inject_python_code
util_str.replace_between_tags
"""
is_main_proc = multiprocessing.current_process().name == 'MainProcess'
if is_main_proc:
from utool import util_str
from os.path import join, exists
initstr = _initstr(modname, imports, from_imports, inject_execstr, withheader=False)
new_else = util_str.indent(initstr)
#print(new_else)
# Get path to init file so we can overwrite it
init_fpath = join(module.__path__[0], '__init__.py')
print('attempting to update: %r' % init_fpath)
assert exists(init_fpath)
new_lines = []
editing = False
updated = False
#start_tag = '# <AUTOGEN_INIT>'
#end_tag = '# </AUTOGEN_INIT>'
with open(init_fpath, 'r') as file_:
#text = file_.read()
lines = file_.readlines()
for line in lines:
if not editing:
new_lines.append(line)
if line.strip().startswith('# <AUTOGEN_INIT>'):
new_lines.append('\n' + new_else + '\n # </AUTOGEN_INIT>\n')
editing = True
updated = True
if line.strip().startswith('# </AUTOGEN_INIT>'):
editing = False
# TODO:
#new_text = util_str.replace_between_tags(text, new_else, start_tag, end_tag)
if updated:
print('writing updated file: %r' % init_fpath)
new_text = ''.join(new_lines)
with open(init_fpath, 'w') as file_:
file_.write(new_text)
else:
print('no write hook for file: %r' % init_fpath)
if return_initstr:
initstr = _initstr(modname, imports, from_imports, '', withheader=False)
return inject_execstr, initstr
else:
return inject_execstr | [
"def",
"dynamic_import",
"(",
"modname",
",",
"import_tuples",
",",
"developing",
"=",
"True",
",",
"ignore_froms",
"=",
"[",
"]",
",",
"dump",
"=",
"False",
",",
"ignore_startswith",
"=",
"[",
"]",
",",
"ignore_endswith",
"=",
"[",
"]",
",",
"ignore_list",
"=",
"[",
"]",
",",
"check_not_imported",
"=",
"True",
",",
"return_initstr",
"=",
"False",
",",
"verbose",
"=",
"False",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"'[UTIL_IMPORT] Running Dynamic Imports for modname=%r '",
"%",
"modname",
")",
"# Get the module that will be imported into",
"try",
":",
"module",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"except",
":",
"module",
"=",
"__import__",
"(",
"modname",
")",
"# List of modules to be imported",
"imports",
"=",
"[",
"tup",
"[",
"0",
"]",
"for",
"tup",
"in",
"import_tuples",
"]",
"# Import the modules",
"__excecute_imports",
"(",
"module",
",",
"modname",
",",
"imports",
",",
"verbose",
"=",
"verbose",
")",
"# If developing do explicit import stars",
"if",
"developing",
":",
"from_imports",
"=",
"__execute_fromimport_star",
"(",
"module",
",",
"modname",
",",
"import_tuples",
",",
"ignore_list",
"=",
"ignore_list",
",",
"ignore_startswith",
"=",
"ignore_startswith",
",",
"ignore_endswith",
"=",
"ignore_endswith",
",",
"check_not_imported",
"=",
"check_not_imported",
",",
"verbose",
"=",
"verbose",
")",
"else",
":",
"from_imports",
"=",
"__execute_fromimport",
"(",
"module",
",",
"modname",
",",
"import_tuples",
",",
"verbose",
"=",
"verbose",
")",
"inject_execstr",
"=",
"_inject_execstr",
"(",
"modname",
",",
"import_tuples",
")",
"# If requested: print what the __init__ module should look like",
"dump_requested",
"=",
"(",
"(",
"'--dump-%s-init'",
"%",
"modname",
")",
"in",
"sys",
".",
"argv",
"or",
"(",
"'--print-%s-init'",
"%",
"modname",
")",
"in",
"sys",
".",
"argv",
")",
"or",
"dump",
"overwrite_requested",
"=",
"(",
"'--update-%s-init'",
"%",
"modname",
")",
"in",
"sys",
".",
"argv",
"if",
"verbose",
":",
"print",
"(",
"'[UTIL_IMPORT] Finished Dynamic Imports for modname=%r '",
"%",
"modname",
")",
"if",
"dump_requested",
":",
"is_main_proc",
"=",
"multiprocessing",
".",
"current_process",
"(",
")",
".",
"name",
"==",
"'MainProcess'",
"if",
"is_main_proc",
":",
"from",
"utool",
"import",
"util_str",
"initstr",
"=",
"_initstr",
"(",
"modname",
",",
"imports",
",",
"from_imports",
",",
"inject_execstr",
")",
"print",
"(",
"util_str",
".",
"indent",
"(",
"initstr",
")",
")",
"# Overwrite the __init__.py file with new explicit imports",
"if",
"overwrite_requested",
":",
"\"\"\"\n SeeAlso:\n util_inject.inject_python_code\n util_str.replace_between_tags\n \"\"\"",
"is_main_proc",
"=",
"multiprocessing",
".",
"current_process",
"(",
")",
".",
"name",
"==",
"'MainProcess'",
"if",
"is_main_proc",
":",
"from",
"utool",
"import",
"util_str",
"from",
"os",
".",
"path",
"import",
"join",
",",
"exists",
"initstr",
"=",
"_initstr",
"(",
"modname",
",",
"imports",
",",
"from_imports",
",",
"inject_execstr",
",",
"withheader",
"=",
"False",
")",
"new_else",
"=",
"util_str",
".",
"indent",
"(",
"initstr",
")",
"#print(new_else)",
"# Get path to init file so we can overwrite it",
"init_fpath",
"=",
"join",
"(",
"module",
".",
"__path__",
"[",
"0",
"]",
",",
"'__init__.py'",
")",
"print",
"(",
"'attempting to update: %r'",
"%",
"init_fpath",
")",
"assert",
"exists",
"(",
"init_fpath",
")",
"new_lines",
"=",
"[",
"]",
"editing",
"=",
"False",
"updated",
"=",
"False",
"#start_tag = '# <AUTOGEN_INIT>'",
"#end_tag = '# </AUTOGEN_INIT>'",
"with",
"open",
"(",
"init_fpath",
",",
"'r'",
")",
"as",
"file_",
":",
"#text = file_.read()",
"lines",
"=",
"file_",
".",
"readlines",
"(",
")",
"for",
"line",
"in",
"lines",
":",
"if",
"not",
"editing",
":",
"new_lines",
".",
"append",
"(",
"line",
")",
"if",
"line",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'# <AUTOGEN_INIT>'",
")",
":",
"new_lines",
".",
"append",
"(",
"'\\n'",
"+",
"new_else",
"+",
"'\\n # </AUTOGEN_INIT>\\n'",
")",
"editing",
"=",
"True",
"updated",
"=",
"True",
"if",
"line",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'# </AUTOGEN_INIT>'",
")",
":",
"editing",
"=",
"False",
"# TODO:",
"#new_text = util_str.replace_between_tags(text, new_else, start_tag, end_tag)",
"if",
"updated",
":",
"print",
"(",
"'writing updated file: %r'",
"%",
"init_fpath",
")",
"new_text",
"=",
"''",
".",
"join",
"(",
"new_lines",
")",
"with",
"open",
"(",
"init_fpath",
",",
"'w'",
")",
"as",
"file_",
":",
"file_",
".",
"write",
"(",
"new_text",
")",
"else",
":",
"print",
"(",
"'no write hook for file: %r'",
"%",
"init_fpath",
")",
"if",
"return_initstr",
":",
"initstr",
"=",
"_initstr",
"(",
"modname",
",",
"imports",
",",
"from_imports",
",",
"''",
",",
"withheader",
"=",
"False",
")",
"return",
"inject_execstr",
",",
"initstr",
"else",
":",
"return",
"inject_execstr"
] | MAIN ENTRY POINT
Dynamically import listed util libraries and their attributes.
Create reload_subs function.
Using __import__ like this is typically not considered good style. However,
it is better than import * and this will generate the good file text that
can be used when the module is 'frozen'
Returns:
str: init_inject_str - by default all imports are executed in this
function and only the remaining code needed to be executed is
returned to define the reload logic.
str, str: init_inject_str, init_str - if return_initstr is True then
also returns init_str defining the from imports.
Ignore:
ignore_startswith = []
ignore_endswith = []
check_not_imported = True
verbose = True | [
"MAIN",
"ENTRY",
"POINT"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L294-L407 | train |
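A hedged sketch of how a package __init__.py would typically drive dynamic_import; the returned string is meant to be exec'd in the package namespace, where (per the _inject_execstr template above) it installs print/rrr/profile and defines reload_subs. Module names here are illustrative.

IMPORT_TUPLES = [
    ('util_alpha', None),           # plain module
    ('util_beta', ['foo', 'bar']),  # module with explicit from-imports
    ('subpkg', None, True),         # third element flags a subpackage
]
from utool._internal import util_importer
inject_execstr = util_importer.dynamic_import(__name__, IMPORT_TUPLES)
exec(inject_execstr)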
Erotemic/utool | utool/_internal/util_importer.py | make_initstr | def make_initstr(modname, import_tuples, verbose=False):
"""
Just creates the string representation. Does no importing.
"""
imports = [tup[0] for tup in import_tuples]
from_imports = __get_from_imports(import_tuples)
inject_execstr = _inject_execstr(modname, import_tuples)
return _initstr(modname, imports, from_imports, inject_execstr) | python | def make_initstr(modname, import_tuples, verbose=False):
"""
Just creates the string representation. Does no importing.
"""
imports = [tup[0] for tup in import_tuples]
from_imports = __get_from_imports(import_tuples)
inject_execstr = _inject_execstr(modname, import_tuples)
return _initstr(modname, imports, from_imports, inject_execstr) | [
"def",
"make_initstr",
"(",
"modname",
",",
"import_tuples",
",",
"verbose",
"=",
"False",
")",
":",
"imports",
"=",
"[",
"tup",
"[",
"0",
"]",
"for",
"tup",
"in",
"import_tuples",
"]",
"from_imports",
"=",
"__get_from_imports",
"(",
"import_tuples",
")",
"inject_execstr",
"=",
"_inject_execstr",
"(",
"modname",
",",
"import_tuples",
")",
"return",
"_initstr",
"(",
"modname",
",",
"imports",
",",
"from_imports",
",",
"inject_execstr",
")"
] | Just creates the string representation. Does no importing. | [
"Just",
"creates",
"the",
"string",
"representation",
".",
"Does",
"no",
"importing",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L410-L417 | train |
Erotemic/utool | utool/_internal/util_importer.py | make_import_tuples | def make_import_tuples(module_path, exclude_modnames=[]):
""" Infer the import_tuples from a module_path """
from utool import util_path
kwargs = dict(private=False, full=False)
module_list = util_path.ls_modulefiles(module_path, noext=True, **kwargs)
package_list = util_path.ls_moduledirs(module_path, **kwargs)
exclude_set = set(exclude_modnames)
module_import_tuples = [(modname, None) for modname in module_list
if modname not in exclude_set]
package_import_tuples = [(modname, None, True) for modname in package_list
if modname not in exclude_set]
import_tuples = (module_import_tuples + package_import_tuples)
return import_tuples | python | def make_import_tuples(module_path, exclude_modnames=[]):
""" Infer the import_tuples from a module_path """
from utool import util_path
kwargs = dict(private=False, full=False)
module_list = util_path.ls_modulefiles(module_path, noext=True, **kwargs)
package_list = util_path.ls_moduledirs(module_path, **kwargs)
exclude_set = set(exclude_modnames)
module_import_tuples = [(modname, None) for modname in module_list
if modname not in exclude_set]
package_import_tuples = [(modname, None, True) for modname in package_list
if modname not in exclude_set]
import_tuples = (module_import_tuples + package_import_tuples)
return import_tuples | [
"def",
"make_import_tuples",
"(",
"module_path",
",",
"exclude_modnames",
"=",
"[",
"]",
")",
":",
"from",
"utool",
"import",
"util_path",
"kwargs",
"=",
"dict",
"(",
"private",
"=",
"False",
",",
"full",
"=",
"False",
")",
"module_list",
"=",
"util_path",
".",
"ls_modulefiles",
"(",
"module_path",
",",
"noext",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"package_list",
"=",
"util_path",
".",
"ls_moduledirs",
"(",
"module_path",
",",
"*",
"*",
"kwargs",
")",
"exclude_set",
"=",
"set",
"(",
"exclude_modnames",
")",
"module_import_tuples",
"=",
"[",
"(",
"modname",
",",
"None",
")",
"for",
"modname",
"in",
"module_list",
"if",
"modname",
"not",
"in",
"exclude_set",
"]",
"package_import_tuples",
"=",
"[",
"(",
"modname",
",",
"None",
",",
"True",
")",
"for",
"modname",
"in",
"package_list",
"if",
"modname",
"not",
"in",
"exclude_set",
"]",
"import_tuples",
"=",
"(",
"module_import_tuples",
"+",
"package_import_tuples",
")",
"return",
"import_tuples"
] | Infer the import_tuples from a module_path | [
"Infer",
"the",
"import_tuples",
"from",
"a",
"module_path"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L420-L432 | train |
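Together with make_initstr above, this enables a static workflow: infer the tuples from a package directory on disk and render the full __init__ text without executing any imports. The path is a placeholder.

from utool._internal import util_importer
import_tuples = util_importer.make_import_tuples(
    '/path/to/mypkg', exclude_modnames=['__main__'])
initstr = util_importer.make_initstr('mypkg', import_tuples)
print(initstr)  # header, imports, from-imports, and the inject/reload block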
Erotemic/utool | utool/_internal/meta_util_cplat.py | get_resource_dir | def get_resource_dir():
"""
Returns a directory which should be writable for any application
"""
#resource_prefix = '~'
if WIN32:
dpath_ = '~/AppData/Roaming'
elif LINUX:
dpath_ = '~/.config'
elif DARWIN:
dpath_ = '~/Library/Application Support'
else:
raise AssertionError('unknown os')
dpath = normpath(expanduser(dpath_))
return dpath | python | def get_resource_dir():
"""
Returns a directory which should be writable for any application
"""
#resource_prefix = '~'
if WIN32:
dpath_ = '~/AppData/Roaming'
elif LINUX:
dpath_ = '~/.config'
elif DARWIN:
dpath_ = '~/Library/Application Support'
else:
raise AssertionError('unknown os')
dpath = normpath(expanduser(dpath_))
return dpath | [
"def",
"get_resource_dir",
"(",
")",
":",
"#resource_prefix = '~'",
"if",
"WIN32",
":",
"dpath_",
"=",
"'~/AppData/Roaming'",
"elif",
"LINUX",
":",
"dpath_",
"=",
"'~/.config'",
"elif",
"DARWIN",
":",
"dpath_",
"=",
"'~/Library/Application Support'",
"else",
":",
"raise",
"AssertionError",
"(",
"'unknown os'",
")",
"dpath",
"=",
"normpath",
"(",
"expanduser",
"(",
"dpath_",
")",
")",
"return",
"dpath"
] | Returns a directory which should be writable for any application | [
"Returns",
"a",
"directory",
"which",
"should",
"be",
"writable",
"for",
"any",
"application"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/meta_util_cplat.py#L16-L30 | train |
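Minimal usage sketch: callers normally join an application name onto this root to get a per-app writable directory (the app name is illustrative).

from os.path import join
root = get_resource_dir()        # e.g. ~/.config on Linux
app_dpath = join(root, 'myapp')  # per-application writable directory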
Erotemic/utool | utool/util_io.py | load_data | def load_data(fpath, **kwargs):
""" More generic interface to load data """
ext = splitext(fpath)[1]
if ext in ['.pickle', '.cPkl', '.pkl']:
return load_cPkl(fpath, **kwargs)
elif ext in ['.json']:
return load_json(fpath, **kwargs)
elif ext in ['.hdf5']:
return load_hdf5(fpath, **kwargs)
elif ext in ['.txt']:
return load_text(fpath, **kwargs)
elif HAS_NUMPY and ext in ['.npz', '.npy']:
return load_numpy(fpath, **kwargs)
else:
assert False, 'unknown ext=%r for fpath=%r' % (ext, fpath) | python | def load_data(fpath, **kwargs):
""" More generic interface to load data """
ext = splitext(fpath)[1]
if ext in ['.pickle', '.cPkl', '.pkl']:
return load_cPkl(fpath, **kwargs)
elif ext in ['.json']:
return load_json(fpath, **kwargs)
elif ext in ['.hdf5']:
return load_hdf5(fpath, **kwargs)
elif ext in ['.txt']:
return load_text(fpath, **kwargs)
elif HAS_NUMPY and ext in ['.npz', '.npy']:
return load_numpy(fpath, **kwargs)
else:
assert False, 'unknown ext=%r for fpath=%r' % (ext, fpath) | [
"def",
"load_data",
"(",
"fpath",
",",
"*",
"*",
"kwargs",
")",
":",
"ext",
"=",
"splitext",
"(",
"fpath",
")",
"[",
"1",
"]",
"if",
"ext",
"in",
"[",
"'.pickle'",
",",
"'.cPkl'",
",",
"'.pkl'",
"]",
":",
"return",
"load_cPkl",
"(",
"fpath",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"[",
"'.json'",
"]",
":",
"return",
"load_json",
"(",
"fpath",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"[",
"'.hdf5'",
"]",
":",
"return",
"load_hdf5",
"(",
"fpath",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"[",
"'.txt'",
"]",
":",
"return",
"load_text",
"(",
"fpath",
",",
"*",
"*",
"kwargs",
")",
"elif",
"HAS_NUMPY",
"and",
"ext",
"in",
"[",
"'.npz'",
",",
"'.npy'",
"]",
":",
"return",
"load_numpy",
"(",
"fpath",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"assert",
"False",
",",
"'unknown ext=%r for fpath=%r'",
"%",
"(",
"ext",
",",
"fpath",
")"
] | More generic interface to load data | [
"More",
"generic",
"interface",
"to",
"load",
"data"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L32-L46 | train |
Erotemic/utool | utool/util_io.py | save_data | def save_data(fpath, data, **kwargs):
""" More generic interface to write data """
ext = splitext(fpath)[1]
if ext in ['.pickle', '.cPkl', '.pkl']:
return save_cPkl(fpath, data, **kwargs)
elif ext in ['.json']:
return save_json(fpath, data, **kwargs)
elif ext in ['.hdf5']:
return save_hdf5(fpath, data, **kwargs)
elif ext in ['.txt']:
return save_text(fpath, data, **kwargs)
elif HAS_NUMPY and ext in ['.npz', '.npy']:
return save_numpy(fpath, data, **kwargs)
else:
assert False, 'unknown ext=%r for fpath=%r' % (ext, fpath) | python | def save_data(fpath, data, **kwargs):
""" More generic interface to write data """
ext = splitext(fpath)[1]
if ext in ['.pickle', '.cPkl', '.pkl']:
return save_cPkl(fpath, data, **kwargs)
elif ext in ['.json']:
return save_json(fpath, data, **kwargs)
elif ext in ['.hdf5']:
return save_hdf5(fpath, data, **kwargs)
elif ext in ['.txt']:
return save_text(fpath, data, **kwargs)
elif HAS_NUMPY and ext in ['.npz', '.npy']:
return save_numpy(fpath, data, **kwargs)
else:
assert False, 'unknown ext=%r for fpath=%r' % (ext, fpath) | [
"def",
"save_data",
"(",
"fpath",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
":",
"ext",
"=",
"splitext",
"(",
"fpath",
")",
"[",
"1",
"]",
"if",
"ext",
"in",
"[",
"'.pickle'",
",",
"'.cPkl'",
",",
"'.pkl'",
"]",
":",
"return",
"save_cPkl",
"(",
"fpath",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"[",
"'.json'",
"]",
":",
"return",
"save_json",
"(",
"fpath",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"[",
"'.hdf5'",
"]",
":",
"return",
"save_hdf5",
"(",
"fpath",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"elif",
"ext",
"in",
"[",
"'.txt'",
"]",
":",
"return",
"save_text",
"(",
"fpath",
",",
"*",
"*",
"kwargs",
")",
"elif",
"HAS_NUMPY",
"and",
"ext",
"in",
"[",
"'.npz'",
",",
"'.npy'",
"]",
":",
"return",
"save_numpy",
"(",
"fpath",
",",
"data",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"assert",
"False",
",",
"'unknown ext=%r for fpath=%r'",
"%",
"(",
"ext",
",",
"fpath",
")"
] | More generic interface to write data | [
"More",
"generic",
"interface",
"to",
"write",
"data"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L49-L63 | train |
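A round-trip sketch of the two dispatchers above: the file extension selects the backend, so the same call works for pickle, JSON, HDF5, text, and numpy files. Paths are illustrative.

data = {'a': [1, 2, 3]}
save_data('cache.cPkl', data)           # dispatches to save_cPkl
assert load_data('cache.cPkl') == data  # dispatches to load_cPkl
save_data('cache.json', data)           # dispatches to save_json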
Erotemic/utool | utool/util_io.py | write_to | def write_to(fpath, to_write, aslines=False, verbose=None,
onlyifdiff=False, mode='w', n=None):
""" Writes text to a file. Automatically encodes text as utf8.
Args:
fpath (str): file path
to_write (str): text to write (must be unicode text)
aslines (bool): if True to_write is assumed to be a list of lines
verbose (bool): verbosity flag
onlyifdiff (bool): only writes if needed!
checks hash of to_write vs the hash of the contents of fpath
mode (unicode): (default = u'w')
n (int): (default = 2)
CommandLine:
python -m utool.util_io --exec-write_to --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import utool as ut
>>> fpath = ut.unixjoin(ut.get_app_resource_dir('utool'), 'testwrite.txt')
>>> ut.delete(fpath)
>>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
>>> aslines = False
>>> verbose = True
>>> onlyifdiff = False
>>> mode = u'w'
>>> n = 2
>>> write_to(fpath, to_write, aslines, verbose, onlyifdiff, mode, n)
>>> read_ = ut.read_from(fpath)
>>> print('read_ = ' + read_)
>>> print('to_write = ' + to_write)
>>> assert read_ == to_write
"""
if onlyifdiff:
import utool as ut
if ut.hashstr(read_from(fpath)) == ut.hashstr(to_write):
print('[util_io] * no difference')
return
verbose = _rectify_verb_write(verbose)
if verbose:
# n = None if verbose > 1 else 2
# print('[util_io] * Writing to text file: %r ' % util_path.tail(fpath, n=n))
print('[util_io] * Writing to text file: {}'.format(fpath))
backup = False and exists(fpath)
if backup:
util_path.copy(fpath, fpath + '.backup')
if not isinstance(fpath, six.string_types):
# Assuming a file object with a name attribute
# Should just read from the file
fpath = fpath.name
with open(fpath, mode) as file_:
if aslines:
file_.writelines(to_write)
else:
# Ensure python2 writes in bytes
if six.PY2:
if isinstance(to_write, unicode): # NOQA
to_write = to_write.encode('utf8')
try:
file_.write(to_write)
except UnicodeEncodeError as ex:
start = max(ex.args[2] - 10, 0)
end = ex.args[3] + 10
context = to_write[start:end]
print(repr(context))
print(context)
from utool import util_dbg
util_dbg.printex(ex, keys=[(type, 'to_write')])
file_.close()
if backup:
# restore
util_path.copy(fpath + '.backup', fpath)
# import utool
# utool.embed()
raise | python | def write_to(fpath, to_write, aslines=False, verbose=None,
onlyifdiff=False, mode='w', n=None):
""" Writes text to a file. Automatically encodes text as utf8.
Args:
fpath (str): file path
to_write (str): text to write (must be unicode text)
aslines (bool): if True to_write is assumed to be a list of lines
verbose (bool): verbosity flag
onlyifdiff (bool): only writes if needed!
checks hash of to_write vs the hash of the contents of fpath
mode (unicode): (default = u'w')
n (int): (default = 2)
CommandLine:
python -m utool.util_io --exec-write_to --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import utool as ut
>>> fpath = ut.unixjoin(ut.get_app_resource_dir('utool'), 'testwrite.txt')
>>> ut.delete(fpath)
>>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
>>> aslines = False
>>> verbose = True
>>> onlyifdiff = False
>>> mode = u'w'
>>> n = 2
>>> write_to(fpath, to_write, aslines, verbose, onlyifdiff, mode, n)
>>> read_ = ut.read_from(fpath)
>>> print('read_ = ' + read_)
>>> print('to_write = ' + to_write)
>>> assert read_ == to_write
"""
if onlyifdiff:
import utool as ut
if ut.hashstr(read_from(fpath)) == ut.hashstr(to_write):
print('[util_io] * no difference')
return
verbose = _rectify_verb_write(verbose)
if verbose:
# n = None if verbose > 1 else 2
# print('[util_io] * Writing to text file: %r ' % util_path.tail(fpath, n=n))
print('[util_io] * Writing to text file: {}'.format(fpath))
backup = False and exists(fpath)
if backup:
util_path.copy(fpath, fpath + '.backup')
if not isinstance(fpath, six.string_types):
# Assuming a file object with a name attribute
# Should just read from the file
fpath = fpath.name
with open(fpath, mode) as file_:
if aslines:
file_.writelines(to_write)
else:
# Ensure python2 writes in bytes
if six.PY2:
if isinstance(to_write, unicode): # NOQA
to_write = to_write.encode('utf8')
try:
file_.write(to_write)
except UnicodeEncodeError as ex:
start = max(ex.args[2] - 10, 0)
end = ex.args[3] + 10
context = to_write[start:end]
print(repr(context))
print(context)
from utool import util_dbg
util_dbg.printex(ex, keys=[(type, 'to_write')])
file_.close()
if backup:
# restore
util_path.copy(fpath + '.backup', fpath)
# import utool
# utool.embed()
raise | [
"def",
"write_to",
"(",
"fpath",
",",
"to_write",
",",
"aslines",
"=",
"False",
",",
"verbose",
"=",
"None",
",",
"onlyifdiff",
"=",
"False",
",",
"mode",
"=",
"'w'",
",",
"n",
"=",
"None",
")",
":",
"if",
"onlyifdiff",
":",
"import",
"utool",
"as",
"ut",
"if",
"ut",
".",
"hashstr",
"(",
"read_from",
"(",
"fpath",
")",
")",
"==",
"ut",
".",
"hashstr",
"(",
"to_write",
")",
":",
"print",
"(",
"'[util_io] * no difference'",
")",
"return",
"verbose",
"=",
"_rectify_verb_write",
"(",
"verbose",
")",
"if",
"verbose",
":",
"# n = None if verbose > 1 else 2",
"# print('[util_io] * Writing to text file: %r ' % util_path.tail(fpath, n=n))",
"print",
"(",
"'[util_io] * Writing to text file: {}'",
".",
"format",
"(",
"fpath",
")",
")",
"backup",
"=",
"False",
"and",
"exists",
"(",
"fpath",
")",
"if",
"backup",
":",
"util_path",
".",
"copy",
"(",
"fpath",
",",
"fpath",
"+",
"'.backup'",
")",
"if",
"not",
"isinstance",
"(",
"fpath",
",",
"six",
".",
"string_types",
")",
":",
"# Assuming a file object with a name attribute",
"# Should just read from the file",
"fpath",
"=",
"fpath",
".",
"name",
"with",
"open",
"(",
"fpath",
",",
"mode",
")",
"as",
"file_",
":",
"if",
"aslines",
":",
"file_",
".",
"writelines",
"(",
"to_write",
")",
"else",
":",
"# Ensure python2 writes in bytes",
"if",
"six",
".",
"PY2",
":",
"if",
"isinstance",
"(",
"to_write",
",",
"unicode",
")",
":",
"# NOQA",
"to_write",
"=",
"to_write",
".",
"encode",
"(",
"'utf8'",
")",
"try",
":",
"file_",
".",
"write",
"(",
"to_write",
")",
"except",
"UnicodeEncodeError",
"as",
"ex",
":",
"start",
"=",
"max",
"(",
"ex",
".",
"args",
"[",
"2",
"]",
"-",
"10",
",",
"0",
")",
"end",
"=",
"ex",
".",
"args",
"[",
"3",
"]",
"+",
"10",
"context",
"=",
"to_write",
"[",
"start",
":",
"end",
"]",
"print",
"(",
"repr",
"(",
"context",
")",
")",
"print",
"(",
"context",
")",
"from",
"utool",
"import",
"util_dbg",
"util_dbg",
".",
"printex",
"(",
"ex",
",",
"keys",
"=",
"[",
"(",
"type",
",",
"'to_write'",
")",
"]",
")",
"file_",
".",
"close",
"(",
")",
"if",
"backup",
":",
"# restore",
"util_path",
".",
"copy",
"(",
"fpath",
"+",
"'.backup'",
",",
"fpath",
")",
"# import utool",
"# utool.embed()",
"raise"
] | Writes text to a file. Automatically encodes text as utf8.
Args:
fpath (str): file path
to_write (str): text to write (must be unicode text)
aslines (bool): if True to_write is assumed to be a list of lines
verbose (bool): verbosity flag
onlyifdiff (bool): only writes if needed!
checks hash of to_write vs the hash of the contents of fpath
mode (unicode): (default = u'w')
n (int): (default = 2)
CommandLine:
python -m utool.util_io --exec-write_to --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import utool as ut
>>> fpath = ut.unixjoin(ut.get_app_resource_dir('utool'), 'testwrite.txt')
>>> ut.delete(fpath)
>>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
>>> aslines = False
>>> verbose = True
>>> onlyifdiff = False
>>> mode = u'w'
>>> n = 2
>>> write_to(fpath, to_write, aslines, verbose, onlyifdiff, mode, n)
>>> read_ = ut.read_from(fpath)
>>> print('read_ = ' + read_)
>>> print('to_write = ' + to_write)
>>> assert read_ == to_write | [
"Writes",
"text",
"to",
"a",
"file",
".",
"Automatically",
"encodes",
"text",
"as",
"utf8",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L82-L161 | train |
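For reference, a minimal usage sketch of write_to distilled from the doctest in this record (assumes utool is importable; the file path is illustrative):

    import utool as ut

    fpath = ut.unixjoin(ut.get_app_resource_dir('utool'), 'demo_write.txt')
    to_write = 'utf-8 symbols Δ, Й, ק'
    # write_to encodes unicode text as utf8 before writing on Python 2
    ut.write_to(fpath, to_write, verbose=True)
    assert ut.read_from(fpath) == to_write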
Erotemic/utool | utool/util_io.py | read_from | def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'):
r""" Reads text from a file. Automatically returns utf8.
Args:
fpath (str): file path
aslines (bool): if True returns list of lines
verbose (bool): verbosity flag
Returns:
str: text from fpath (this is unicode)
Ignore:
x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n'''
ut.writeto('foo.txt', x)
y = ut.readfrom('foo.txt')
y.encode('utf8') == x
"""
if n is None:
n = __READ_TAIL_N__
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n))
try:
if not util_path.checkpath(fpath, verbose=verbose, n=n):
raise IOError('[io] * FILE DOES NOT EXIST!')
#with open(fpath, 'r') as file_:
with open(fpath, 'rb') as file_:
if aslines:
#text = file_.readlines()
if six.PY2:
# python2 writes in bytes, so read as bytes then convert to
# utf8
text = [line.decode('utf8', errors=errors)
for line in file_.readlines()]
else:
text = [line.decode('utf8', errors=errors)
for line in file_.readlines()]
#text = file_.readlines()
else:
# text = file_.read()
if six.PY2:
text = file_.read().decode('utf8', errors=errors)
else:
#text = file_.read()
text = file_.read().decode('utf8', errors=errors)
return text
except IOError as ex:
from utool import util_dbg
if verbose or strict:
util_dbg.printex(ex, ' * Error reading fpath=%r' %
util_path.tail(fpath, n=n), '[io]')
if strict:
raise | python | def read_from(fpath, verbose=None, aslines=False, strict=True, n=None, errors='replace'):
r""" Reads text from a file. Automatically returns utf8.
Args:
fpath (str): file path
aslines (bool): if True returns list of lines
verbose (bool): verbosity flag
Returns:
str: text from fpath (this is unicode)
Ignore:
x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n'''
ut.writeto('foo.txt', x)
y = ut.readfrom('foo.txt')
y.encode('utf8') == x
"""
if n is None:
n = __READ_TAIL_N__
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n))
try:
if not util_path.checkpath(fpath, verbose=verbose, n=n):
raise IOError('[io] * FILE DOES NOT EXIST!')
#with open(fpath, 'r') as file_:
with open(fpath, 'rb') as file_:
if aslines:
#text = file_.readlines()
if six.PY2:
# python2 writes in bytes, so read as bytes then convert to
# utf8
text = [line.decode('utf8', errors=errors)
for line in file_.readlines()]
else:
text = [line.decode('utf8', errors=errors)
for line in file_.readlines()]
#text = file_.readlines()
else:
# text = file_.read()
if six.PY2:
text = file_.read().decode('utf8', errors=errors)
else:
#text = file_.read()
text = file_.read().decode('utf8', errors=errors)
return text
except IOError as ex:
from utool import util_dbg
if verbose or strict:
util_dbg.printex(ex, ' * Error reading fpath=%r' %
util_path.tail(fpath, n=n), '[io]')
if strict:
raise | [
"def",
"read_from",
"(",
"fpath",
",",
"verbose",
"=",
"None",
",",
"aslines",
"=",
"False",
",",
"strict",
"=",
"True",
",",
"n",
"=",
"None",
",",
"errors",
"=",
"'replace'",
")",
":",
"if",
"n",
"is",
"None",
":",
"n",
"=",
"__READ_TAIL_N__",
"verbose",
"=",
"_rectify_verb_read",
"(",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[util_io] * Reading text file: %r '",
"%",
"util_path",
".",
"tail",
"(",
"fpath",
",",
"n",
"=",
"n",
")",
")",
"try",
":",
"if",
"not",
"util_path",
".",
"checkpath",
"(",
"fpath",
",",
"verbose",
"=",
"verbose",
",",
"n",
"=",
"n",
")",
":",
"raise",
"IOError",
"(",
"'[io] * FILE DOES NOT EXIST!'",
")",
"#with open(fpath, 'r') as file_:",
"with",
"open",
"(",
"fpath",
",",
"'rb'",
")",
"as",
"file_",
":",
"if",
"aslines",
":",
"#text = file_.readlines()",
"if",
"six",
".",
"PY2",
":",
"# python2 writes in bytes, so read as bytes then convert to",
"# utf8",
"text",
"=",
"[",
"line",
".",
"decode",
"(",
"'utf8'",
",",
"errors",
"=",
"errors",
")",
"for",
"line",
"in",
"file_",
".",
"readlines",
"(",
")",
"]",
"else",
":",
"text",
"=",
"[",
"line",
".",
"decode",
"(",
"'utf8'",
",",
"errors",
"=",
"errors",
")",
"for",
"line",
"in",
"file_",
".",
"readlines",
"(",
")",
"]",
"#text = file_.readlines()",
"else",
":",
"# text = file_.read()",
"if",
"six",
".",
"PY2",
":",
"text",
"=",
"file_",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf8'",
",",
"errors",
"=",
"errors",
")",
"else",
":",
"#text = file_.read()",
"text",
"=",
"file_",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf8'",
",",
"errors",
"=",
"errors",
")",
"return",
"text",
"except",
"IOError",
"as",
"ex",
":",
"from",
"utool",
"import",
"util_dbg",
"if",
"verbose",
"or",
"strict",
":",
"util_dbg",
".",
"printex",
"(",
"ex",
",",
"' * Error reading fpath=%r'",
"%",
"util_path",
".",
"tail",
"(",
"fpath",
",",
"n",
"=",
"n",
")",
",",
"'[io]'",
")",
"if",
"strict",
":",
"raise"
] | r""" Reads text from a file. Automatically returns utf8.
Args:
fpath (str): file path
aslines (bool): if True returns list of lines
verbose (bool): verbosity flag
Returns:
str: text from fpath (this is unicode)
Ignore:
x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n'''
ut.writeto('foo.txt', x)
y = ut.readfrom('foo.txt')
y.encode('utf8') == x | [
"r",
"Reads",
"text",
"from",
"a",
"file",
".",
"Automatically",
"returns",
"utf8",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L164-L216 | train |
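A short sketch of read_from, following the record's Ignore block (utool assumed importable; 'foo.txt' is an illustrative path):

    import utool as ut

    ut.write_to('foo.txt', 'line1\nline2\n')
    text = ut.read_from('foo.txt')                 # whole file as unicode
    lines = ut.read_from('foo.txt', aslines=True)  # list of decoded lines
    assert text == ''.join(lines)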
Erotemic/utool | utool/util_io.py | save_cPkl | def save_cPkl(fpath, data, verbose=None, n=None):
""" Saves data to a pickled file with optional verbosity """
verbose = _rectify_verb_write(verbose)
if verbose:
print('[util_io] * save_cPkl(%r, data)' % (util_path.tail(fpath, n=n),))
with open(fpath, 'wb') as file_:
# Use protocol 2 to support python2 and 3
pickle.dump(data, file_, protocol=2) | python | def save_cPkl(fpath, data, verbose=None, n=None):
""" Saves data to a pickled file with optional verbosity """
verbose = _rectify_verb_write(verbose)
if verbose:
print('[util_io] * save_cPkl(%r, data)' % (util_path.tail(fpath, n=n),))
with open(fpath, 'wb') as file_:
# Use protocol 2 to support python2 and 3
pickle.dump(data, file_, protocol=2) | [
"def",
"save_cPkl",
"(",
"fpath",
",",
"data",
",",
"verbose",
"=",
"None",
",",
"n",
"=",
"None",
")",
":",
"verbose",
"=",
"_rectify_verb_write",
"(",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[util_io] * save_cPkl(%r, data)'",
"%",
"(",
"util_path",
".",
"tail",
"(",
"fpath",
",",
"n",
"=",
"n",
")",
",",
")",
")",
"with",
"open",
"(",
"fpath",
",",
"'wb'",
")",
"as",
"file_",
":",
"# Use protocol 2 to support python2 and 3",
"pickle",
".",
"dump",
"(",
"data",
",",
"file_",
",",
"protocol",
"=",
"2",
")"
] | Saves data to a pickled file with optional verbosity | [
"Saves",
"data",
"to",
"a",
"pickled",
"file",
"with",
"optional",
"verbosity"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L249-L256 | train |
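A minimal round-trip sketch for save_cPkl, paired with load_cPkl from the next record (the file name is illustrative):

    import utool as ut

    data = {'key': [1, 2, 3]}
    # pickled with protocol 2 so both python2 and python3 can read it
    ut.save_cPkl('tmp.pkl', data, verbose=True)
    assert ut.load_cPkl('tmp.pkl') == data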
Erotemic/utool | utool/util_io.py | load_cPkl | def load_cPkl(fpath, verbose=None, n=None):
"""
Loads a pickled file with optional verbosity.
Aims for compatibility between python2 and python3.
TestPickleExtentsSimple:
>>> def makedata_simple():
>>> data = np.empty((500, 2 ** 20), dtype=np.uint8) + 1
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_simple()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleExtentsComplex:
>>> def makedata_complex():
>>> rng = np.random.RandomState(42)
>>> item1 = np.empty((100, 2 ** 20), dtype=np.uint8) + 1
>>> item2 = [np.empty((10, 2 ** 10), dtype=np.uint8) + 1
>>> for a in range(1000)]
>>> item3 = {a: np.empty(int(rng.rand() * 10), dtype=np.int16) + 1
>>> for a in range(100)}
>>> item4 = {np.int32(a): np.empty((int(rng.rand() * 10), 2), dtype=np.float64) + 1
>>> for a in range(200)}
>>> data = {'item1': item1, 'item2': item2,
>>> 'item3': item3, 'item4': item4}
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_complex()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data2 = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleCacher:
>>> memtrack = ut.MemoryTracker()
>>> cacher = ut.Cacher('tmp', cache_dir='.', cfgstr='foo')
>>> data3 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> data4 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> memtrack = ut.MemoryTracker()
>>> fpath = '/home/joncrall/Desktop/smkcache/inva_PZ_Master1VUUIDS((5616)vxihbjwtggyovrto)_vpgwpcafbjkkpjdf.cPkl'
>>> print(ut.get_file_nBytes_str(fpath))
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
def makedata_complex():
data = np.empty((1000, 2 ** 20), dtype=np.uint8)
data[:] = 0
return data
"""
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * load_cPkl(%r)' % (util_path.tail(fpath, n=n),))
try:
with open(fpath, 'rb') as file_:
data = pickle.load(file_)
except UnicodeDecodeError:
if six.PY3:
# try to open python2 pickle
with open(fpath, 'rb') as file_:
data = pickle.load(file_, encoding='latin1')
else:
raise
except ValueError as ex:
if six.PY2:
if ex.message == 'unsupported pickle protocol: 4':
raise ValueError(
'unsupported Python3 pickle protocol 4 '
'in Python2 for fpath=%r' % (fpath,))
else:
raise
else:
raise
return data | python | def load_cPkl(fpath, verbose=None, n=None):
"""
Loads a pickled file with optional verbosity.
Aims for compatibility between python2 and python3.
TestPickleExtentsSimple:
>>> def makedata_simple():
>>> data = np.empty((500, 2 ** 20), dtype=np.uint8) + 1
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_simple()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleExtentsComplex:
>>> def makedata_complex():
>>> rng = np.random.RandomState(42)
>>> item1 = np.empty((100, 2 ** 20), dtype=np.uint8) + 1
>>> item2 = [np.empty((10, 2 ** 10), dtype=np.uint8) + 1
>>> for a in range(1000)]
>>> item3 = {a: np.empty(int(rng.rand() * 10), dtype=np.int16) + 1
>>> for a in range(100)}
>>> item4 = {np.int32(a): np.empty((int(rng.rand() * 10), 2), dtype=np.float64) + 1
>>> for a in range(200)}
>>> data = {'item1': item1, 'item2': item2,
>>> 'item3': item3, 'item4': item4}
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_complex()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data2 = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleCacher:
>>> memtrack = ut.MemoryTracker()
>>> cacher = ut.Cacher('tmp', cache_dir='.', cfgstr='foo')
>>> data3 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> data4 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> memtrack = ut.MemoryTracker()
>>> fpath = '/home/joncrall/Desktop/smkcache/inva_PZ_Master1VUUIDS((5616)vxihbjwtggyovrto)_vpgwpcafbjkkpjdf.cPkl'
>>> print(ut.get_file_nBytes_str(fpath))
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
def makedata_complex():
data = np.empty((1000, 2 ** 20), dtype=np.uint8)
data[:] = 0
return data
"""
verbose = _rectify_verb_read(verbose)
if verbose:
print('[util_io] * load_cPkl(%r)' % (util_path.tail(fpath, n=n),))
try:
with open(fpath, 'rb') as file_:
data = pickle.load(file_)
except UnicodeDecodeError:
if six.PY3:
# try to open python2 pickle
with open(fpath, 'rb') as file_:
data = pickle.load(file_, encoding='latin1')
else:
raise
except ValueError as ex:
if six.PY2:
if ex.message == 'unsupported pickle protocol: 4':
raise ValueError(
'unsupported Python3 pickle protocol 4 '
'in Python2 for fpath=%r' % (fpath,))
else:
raise
else:
raise
return data | [
"def",
"load_cPkl",
"(",
"fpath",
",",
"verbose",
"=",
"None",
",",
"n",
"=",
"None",
")",
":",
"verbose",
"=",
"_rectify_verb_read",
"(",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[util_io] * load_cPkl(%r)'",
"%",
"(",
"util_path",
".",
"tail",
"(",
"fpath",
",",
"n",
"=",
"n",
")",
",",
")",
")",
"try",
":",
"with",
"open",
"(",
"fpath",
",",
"'rb'",
")",
"as",
"file_",
":",
"data",
"=",
"pickle",
".",
"load",
"(",
"file_",
")",
"except",
"UnicodeDecodeError",
":",
"if",
"six",
".",
"PY3",
":",
"# try to open python2 pickle",
"with",
"open",
"(",
"fpath",
",",
"'rb'",
")",
"as",
"file_",
":",
"data",
"=",
"pickle",
".",
"load",
"(",
"file_",
",",
"encoding",
"=",
"'latin1'",
")",
"else",
":",
"raise",
"except",
"ValueError",
"as",
"ex",
":",
"if",
"six",
".",
"PY2",
":",
"if",
"ex",
".",
"message",
"==",
"'unsupported pickle protocol: 4'",
":",
"raise",
"ValueError",
"(",
"'unsupported Python3 pickle protocol 4 '",
"'in Python2 for fpath=%r'",
"%",
"(",
"fpath",
",",
")",
")",
"else",
":",
"raise",
"else",
":",
"raise",
"return",
"data"
] | Loads a pickled file with optional verbosity.
Aims for compatibility between python2 and python3.
TestPickleExtentsSimple:
>>> def makedata_simple():
>>> data = np.empty((500, 2 ** 20), dtype=np.uint8) + 1
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_simple()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleExtentsComplex:
>>> def makedata_complex():
>>> rng = np.random.RandomState(42)
>>> item1 = np.empty((100, 2 ** 20), dtype=np.uint8) + 1
>>> item2 = [np.empty((10, 2 ** 10), dtype=np.uint8) + 1
>>> for a in range(1000)]
>>> item3 = {a: np.empty(int(rng.rand() * 10), dtype=np.int16) + 1
>>> for a in range(100)}
>>> item4 = {np.int32(a): np.empty((int(rng.rand() * 10), 2), dtype=np.float64) + 1
>>> for a in range(200)}
>>> data = {'item1': item1, 'item2': item2,
>>> 'item3': item3, 'item4': item4}
>>> return data
>>> memtrack = ut.MemoryTracker()
>>> # create a large amount of data
>>> data = makedata_complex()
>>> memtrack.report()
>>> print(ut.get_object_size_str(data))
>>> fpath = 'tmp.pkl'
>>> ut.save_cPkl(fpath, data)
>>> print(ut.get_file_nBytes_str('tmp.pkl'))
>>> #del data
>>> memtrack.collect()
>>> memtrack.report()
>>> data2 = ut.load_cPkl(fpath)
>>> memtrack.report()
TestPickleCacher:
>>> memtrack = ut.MemoryTracker()
>>> cacher = ut.Cacher('tmp', cache_dir='.', cfgstr='foo')
>>> data3 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> data4 = cacher.ensure(makedata_complex)
>>> memtrack.report()
>>> memtrack = ut.MemoryTracker()
>>> fpath = '/home/joncrall/Desktop/smkcache/inva_PZ_Master1VUUIDS((5616)vxihbjwtggyovrto)_vpgwpcafbjkkpjdf.cPkl'
>>> print(ut.get_file_nBytes_str(fpath))
>>> data = ut.load_cPkl(fpath)
>>> memtrack.report()
def makedata_complex():
data = np.empty((1000, 2 ** 20), dtype=np.uint8)
data[:] = 0
return data | [
"Loads",
"a",
"pickled",
"file",
"with",
"optional",
"verbosity",
".",
"Aims",
"for",
"compatibility",
"between",
"python2",
"and",
"python3",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L259-L354 | train |
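A hedged sketch of load_cPkl with an ndarray payload (numpy and utool assumed available; the file name is illustrative):

    import numpy as np
    import utool as ut

    arr = np.arange(16, dtype=np.uint8)
    ut.save_cPkl('arr.pkl', arr)
    # on python3, load_cPkl falls back to encoding='latin1' for py2 pickles
    arr2 = ut.load_cPkl('arr.pkl', verbose=True)
    assert np.all(arr == arr2)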
Erotemic/utool | utool/util_io.py | save_hdf5 | def save_hdf5(fpath, data, verbose=None, compression='lzf'):
r"""
Restricted save of data using hdf5. Can only save ndarrays and dicts of
ndarrays.
Args:
fpath (str):
data (ndarray):
compression (str):
DEFLATE/GZIP - standard
LZF - fast
SHUFFLE - compression ratio
FLETCHER32 - error detection
Scale-offset - integer / float scaling and truncation
SZIP - fast and patented
CommandLine:
python -m utool.util_io --test-save_hdf5
References:
http://docs.h5py.org/en/latest/quick.html
http://docs.h5py.org/en/latest/mpi.html
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> rng = np.random.RandomState(0)
>>> data = (rng.rand(100000, 128) * 255).astype(np.uint8).copy()
>>> verbose = True
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'myfile.hdf5')
>>> compression = 'lzf'
>>> ut.delete(fpath)
>>> save_hdf5(fpath, data, verbose, compression)
>>> data2 = load_hdf5(fpath, verbose)
>>> assert data is not data2
>>> assert np.all(data == data2)
>>> assert ut.delete(fpath)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> rng = np.random.RandomState(0)
>>> data = {'name': 'foobar', 'x': [1, 2, 3], 'y': np.array([3, 2, 1])}
>>> ut.exec_funckw(save_hdf5, globals())
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'myfile2.hdf5')
>>> ut.delete(fpath)
>>> save_hdf5(fpath, data, verbose, compression)
>>> data2 = load_hdf5(fpath, verbose)
>>> assert data is not data2
>>> assert all([np.all(data[key] == data2[key]) for key in data.keys()])
>>> assert ut.delete(fpath)
Timeit:
>>> # DISABLE_DOCTEST
>>> # cPkl / numpy seems to be faster with this initial implementation
>>> import utool as ut
>>> data = (rng.rand(1000000, 128) * 255).astype(np.uint8).copy()
>>> print(ut.get_object_size_str(data))
>>> del data
>>> setup = ut.codeblock(
>>> '''
import numpy as np
import utool as ut
rng = np.random.RandomState(0)
fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'io_test_data')
data = (rng.rand(1000000, 128) * 255).astype(np.uint8).copy()
#print(ut.get_object_size_str(data))
''')
>>> # Test save time
>>> stmt_list1 = ut.codeblock(
>>> '''
ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')
ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')
ut.save_cPkl(fpath + '.cPkl', data, verbose=False)
ut.save_numpy(fpath + '.npy', data, verbose=False)
ut.save_pytables(fpath + '.tables', data, verbose=False)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list1, setup, int(10))
>>> # Test load time
>>> stmt_list2 = ut.codeblock(
>>> '''
ut.load_hdf5(fpath + '.hdf5', verbose=False)
ut.load_cPkl(fpath + '.cPkl', verbose=False)
ut.load_numpy(fpath + '.npy', verbose=False)
ut.load_pytables(fpath + '.tables', verbose=False)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list2, setup, int(10))
>>> print('finished timing')
+----------------
| TIMEIT COMPARE
+----------------
| iterations = 10
| Input:
| | num | stmt
| | 0 | u"ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')"
| | 1 | u"ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')"
| | 2 | u"ut.save_cPkl(fpath + '.cPkl', data, verbose=False)"
| | 3 | u"ut.save_numpy(fpath + '.npy', data, verbose=False)"
| | 4 | u"ut.save_pytables(fpath + '.tables', data, verbose=False)"
...
| Output:
| * PASSED: each statement produced the same result
| | num | total time | per loop | stmt
| | 0 | 0.03 ks | 3.15 s | ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')
| | 1 | 0.01 ks | 1.25 s | ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')
| | 2 | 5.30 s | 0.53 s | ut.save_cPkl(fpath + '.cPkl', data, verbose=False)
| | 3 | 4.97 s | 0.50 s | ut.save_numpy(fpath + '.npy', data, verbose=False)
| | 4 | 9.23 s | 0.92 s | ut.save_pytables(fpath + '.tables', data, verbose=False)
L_________________
+----------------
| TIMEIT COMPARE
+----------------
| iterations = 10
| Input:
| | num | stmt
| | 0 | u"ut.load_hdf5(fpath + '.hdf5', verbose=False)"
| | 1 | u"ut.load_cPkl(fpath + '.cPkl', verbose=False)"
| | 2 | u"ut.load_numpy(fpath + '.npy', verbose=False)"
| | 3 | u"ut.load_pytables(fpath + '.tables', verbose=False)"
...
| Output:
| * PASSED: each statement produced the same result
| | num | total time | per loop | stmt
| | 0 | 2.39 s | 0.24 s | ut.load_hdf5(fpath + '.hdf5', verbose=False)
| | 1 | 0.39 s | 0.04 s | ut.load_cPkl(fpath + '.cPkl', verbose=False)
| | 2 | 0.19 s | 0.02 s | ut.load_numpy(fpath + '.npy', verbose=False)
| | 3 | 0.33 s | 0.03 s | ut.load_pytables(fpath + '.tables', verbose=False)
L_________________
Ignore:
%timeit save_hdf5(fpath, data, verbose=False, compression='gzip')
%timeit save_hdf5(fpath, data, verbose=False, compression='lzf')
%timeit save_cPkl(fpath + '.cPkl', data, verbose=False)
%timeit save_pytables(fpath + '.tables', data, verbose=False)
1 loops, best of 3: 258 ms per loop
10 loops, best of 3: 111 ms per loop
10 loops, best of 3: 53.1 ms per loop
10 loops, best of 3: 96.5 ms per loop
save_hdf5(fpath, data, verbose=False, compression='gzip')
%timeit load_hdf5(fpath, verbose=False)
save_hdf5(fpath, data, verbose=False, compression='lzf')
%timeit load_hdf5(fpath, verbose=False)
%timeit load_cPkl(fpath + '.cPkl', verbose=False)
%timeit load_pytables(fpath + '.tables', verbose=False)
100 loops, best of 3: 19.4 ms per loop
100 loops, best of 3: 14.4 ms per loop
100 loops, best of 3: 3.92 ms per loop
100 loops, best of 3: 6.22 ms per loop
Notes:
pip install mpi4py
"""
import h5py
verbose = _rectify_verb_write(verbose)
if verbose:
print('[util_io] * save_hdf5(%r, data)' % (util_path.tail(fpath),))
if verbose > 1:
if isinstance(data, dict):
print('[util_io] ... shapes=%r' % ([val.shape for val in data.values()],))
else:
print('[util_io] ... shape=%r' % (data.shape,))
chunks = True # True enables auto-chunking
fname = basename(fpath)
# check for parallel hdf5
#have_mpi = h5py.h5.get_config().mpi
#if have_mpi:
# import mpi4py
# h5kw = dict(driver='mpio', comm=mpi4py.MPI.COMM_WORLD)
# # cant use compression with mpi
# #ValueError: Unable to create dataset (Parallel i/o does not support filters yet)
#else:
h5kw = {}
if isinstance(data, dict):
array_data = {key: val for key, val in data.items()
if isinstance(val, (list, np.ndarray))}
attr_data = {key: val for key, val in data.items() if key not in array_data}
#assert all([
# isinstance(vals, np.ndarray)
# for vals in six.itervalues(data)
#]), ('can only save dicts as ndarrays')
# file_ = h5py.File(fpath, 'w', **h5kw)
with h5py.File(fpath, mode='w', **h5kw) as file_:
grp = file_.create_group(fname)
for key, val in six.iteritems(array_data):
val = np.asarray(val)
dset = grp.create_dataset(
key, val.shape, val.dtype, chunks=chunks,
compression=compression)
dset[...] = val
for key, val in six.iteritems(attr_data):
grp.attrs[key] = val
else:
assert isinstance(data, np.ndarray)
shape = data.shape
dtype = data.dtype
#if verbose or (verbose is None and __PRINT_WRITES__):
# print('[util_io] * save_hdf5(%r, data)' % (util_path.tail(fpath),))
# file_ = h5py.File(fpath, 'w', **h5kw)
with h5py.File(fpath, mode='w', **h5kw) as file_:
#file_.create_dataset(
# fname, shape, dtype, chunks=chunks, compression=compression,
# data=data)
dset = file_.create_dataset(
fname, shape, dtype, chunks=chunks, compression=compression)
dset[...] = data | python | def save_hdf5(fpath, data, verbose=None, compression='lzf'):
r"""
Restricted save of data using hdf5. Can only save ndarrays and dicts of
ndarrays.
Args:
fpath (str):
data (ndarray):
compression (str):
DEFLATE/GZIP - standard
LZF - fast
SHUFFLE - compression ratio
FLETCHER32 - error detection
Scale-offset - integer / float scaling and truncation
SZIP - fast and patented
CommandLine:
python -m utool.util_io --test-save_hdf5
References:
http://docs.h5py.org/en/latest/quick.html
http://docs.h5py.org/en/latest/mpi.html
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> rng = np.random.RandomState(0)
>>> data = (rng.rand(100000, 128) * 255).astype(np.uint8).copy()
>>> verbose = True
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'myfile.hdf5')
>>> compression = 'lzf'
>>> ut.delete(fpath)
>>> save_hdf5(fpath, data, verbose, compression)
>>> data2 = load_hdf5(fpath, verbose)
>>> assert data is not data2
>>> assert np.all(data == data2)
>>> assert ut.delete(fpath)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> rng = np.random.RandomState(0)
>>> data = {'name': 'foobar', 'x': [1, 2, 3], 'y': np.array([3, 2, 1])}
>>> ut.exec_funckw(save_hdf5, globals())
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'myfile2.hdf5')
>>> ut.delete(fpath)
>>> save_hdf5(fpath, data, verbose, compression)
>>> data2 = load_hdf5(fpath, verbose)
>>> assert data is not data2
>>> assert all([np.all(data[key] == data2[key]) for key in data.keys()])
>>> assert ut.delete(fpath)
Timeit:
>>> # DISABLE_DOCTEST
>>> # cPkl / numpy seems to be faster with this initial implementation
>>> import utool as ut
>>> data = (rng.rand(1000000, 128) * 255).astype(np.uint8).copy()
>>> print(ut.get_object_size_str(data))
>>> del data
>>> setup = ut.codeblock(
>>> '''
import numpy as np
import utool as ut
rng = np.random.RandomState(0)
fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'io_test_data')
data = (rng.rand(1000000, 128) * 255).astype(np.uint8).copy()
#print(ut.get_object_size_str(data))
''')
>>> # Test save time
>>> stmt_list1 = ut.codeblock(
>>> '''
ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')
ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')
ut.save_cPkl(fpath + '.cPkl', data, verbose=False)
ut.save_numpy(fpath + '.npy', data, verbose=False)
ut.save_pytables(fpath + '.tables', data, verbose=False)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list1, setup, int(10))
>>> # Test load time
>>> stmt_list2 = ut.codeblock(
>>> '''
ut.load_hdf5(fpath + '.hdf5', verbose=False)
ut.load_cPkl(fpath + '.cPkl', verbose=False)
ut.load_numpy(fpath + '.npy', verbose=False)
ut.load_pytables(fpath + '.tables', verbose=False)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list2, setup, int(10))
>>> print('finished timing')
+----------------
| TIMEIT COMPARE
+----------------
| iterations = 10
| Input:
| | num | stmt
| | 0 | u"ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')"
| | 1 | u"ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')"
| | 2 | u"ut.save_cPkl(fpath + '.cPkl', data, verbose=False)"
| | 3 | u"ut.save_numpy(fpath + '.npy', data, verbose=False)"
| | 4 | u"ut.save_pytables(fpath + '.tables', data, verbose=False)"
...
| Output:
| * PASSED: each statement produced the same result
| | num | total time | per loop | stmt
| | 0 | 0.03 ks | 3.15 s | ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')
| | 1 | 0.01 ks | 1.25 s | ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')
| | 2 | 5.30 s | 0.53 s | ut.save_cPkl(fpath + '.cPkl', data, verbose=False)
| | 3 | 4.97 s | 0.50 s | ut.save_numpy(fpath + '.npy', data, verbose=False)
| | 4 | 9.23 s | 0.92 s | ut.save_pytables(fpath + '.tables', data, verbose=False)
L_________________
+----------------
| TIMEIT COMPARE
+----------------
| iterations = 10
| Input:
| | num | stmt
| | 0 | u"ut.load_hdf5(fpath + '.hdf5', verbose=False)"
| | 1 | u"ut.load_cPkl(fpath + '.cPkl', verbose=False)"
| | 2 | u"ut.load_numpy(fpath + '.npy', verbose=False)"
| | 3 | u"ut.load_pytables(fpath + '.tables', verbose=False)"
...
| Output:
| * PASSED: each statement produced the same result
| | num | total time | per loop | stmt
| | 0 | 2.39 s | 0.24 s | ut.load_hdf5(fpath + '.hdf5', verbose=False)
| | 1 | 0.39 s | 0.04 s | ut.load_cPkl(fpath + '.cPkl', verbose=False)
| | 2 | 0.19 s | 0.02 s | ut.load_numpy(fpath + '.npy', verbose=False)
| | 3 | 0.33 s | 0.03 s | ut.load_pytables(fpath + '.tables', verbose=False)
L_________________
Ignore:
%timeit save_hdf5(fpath, data, verbose=False, compression='gzip')
%timeit save_hdf5(fpath, data, verbose=False, compression='lzf')
%timeit save_cPkl(fpath + '.cPkl', data, verbose=False)
%timeit save_pytables(fpath + '.tables', data, verbose=False)
1 loops, best of 3: 258 ms per loop
10 loops, best of 3: 111 ms per loop
10 loops, best of 3: 53.1 ms per loop
10 loops, best of 3: 96.5 ms per loop
save_hdf5(fpath, data, verbose=False, compression='gzip')
%timeit load_hdf5(fpath, verbose=False)
save_hdf5(fpath, data, verbose=False, compression='lzf')
%timeit load_hdf5(fpath, verbose=False)
%timeit load_cPkl(fpath + '.cPkl', verbose=False)
%timeit load_pytables(fpath + '.tables', verbose=False)
100 loops, best of 3: 19.4 ms per loop
100 loops, best of 3: 14.4 ms per loop
100 loops, best of 3: 3.92 ms per loop
100 loops, best of 3: 6.22 ms per loop
Notes:
pip install mpi4py
"""
import h5py
verbose = _rectify_verb_write(verbose)
if verbose:
print('[util_io] * save_hdf5(%r, data)' % (util_path.tail(fpath),))
if verbose > 1:
if isinstance(data, dict):
print('[util_io] ... shapes=%r' % ([val.shape for val in data.values()],))
else:
print('[util_io] ... shape=%r' % (data.shape,))
chunks = True # True enables auto-chunking
fname = basename(fpath)
# check for parallel hdf5
#have_mpi = h5py.h5.get_config().mpi
#if have_mpi:
# import mpi4py
# h5kw = dict(driver='mpio', comm=mpi4py.MPI.COMM_WORLD)
# # cant use compression with mpi
# #ValueError: Unable to create dataset (Parallel i/o does not support filters yet)
#else:
h5kw = {}
if isinstance(data, dict):
array_data = {key: val for key, val in data.items()
if isinstance(val, (list, np.ndarray))}
attr_data = {key: val for key, val in data.items() if key not in array_data}
#assert all([
# isinstance(vals, np.ndarray)
# for vals in six.itervalues(data)
#]), ('can only save dicts as ndarrays')
# file_ = h5py.File(fpath, 'w', **h5kw)
with h5py.File(fpath, mode='w', **h5kw) as file_:
grp = file_.create_group(fname)
for key, val in six.iteritems(array_data):
val = np.asarray(val)
dset = grp.create_dataset(
key, val.shape, val.dtype, chunks=chunks,
compression=compression)
dset[...] = val
for key, val in six.iteritems(attr_data):
grp.attrs[key] = val
else:
assert isinstance(data, np.ndarray)
shape = data.shape
dtype = data.dtype
#if verbose or (verbose is None and __PRINT_WRITES__):
# print('[util_io] * save_hdf5(%r, data)' % (util_path.tail(fpath),))
# file_ = h5py.File(fpath, 'w', **h5kw)
with h5py.File(fpath, mode='w', **h5kw) as file_:
#file_.create_dataset(
# fname, shape, dtype, chunks=chunks, compression=compression,
# data=data)
dset = file_.create_dataset(
fname, shape, dtype, chunks=chunks, compression=compression)
dset[...] = data | [
"def",
"save_hdf5",
"(",
"fpath",
",",
"data",
",",
"verbose",
"=",
"None",
",",
"compression",
"=",
"'lzf'",
")",
":",
"import",
"h5py",
"verbose",
"=",
"_rectify_verb_write",
"(",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[util_io] * save_hdf5(%r, data)'",
"%",
"(",
"util_path",
".",
"tail",
"(",
"fpath",
")",
",",
")",
")",
"if",
"verbose",
">",
"1",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"print",
"(",
"'[util_io] ... shapes=%r'",
"%",
"(",
"[",
"val",
".",
"shape",
"for",
"val",
"in",
"data",
".",
"values",
"(",
")",
"]",
",",
")",
")",
"else",
":",
"print",
"(",
"'[util_io] ... shape=%r'",
"%",
"(",
"data",
".",
"shape",
",",
")",
")",
"chunks",
"=",
"True",
"# True enables auto-chunking",
"fname",
"=",
"basename",
"(",
"fpath",
")",
"# check for parallel hdf5",
"#have_mpi = h5py.h5.get_config().mpi",
"#if have_mpi:",
"# import mpi4py",
"# h5kw = dict(driver='mpio', comm=mpi4py.MPI.COMM_WORLD)",
"# # cant use compression with mpi",
"# #ValueError: Unable to create dataset (Parallel i/o does not support filters yet)",
"#else:",
"h5kw",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"array_data",
"=",
"{",
"key",
":",
"val",
"for",
"key",
",",
"val",
"in",
"data",
".",
"items",
"(",
")",
"if",
"isinstance",
"(",
"val",
",",
"(",
"list",
",",
"np",
".",
"ndarray",
")",
")",
"}",
"attr_data",
"=",
"{",
"key",
":",
"val",
"for",
"key",
",",
"val",
"in",
"data",
".",
"items",
"(",
")",
"if",
"key",
"not",
"in",
"array_data",
"}",
"#assert all([",
"# isinstance(vals, np.ndarray)",
"# for vals in six.itervalues(data)",
"#]), ('can only save dicts as ndarrays')",
"# file_ = h5py.File(fpath, 'w', **h5kw)",
"with",
"h5py",
".",
"File",
"(",
"fpath",
",",
"mode",
"=",
"'w'",
",",
"*",
"*",
"h5kw",
")",
"as",
"file_",
":",
"grp",
"=",
"file_",
".",
"create_group",
"(",
"fname",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"array_data",
")",
":",
"val",
"=",
"np",
".",
"asarray",
"(",
"val",
")",
"dset",
"=",
"grp",
".",
"create_dataset",
"(",
"key",
",",
"val",
".",
"shape",
",",
"val",
".",
"dtype",
",",
"chunks",
"=",
"chunks",
",",
"compression",
"=",
"compression",
")",
"dset",
"[",
"...",
"]",
"=",
"val",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"attr_data",
")",
":",
"grp",
".",
"attrs",
"[",
"key",
"]",
"=",
"val",
"else",
":",
"assert",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"shape",
"=",
"data",
".",
"shape",
"dtype",
"=",
"data",
".",
"dtype",
"#if verbose or (verbose is None and __PRINT_WRITES__):",
"# print('[util_io] * save_hdf5(%r, data)' % (util_path.tail(fpath),))",
"# file_ = h5py.File(fpath, 'w', **h5kw)",
"with",
"h5py",
".",
"File",
"(",
"fpath",
",",
"mode",
"=",
"'w'",
",",
"*",
"*",
"h5kw",
")",
"as",
"file_",
":",
"#file_.create_dataset(",
"# fname, shape, dtype, chunks=chunks, compression=compression,",
"# data=data)",
"dset",
"=",
"file_",
".",
"create_dataset",
"(",
"fname",
",",
"shape",
",",
"dtype",
",",
"chunks",
"=",
"chunks",
",",
"compression",
"=",
"compression",
")",
"dset",
"[",
"...",
"]",
"=",
"data"
] | r"""
Restricted save of data using hdf5. Can only save ndarrays and dicts of
ndarrays.
Args:
fpath (str):
data (ndarray):
compression (str):
DEFLATE/GZIP - standard
LZF - fast
SHUFFLE - compression ratio
FLETCHER32 - error detection
Scale-offset - integer / float scaling and truncation
SZIP - fast and patented
CommandLine:
python -m utool.util_io --test-save_hdf5
References:
http://docs.h5py.org/en/latest/quick.html
http://docs.h5py.org/en/latest/mpi.html
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> rng = np.random.RandomState(0)
>>> data = (rng.rand(100000, 128) * 255).astype(np.uint8).copy()
>>> verbose = True
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'myfile.hdf5')
>>> compression = 'lzf'
>>> ut.delete(fpath)
>>> save_hdf5(fpath, data, verbose, compression)
>>> data2 = load_hdf5(fpath, verbose)
>>> assert data is not data2
>>> assert np.all(data == data2)
>>> assert ut.delete(fpath)
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> rng = np.random.RandomState(0)
>>> data = {'name': 'foobar', 'x': [1, 2, 3], 'y': np.array([3, 2, 1])}
>>> ut.exec_funckw(save_hdf5, globals())
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'myfile2.hdf5')
>>> ut.delete(fpath)
>>> save_hdf5(fpath, data, verbose, compression)
>>> data2 = load_hdf5(fpath, verbose)
>>> assert data is not data2
>>> assert all([np.all(data[key] == data2[key]) for key in data.keys()])
>>> assert ut.delete(fpath)
Timeit:
>>> # DISABLE_DOCTEST
>>> # cPkl / numpy seems to be faster with this initial implementation
>>> import utool as ut
>>> data = (rng.rand(1000000, 128) * 255).astype(np.uint8).copy()
>>> print(ut.get_object_size_str(data))
>>> del data
>>> setup = ut.codeblock(
>>> '''
import numpy as np
import utool as ut
rng = np.random.RandomState(0)
fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'io_test_data')
data = (rng.rand(1000000, 128) * 255).astype(np.uint8).copy()
#print(ut.get_object_size_str(data))
''')
>>> # Test save time
>>> stmt_list1 = ut.codeblock(
>>> '''
ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')
ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')
ut.save_cPkl(fpath + '.cPkl', data, verbose=False)
ut.save_numpy(fpath + '.npy', data, verbose=False)
ut.save_pytables(fpath + '.tables', data, verbose=False)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list1, setup, int(10))
>>> # Test load time
>>> stmt_list2 = ut.codeblock(
>>> '''
ut.load_hdf5(fpath + '.hdf5', verbose=False)
ut.load_cPkl(fpath + '.cPkl', verbose=False)
ut.load_numpy(fpath + '.npy', verbose=False)
ut.load_pytables(fpath + '.tables', verbose=False)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list2, setup, int(10))
>>> print('finished timing')
+----------------
| TIMEIT COMPARE
+----------------
| iterations = 10
| Input:
| | num | stmt
| | 0 | u"ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')"
| | 1 | u"ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')"
| | 2 | u"ut.save_cPkl(fpath + '.cPkl', data, verbose=False)"
| | 3 | u"ut.save_numpy(fpath + '.npy', data, verbose=False)"
| | 4 | u"ut.save_pytables(fpath + '.tables', data, verbose=False)"
...
| Output:
| * PASSED: each statement produced the same result
| | num | total time | per loop | stmt
| | 0 | 0.03 ks | 3.15 s | ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='gzip')
| | 1 | 0.01 ks | 1.25 s | ut.save_hdf5(fpath + '.hdf5', data, verbose=False, compression='lzf')
| | 2 | 5.30 s | 0.53 s | ut.save_cPkl(fpath + '.cPkl', data, verbose=False)
| | 3 | 4.97 s | 0.50 s | ut.save_numpy(fpath + '.npy', data, verbose=False)
| | 4 | 9.23 s | 0.92 s | ut.save_pytables(fpath + '.tables', data, verbose=False)
L_________________
+----------------
| TIMEIT COMPARE
+----------------
| iterations = 10
| Input:
| | num | stmt
| | 0 | u"ut.load_hdf5(fpath + '.hdf5', verbose=False)"
| | 1 | u"ut.load_cPkl(fpath + '.cPkl', verbose=False)"
| | 2 | u"ut.load_numpy(fpath + '.npy', verbose=False)"
| | 3 | u"ut.load_pytables(fpath + '.tables', verbose=False)"
...
| Output:
| * PASSED: each statement produced the same result
| | num | total time | per loop | stmt
| | 0 | 2.39 s | 0.24 s | ut.load_hdf5(fpath + '.hdf5', verbose=False)
| | 1 | 0.39 s | 0.04 s | ut.load_cPkl(fpath + '.cPkl', verbose=False)
| | 2 | 0.19 s | 0.02 s | ut.load_numpy(fpath + '.npy', verbose=False)
| | 3 | 0.33 s | 0.03 s | ut.load_pytables(fpath + '.tables', verbose=False)
L_________________
Ignore:
%timeit save_hdf5(fpath, data, verbose=False, compression='gzip')
%timeit save_hdf5(fpath, data, verbose=False, compression='lzf')
%timeit save_cPkl(fpath + '.cPkl', data, verbose=False)
%timeit save_pytables(fpath + '.tables', data, verbose=False)
1 loops, best of 3: 258 ms per loop
10 loops, best of 3: 111 ms per loop
10 loops, best of 3: 53.1 ms per loop
10 loops, best of 3: 96.5 ms per loop
save_hdf5(fpath, data, verbose=False, compression='gzip')
%timeit load_hdf5(fpath, verbose=False)
save_hdf5(fpath, data, verbose=False, compression='lzf')
%timeit load_hdf5(fpath, verbose=False)
%timeit load_cPkl(fpath + '.cPkl', verbose=False)
%timeit load_pytables(fpath + '.tables', verbose=False)
100 loops, best of 3: 19.4 ms per loop
100 loops, best of 3: 14.4 ms per loop
100 loops, best of 3: 3.92 ms per loop
100 loops, best of 3: 6.22 ms per loop
Notes:
pip install mpi4py | [
"r",
"Restricted",
"save",
"of",
"data",
"using",
"hdf5",
".",
"Can",
"only",
"save",
"ndarrays",
"and",
"dicts",
"of",
"ndarrays",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L398-L612 | train |
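Condensed from the record's first doctest, a sketch of the save_hdf5/load_hdf5 round trip (requires h5py; the array is shrunk for brevity and the path is illustrative):

    import numpy as np
    import utool as ut

    rng = np.random.RandomState(0)
    data = (rng.rand(1000, 128) * 255).astype(np.uint8)
    ut.save_hdf5('myfile.hdf5', data, compression='lzf')
    data2 = ut.load_hdf5('myfile.hdf5')
    assert data is not data2 and np.all(data == data2)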
Erotemic/utool | utool/util_io.py | save_pytables | def save_pytables(fpath, data, verbose=False):
"""
sudo pip install numexpr
sudo pip install tables
References:
https://pytables.github.io/cookbook/py2exe_howto.html
https://gist.github.com/andrewgiessel/7515520
http://stackoverflow.com/questions/8843062/python-how-to-store-a-numpy-multidimensional-array-in-pytables
http://pytables.github.io/usersguide/tutorials.html#creating-new-array-objects
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> # build test data
>>> verbose = True
>>> fpath = 'myfile.pytables.hdf5'
>>> np.random.seed(0)
>>> compression = 'gzip'
>>> data = (np.random.rand(100000, 128) * 255).astype(np.uint8).copy()
>>> # execute function
>>> ut.delete(fpath)
>>> save_pytables(fpath, data, verbose)
>>> data2 = load_pytables(fpath, verbose)
>>> assert data is not data2
>>> assert np.all(data == data2)
>>> assert ut.delete(fpath)
"""
import tables
#from os.path import basename
#fname = basename(fpath)
#shape = data.shape
#dtype = data.dtype
#file_ = tables.open_file(fpath)
verbose = _rectify_verb_write(verbose)
if verbose:
print('[util_io] * save_pytables(%r, data)' % (util_path.tail(fpath),))
with tables.open_file(fpath, 'w') as file_:
atom = tables.Atom.from_dtype(data.dtype)
filters = tables.Filters(complib='blosc', complevel=5)
dset = file_.createCArray(file_.root, 'data', atom, data.shape, filters=filters)
# save w/o compressive filter
#dset = file_.createCArray(file_.root, 'all_data', atom, all_data.shape)
dset[:] = data | python | def save_pytables(fpath, data, verbose=False):
"""
sudo pip install numexpr
sudo pip install tables
References:
https://pytables.github.io/cookbook/py2exe_howto.html
https://gist.github.com/andrewgiessel/7515520
http://stackoverflow.com/questions/8843062/python-how-to-store-a-numpy-multidimensional-array-in-pytables
http://pytables.github.io/usersguide/tutorials.html#creating-new-array-objects
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> # build test data
>>> verbose = True
>>> fpath = 'myfile.pytables.hdf5'
>>> np.random.seed(0)
>>> compression = 'gzip'
>>> data = (np.random.rand(100000, 128) * 255).astype(np.uint8).copy()
>>> # execute function
>>> ut.delete(fpath)
>>> save_pytables(fpath, data, verbose)
>>> data2 = load_pytables(fpath, verbose)
>>> assert data is not data2
>>> assert np.all(data == data2)
>>> assert ut.delete(fpath)
"""
import tables
#from os.path import basename
#fname = basename(fpath)
#shape = data.shape
#dtype = data.dtype
#file_ = tables.open_file(fpath)
verbose = _rectify_verb_write(verbose)
if verbose:
print('[util_io] * save_pytables(%r, data)' % (util_path.tail(fpath),))
with tables.open_file(fpath, 'w') as file_:
atom = tables.Atom.from_dtype(data.dtype)
filters = tables.Filters(complib='blosc', complevel=5)
dset = file_.createCArray(file_.root, 'data', atom, data.shape, filters=filters)
# save w/o compressive filter
#dset = file_.createCArray(file_.root, 'all_data', atom, all_data.shape)
dset[:] = data | [
"def",
"save_pytables",
"(",
"fpath",
",",
"data",
",",
"verbose",
"=",
"False",
")",
":",
"import",
"tables",
"#from os.path import basename",
"#fname = basename(fpath)",
"#shape = data.shape",
"#dtype = data.dtype",
"#file_ = tables.open_file(fpath)",
"verbose",
"=",
"_rectify_verb_write",
"(",
"verbose",
")",
"if",
"verbose",
":",
"print",
"(",
"'[util_io] * save_pytables(%r, data)'",
"%",
"(",
"util_path",
".",
"tail",
"(",
"fpath",
")",
",",
")",
")",
"with",
"tables",
".",
"open_file",
"(",
"fpath",
",",
"'w'",
")",
"as",
"file_",
":",
"atom",
"=",
"tables",
".",
"Atom",
".",
"from_dtype",
"(",
"data",
".",
"dtype",
")",
"filters",
"=",
"tables",
".",
"Filters",
"(",
"complib",
"=",
"'blosc'",
",",
"complevel",
"=",
"5",
")",
"dset",
"=",
"file_",
".",
"createCArray",
"(",
"file_",
".",
"root",
",",
"'data'",
",",
"atom",
",",
"data",
".",
"shape",
",",
"filters",
"=",
"filters",
")",
"# save w/o compressive filter",
"#dset = file_.createCArray(file_.root, 'all_data', atom, all_data.shape)",
"dset",
"[",
":",
"]",
"=",
"data"
] | sudo pip install numexpr
sudo pip install tables
References:
https://pytables.github.io/cookbook/py2exe_howto.html
https://gist.github.com/andrewgiessel/7515520
http://stackoverflow.com/questions/8843062/python-how-to-store-a-numpy-multidimensional-array-in-pytables
http://pytables.github.io/usersguide/tutorials.html#creating-new-array-objects
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_io import * # NOQA
>>> import numpy as np
>>> import utool as ut
>>> # build test data
>>> verbose = True
>>> fpath = 'myfile.pytables.hdf5'
>>> np.random.seed(0)
>>> compression = 'gzip'
>>> data = (np.random.rand(100000, 128) * 255).astype(np.uint8).copy()
>>> # execute function
>>> ut.delete(fpath)
>>> save_pytables(fpath, data, verbose)
>>> data2 = load_pytables(fpath, verbose)
>>> assert data is not data2
>>> assert np.all(data == data2)
>>> assert ut.delete(fpath) | [
"sudo",
"pip",
"install",
"numexpr",
"sudo",
"pip",
"install",
"tables"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_io.py#L648-L693 | train |
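A round-trip example mirroring the record's doctest for save_pytables (requires the numexpr and tables packages noted in the docstring; the path is illustrative):

    import numpy as np
    import utool as ut

    data = (np.random.rand(1000, 128) * 255).astype(np.uint8)
    # stored as a compressed CArray using the blosc filter
    ut.save_pytables('myfile.pytables.hdf5', data, verbose=True)
    data2 = ut.load_pytables('myfile.pytables.hdf5')
    assert np.all(data == data2)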
Erotemic/utool | utool/util_web.py | start_simple_webserver | def start_simple_webserver(domain=None, port=5832):
r"""
simple webserver that echoes its arguments
Args:
domain (None): (default = None)
port (int): (default = 5832)
CommandLine:
python -m utool.util_web --exec-start_simple_webserver:0
python -m utool.util_web --exec-start_simple_webserver:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_web import * # NOQA
>>> domain = None
>>> port = 5832
>>> result = start_simple_webserver(domain, port)
>>> print(result)
"""
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornado.wsgi
import flask
app = flask.Flask('__simple__')
@app.route('/', methods=['GET', 'POST', 'DELETE', 'PUT'])
def echo_args(*args, **kwargs):
from flask import request
print('Simple server was pinged')
print('args = %r' % (args,))
print('kwargs = %r' % (kwargs,))
print('request.args = %r' % (request.args,))
print('request.form = %r' % (request.form,))
return ''
if domain is None:
domain = get_localhost()
app.server_domain = domain
app.server_port = port
app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port)
print('app.server_url = %s' % (app.server_url,))
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(app.server_port)
tornado.ioloop.IOLoop.instance().start() | python | def start_simple_webserver(domain=None, port=5832):
r"""
simple webserver that echoes its arguments
Args:
domain (None): (default = None)
port (int): (default = 5832)
CommandLine:
python -m utool.util_web --exec-start_simple_webserver:0
python -m utool.util_web --exec-start_simple_webserver:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_web import * # NOQA
>>> domain = None
>>> port = 5832
>>> result = start_simple_webserver(domain, port)
>>> print(result)
"""
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornado.wsgi
import flask
app = flask.Flask('__simple__')
@app.route('/', methods=['GET', 'POST', 'DELETE', 'PUT'])
def echo_args(*args, **kwargs):
from flask import request
print('Simple server was pinged')
print('args = %r' % (args,))
print('kwargs = %r' % (kwargs,))
print('request.args = %r' % (request.args,))
print('request.form = %r' % (request.form,))
return ''
if domain is None:
domain = get_localhost()
app.server_domain = domain
app.server_port = port
app.server_url = 'http://%s:%s' % (app.server_domain, app.server_port)
print('app.server_url = %s' % (app.server_url,))
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(app.server_port)
tornado.ioloop.IOLoop.instance().start() | [
"def",
"start_simple_webserver",
"(",
"domain",
"=",
"None",
",",
"port",
"=",
"5832",
")",
":",
"import",
"tornado",
".",
"ioloop",
"import",
"tornado",
".",
"web",
"import",
"tornado",
".",
"httpserver",
"import",
"tornado",
".",
"wsgi",
"import",
"flask",
"app",
"=",
"flask",
".",
"Flask",
"(",
"'__simple__'",
")",
"@",
"app",
".",
"route",
"(",
"'/'",
",",
"methods",
"=",
"[",
"'GET'",
",",
"'POST'",
",",
"'DELETE'",
",",
"'PUT'",
"]",
")",
"def",
"echo_args",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"flask",
"import",
"request",
"print",
"(",
"'Simple server was pinged'",
")",
"print",
"(",
"'args = %r'",
"%",
"(",
"args",
",",
")",
")",
"print",
"(",
"'kwargs = %r'",
"%",
"(",
"kwargs",
",",
")",
")",
"print",
"(",
"'request.args = %r'",
"%",
"(",
"request",
".",
"args",
",",
")",
")",
"print",
"(",
"'request.form = %r'",
"%",
"(",
"request",
".",
"form",
",",
")",
")",
"return",
"''",
"if",
"domain",
"is",
"None",
":",
"domain",
"=",
"get_localhost",
"(",
")",
"app",
".",
"server_domain",
"=",
"domain",
"app",
".",
"server_port",
"=",
"port",
"app",
".",
"server_url",
"=",
"'http://%s:%s'",
"%",
"(",
"app",
".",
"server_domain",
",",
"app",
".",
"server_port",
")",
"print",
"(",
"'app.server_url = %s'",
"%",
"(",
"app",
".",
"server_url",
",",
")",
")",
"http_server",
"=",
"tornado",
".",
"httpserver",
".",
"HTTPServer",
"(",
"tornado",
".",
"wsgi",
".",
"WSGIContainer",
"(",
"app",
")",
")",
"http_server",
".",
"listen",
"(",
"app",
".",
"server_port",
")",
"tornado",
".",
"ioloop",
".",
"IOLoop",
".",
"instance",
"(",
")",
".",
"start",
"(",
")"
] | r"""
simple webserver that echoes its arguments
Args:
domain (None): (default = None)
port (int): (default = 5832)
CommandLine:
python -m utool.util_web --exec-start_simple_webserver:0
python -m utool.util_web --exec-start_simple_webserver:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_web import * # NOQA
>>> domain = None
>>> port = 5832
>>> result = start_simple_webserver(domain, port)
>>> print(result) | [
"r",
"simple",
"webserver",
"that",
"echos",
"its",
"arguments"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_web.py#L66-L110 | train |
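A usage sketch for start_simple_webserver; note the call blocks inside tornado's IOLoop, so run it in its own process (the port value mirrors the default):

    import utool as ut

    # serves a flask app that echoes request args for GET/POST/DELETE/PUT;
    # ping it with e.g. curl 'http://localhost:5832/?x=1'
    ut.start_simple_webserver(port=5832)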
Erotemic/utool | utool/util_web.py | render_html | def render_html(html_str):
"""
makes a temporary html rendering
"""
import utool as ut
from os.path import abspath
import webbrowser
try:
html_str = html_str.decode('utf8')
except Exception:
pass
html_dpath = ut.ensure_app_resource_dir('utool', 'temp_html')
fpath = abspath(ut.unixjoin(html_dpath, 'temp.html'))
url = 'file://' + fpath
ut.writeto(fpath, html_str)
webbrowser.open(url) | python | def render_html(html_str):
"""
makes a temporary html rendering
"""
import utool as ut
from os.path import abspath
import webbrowser
try:
html_str = html_str.decode('utf8')
except Exception:
pass
html_dpath = ut.ensure_app_resource_dir('utool', 'temp_html')
fpath = abspath(ut.unixjoin(html_dpath, 'temp.html'))
url = 'file://' + fpath
ut.writeto(fpath, html_str)
webbrowser.open(url) | [
"def",
"render_html",
"(",
"html_str",
")",
":",
"import",
"utool",
"as",
"ut",
"from",
"os",
".",
"path",
"import",
"abspath",
"import",
"webbrowser",
"try",
":",
"html_str",
"=",
"html_str",
".",
"decode",
"(",
"'utf8'",
")",
"except",
"Exception",
":",
"pass",
"html_dpath",
"=",
"ut",
".",
"ensure_app_resource_dir",
"(",
"'utool'",
",",
"'temp_html'",
")",
"fpath",
"=",
"abspath",
"(",
"ut",
".",
"unixjoin",
"(",
"html_dpath",
",",
"'temp.html'",
")",
")",
"url",
"=",
"'file://'",
"+",
"fpath",
"ut",
".",
"writeto",
"(",
"fpath",
",",
"html_str",
")",
"webbrowser",
".",
"open",
"(",
"url",
")"
] | makes a temporary html rendering | [
"makes",
"a",
"temporary",
"html",
"rendering"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_web.py#L113-L130 | train |
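Finally, a one-line sketch for render_html (the markup string is illustrative; the call opens the system browser):

    import utool as ut

    # writes the string to <app resource dir>/temp_html/temp.html and
    # opens it in the default browser via a file:// url
    ut.render_html('<h1>hello</h1>')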