repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition
---|---|---|---|---|---|---|---|---|---|---|---|
Erotemic/utool | utool/util_list.py | length_hint | def length_hint(obj, default=0):
"""
Return an estimate of the number of items in obj.
This is the PEP 424 implementation.
If the object supports len(), the result will be
exact. Otherwise, it may over- or under-estimate by an
arbitrary amount. The result will be an integer >= 0.
"""
try:
return len(obj)
except TypeError:
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return default
try:
hint = get_hint(obj)
except TypeError:
return default
if hint is NotImplemented:
return default
if not isinstance(hint, int):
raise TypeError("Length hint must be an integer, not %r" %
type(hint))
if hint < 0:
raise ValueError("__length_hint__() should return >= 0")
return hint | python | def length_hint(obj, default=0):
"""
Return an estimate of the number of items in obj.
This is the PEP 424 implementation.
If the object supports len(), the result will be
exact. Otherwise, it may over- or under-estimate by an
arbitrary amount. The result will be an integer >= 0.
"""
try:
return len(obj)
except TypeError:
try:
get_hint = type(obj).__length_hint__
except AttributeError:
return default
try:
hint = get_hint(obj)
except TypeError:
return default
if hint is NotImplemented:
return default
if not isinstance(hint, int):
raise TypeError("Length hint must be an integer, not %r" %
type(hint))
if hint < 0:
raise ValueError("__length_hint__() should return >= 0")
return hint | [
"def",
"length_hint",
"(",
"obj",
",",
"default",
"=",
"0",
")",
":",
"try",
":",
"return",
"len",
"(",
"obj",
")",
"except",
"TypeError",
":",
"try",
":",
"get_hint",
"=",
"type",
"(",
"obj",
")",
".",
"__length_hint__",
"except",
"AttributeError",
":",
"return",
"default",
"try",
":",
"hint",
"=",
"get_hint",
"(",
"obj",
")",
"except",
"TypeError",
":",
"return",
"default",
"if",
"hint",
"is",
"NotImplemented",
":",
"return",
"default",
"if",
"not",
"isinstance",
"(",
"hint",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"\"Length hint must be an integer, not %r\"",
"%",
"type",
"(",
"hint",
")",
")",
"if",
"hint",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"__length_hint__() should return >= 0\"",
")",
"return",
"hint"
] | Return an estimate of the number of items in obj.
This is the PEP 424 implementation.
If the object supports len(), the result will be
exact. Otherwise, it may over- or under-estimate by an
arbitrary amount. The result will be an integer >= 0. | [
"Return",
"an",
"estimate",
"of",
"the",
"number",
"of",
"items",
"in",
"obj",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_list.py#L3459-L3486 | train |
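The docstring above describes a three-step fallback: exact `len()`, then the type's `__length_hint__`, then the caller's default. A minimal sketch exercising all three branches, assuming `length_hint` from the row above is in scope (`Countdown` is a hypothetical class):

```python
class Countdown:
    """Iterator with no __len__ but a __length_hint__ estimate."""
    def __init__(self, n):
        self.n = n
    def __iter__(self):
        return self
    def __next__(self):
        if self.n <= 0:
            raise StopIteration
        self.n -= 1
        return self.n
    def __length_hint__(self):
        return self.n  # estimate of remaining items

print(length_hint([1, 2, 3]))     # 3  -- len() works, result is exact
print(length_hint(Countdown(5)))  # 5  -- falls back to __length_hint__
print(length_hint(object(), 42))  # 42 -- neither works, default returned
```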
product-definition-center/pdc-client | pdc_client/plugin_helpers.py | add_parser_arguments | def add_parser_arguments(parser, args, group=None, prefix=DATA_PREFIX):
"""
Helper method that populates parser arguments. The argument values can
be later retrieved with `extract_arguments` method.
The `args` argument to this method should be a dict with strings as
keys and dicts as values. The keys will be used as keys in returned
data. Their values will be passed as kwargs to `parser.add_argument`.
There is a special value `arg` that will be used as the argument name if
present; otherwise a name will be generated based on the key.
If `group` is a string, it will be used as group header in help output.
"""
if group:
parser = parser.add_argument_group(group)
for arg, kwargs in iteritems(args):
arg_name = kwargs.pop('arg', arg.replace('_', '-'))
if 'metavar' not in kwargs:
kwargs['metavar'] = arg.upper()
if 'dest' in kwargs:
kwargs['dest'] = prefix + kwargs['dest']
else:
kwargs['dest'] = prefix + arg
parser.add_argument('--' + arg_name, **kwargs) | python | def add_parser_arguments(parser, args, group=None, prefix=DATA_PREFIX):
"""
Helper method that populates parser arguments. The argument values can
be later retrieved with `extract_arguments` method.
The `args` argument to this method should be a dict with strings as
keys and dicts as values. The keys will be used as keys in returned
data. Their values will be passed as kwargs to `parser.add_argument`.
There is a special value `arg` that will be used as the argument name if
present; otherwise a name will be generated based on the key.
If `group` is a string, it will be used as group header in help output.
"""
if group:
parser = parser.add_argument_group(group)
for arg, kwargs in iteritems(args):
arg_name = kwargs.pop('arg', arg.replace('_', '-'))
if 'metavar' not in kwargs:
kwargs['metavar'] = arg.upper()
if 'dest' in kwargs:
kwargs['dest'] = prefix + kwargs['dest']
else:
kwargs['dest'] = prefix + arg
parser.add_argument('--' + arg_name, **kwargs) | [
"def",
"add_parser_arguments",
"(",
"parser",
",",
"args",
",",
"group",
"=",
"None",
",",
"prefix",
"=",
"DATA_PREFIX",
")",
":",
"if",
"group",
":",
"parser",
"=",
"parser",
".",
"add_argument_group",
"(",
"group",
")",
"for",
"arg",
",",
"kwargs",
"in",
"iteritems",
"(",
"args",
")",
":",
"arg_name",
"=",
"kwargs",
".",
"pop",
"(",
"'arg'",
",",
"arg",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
")",
"if",
"'metavar'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'metavar'",
"]",
"=",
"arg",
".",
"upper",
"(",
")",
"if",
"'dest'",
"in",
"kwargs",
":",
"kwargs",
"[",
"'dest'",
"]",
"=",
"prefix",
"+",
"kwargs",
"[",
"'dest'",
"]",
"else",
":",
"kwargs",
"[",
"'dest'",
"]",
"=",
"prefix",
"+",
"arg",
"parser",
".",
"add_argument",
"(",
"'--'",
"+",
"arg_name",
",",
"*",
"*",
"kwargs",
")"
] | Helper method that populates parser arguments. The argument values can
be later retrieved with `extract_arguments` method.
The `args` argument to this method should be a dict with strings as
keys and dicts as values. The keys will be used as keys in returned
data. Their values will be passed as kwargs to `parser.add_argument`.
There is a special value `arg` that will be used as the argument name if
present; otherwise a name will be generated based on the key.
If `group` is a string, it will be used as group header in help output. | [
"Helper",
"method",
"that",
"populates",
"parser",
"arguments",
".",
"The",
"argument",
"values",
"can",
"be",
"later",
"retrieved",
"with",
"extract_arguments",
"method",
"."
] | 7236fd8b72e675ebb321bbe337289d9fbeb6119f | https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L102-L125 | train |
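A usage sketch for the dict-of-dicts convention (the argument spec here is hypothetical; `add_parser_arguments` and `DATA_PREFIX` are assumed importable from `pdc_client.plugin_helpers`):

```python
import argparse

parser = argparse.ArgumentParser()
add_parser_arguments(parser, {
    'release_name': {'help': 'name of the release'},      # flag: --release-name
    'owner': {'arg': 'owned-by', 'help': 'owner login'},  # explicit flag name
}, group='filtering')

ns = parser.parse_args(['--release-name', 'rhel-7', '--owned-by', 'alice'])
# Each value is stored on the namespace under a DATA_PREFIX-prefixed dest,
# ready to be recovered by extract_arguments (see that row below).
print(vars(ns))
```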
product-definition-center/pdc-client | pdc_client/plugin_helpers.py | add_mutually_exclusive_args | def add_mutually_exclusive_args(parser, args, required=False, prefix=DATA_PREFIX):
"""
Helper method that populates mutually exclusive arguments. The argument values can
be later retrieved with `extract_arguments` method.
The `args` argument to this method should be a dict with strings as
keys and dicts as values. The keys will be used as keys in returned
data. Their values will be passed as kwargs to `parser.add_argument`.
There is a special value `arg` that will be used as the argument name if
present; otherwise a name will be generated based on the key.
``required`` will be passed to `parser.add_mutually_exclusive_group` to
indicate that at least one of the mutually exclusive arguments is required.
"""
parser = parser.add_mutually_exclusive_group(required=required)
for arg, kwargs in iteritems(args):
arg_name = kwargs.pop('arg', arg.replace('_', '-'))
if 'metavar' not in kwargs:
kwargs['metavar'] = arg.upper()
parser.add_argument('--' + arg_name, dest=prefix + arg, **kwargs) | python | def add_mutually_exclusive_args(parser, args, required=False, prefix=DATA_PREFIX):
"""
Helper method that populates mutually exclusive arguments. The argument values can
be later retrieved with `extract_arguments` method.
The `args` argument to this method should be a dict with strings as
keys and dicts as values. The keys will be used as keys in returned
data. Their values will be passed as kwargs to `parser.add_argument`.
There is a special value `arg` that will be used as the argument name if
present; otherwise a name will be generated based on the key.
``required`` will be passed to `parser.add_mutually_exclusive_group` to
indicate that at least one of the mutually exclusive arguments is required.
"""
parser = parser.add_mutually_exclusive_group(required=required)
for arg, kwargs in iteritems(args):
arg_name = kwargs.pop('arg', arg.replace('_', '-'))
if 'metavar' not in kwargs:
kwargs['metavar'] = arg.upper()
parser.add_argument('--' + arg_name, dest=prefix + arg, **kwargs) | [
"def",
"add_mutually_exclusive_args",
"(",
"parser",
",",
"args",
",",
"required",
"=",
"False",
",",
"prefix",
"=",
"DATA_PREFIX",
")",
":",
"parser",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"required",
")",
"for",
"arg",
",",
"kwargs",
"in",
"iteritems",
"(",
"args",
")",
":",
"arg_name",
"=",
"kwargs",
".",
"pop",
"(",
"'arg'",
",",
"arg",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
")",
"if",
"'metavar'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'metavar'",
"]",
"=",
"arg",
".",
"upper",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--'",
"+",
"arg_name",
",",
"dest",
"=",
"prefix",
"+",
"arg",
",",
"*",
"*",
"kwargs",
")"
] | Helper method that populates mutually exclusive arguments. The argument values can
be later retrieved with `extract_arguments` method.
The `args` argument to this method should be a dict with strings as
keys and dicts as values. The keys will be used as keys in returned
data. Their values will be passed as kwargs to `parser.add_argument`.
There is a special value `arg` that will be used as the argument name if
present; otherwise a name will be generated based on the key.
``required`` will be passed to `parser.add_mutually_exclusive_group` to
indicate that at least one of the mutually exclusive arguments is required. | [
"Helper",
"method",
"that",
"populates",
"mutually",
"exclusive",
"arguments",
".",
"The",
"argument",
"values",
"can",
"be",
"later",
"retrieved",
"with",
"extract_arguments",
"method",
"."
] | 7236fd8b72e675ebb321bbe337289d9fbeb6119f | https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L128-L147 | train |
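And the mutually exclusive variant, again with hypothetical argument names (at most one of the two flags may be given; `required=True` makes exactly one mandatory):

```python
import argparse

parser = argparse.ArgumentParser()
add_mutually_exclusive_args(parser, {
    'include_id': {'help': 'filter by numeric id'},
    'include_name': {'help': 'filter by name'},
}, required=True)

ns = parser.parse_args(['--include-name', 'compose-tool'])
# Passing both flags at once would make argparse exit with an error:
#   parser.parse_args(['--include-id', '7', '--include-name', 'x'])
print(vars(ns))
```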
product-definition-center/pdc-client | pdc_client/plugin_helpers.py | add_create_update_args | def add_create_update_args(parser, required_args, optional_args, create=False):
"""Wrapper around ``add_parser_arguments``.
If ``create`` is True, one argument group will be created for each of
``required_args`` and ``optional_args``. Each required argument will have
the ``required`` parameter set to True automatically.
If ``create`` is False, only one group of optional arguments will be
created containing all the arguments.
The arguments should be specified the same way as for
``add_parser_arguments``.
"""
if create:
for key in required_args:
required_args[key]['required'] = True
add_parser_arguments(parser, required_args, group='required arguments')
else:
optional_args.update(required_args)
add_parser_arguments(parser, optional_args) | python | def add_create_update_args(parser, required_args, optional_args, create=False):
"""Wrapper around ``add_parser_arguments``.
If ``create`` is True, one argument group will be created for each of
``required_args`` and ``optional_args``. Each required argument will have
the ``required`` parameter set to True automatically.
If ``create`` is False, only one group of optional arguments will be
created containing all the arguments.
The arguments should be specified the same way as for
``add_parser_arguments``.
"""
if create:
for key in required_args:
required_args[key]['required'] = True
add_parser_arguments(parser, required_args, group='required arguments')
else:
optional_args.update(required_args)
add_parser_arguments(parser, optional_args) | [
"def",
"add_create_update_args",
"(",
"parser",
",",
"required_args",
",",
"optional_args",
",",
"create",
"=",
"False",
")",
":",
"if",
"create",
":",
"for",
"key",
"in",
"required_args",
":",
"required_args",
"[",
"key",
"]",
"[",
"'required'",
"]",
"=",
"True",
"add_parser_arguments",
"(",
"parser",
",",
"required_args",
",",
"group",
"=",
"'required arguments'",
")",
"else",
":",
"optional_args",
".",
"update",
"(",
"required_args",
")",
"add_parser_arguments",
"(",
"parser",
",",
"optional_args",
")"
] | Wrapper around ``add_parser_arguments``.
If ``create`` is True, one argument group will be created for each of
``required_args`` and ``optional_args``. Each required argument will have
the ``required`` parameter set to True automatically.
If ``create`` is False, only one group of optional arguments will be
created containing all the arguments.
The arguments should be specified the same way as for
``add_parser_arguments``. | [
"Wrapper",
"around",
"add_parser_arguments",
"."
] | 7236fd8b72e675ebb321bbe337289d9fbeb6119f | https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L150-L169 | train |
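A sketch of the create/update split (hypothetical specs; `copy.deepcopy` is used because the helper mutates the nested kwarg dicts, adding `required=True` in the `create=True` branch and popping `arg` downstream):

```python
import argparse
import copy

required = {'name': {'help': 'component name'}}
optional = {'description': {'help': 'free-form text'}}

create_parser = argparse.ArgumentParser()
# create=True: --name gains required=True and lands in its own help group
add_create_update_args(create_parser, copy.deepcopy(required),
                       copy.deepcopy(optional), create=True)

update_parser = argparse.ArgumentParser()
# create=False: the dicts are merged and everything stays optional
add_create_update_args(update_parser, copy.deepcopy(required),
                       copy.deepcopy(optional), create=False)

create_parser.parse_args(['--name', 'pdc', '--description', 'demo'])
```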
product-definition-center/pdc-client | pdc_client/plugin_helpers.py | extract_arguments | def extract_arguments(args, prefix=DATA_PREFIX):
"""Return a dict of arguments created by `add_parser_arguments`.
If the key in `args` contains two underscores, a nested dictionary will be
created. Only keys starting with the given prefix are examined. The prefix is
stripped away and does not appear in the result.
"""
data = {}
for key, value in iteritems(args.__dict__):
if key.startswith(prefix) and value is not None:
parts = key[len(prefix):].split('__')
# Think of `d` as a pointer into the resulting nested dictionary.
# The `for` loop iterates over all parts of the key except the last
# to find the proper dict into which the value should be inserted.
# If the subdicts do not exist, they are created.
d = data
for p in parts[:-1]:
assert p not in d or isinstance(d[p], dict)
d = d.setdefault(p, {})
# At this point `d` points to the correct dict and value can be
# inserted.
d[parts[-1]] = value if value != '' else None
return data | python | def extract_arguments(args, prefix=DATA_PREFIX):
"""Return a dict of arguments created by `add_parser_arguments`.
If the key in `args` contains two underscores, a nested dictionary will be
created. Only keys starting with the given prefix are examined. The prefix is
stripped away and does not appear in the result.
"""
data = {}
for key, value in iteritems(args.__dict__):
if key.startswith(prefix) and value is not None:
parts = key[len(prefix):].split('__')
# Think of `d` as a pointer into the resulting nested dictionary.
# The `for` loop iterates over all parts of the key except the last
# to find the proper dict into which the value should be inserted.
# If the subdicts do not exist, they are created.
d = data
for p in parts[:-1]:
assert p not in d or isinstance(d[p], dict)
d = d.setdefault(p, {})
# At this point `d` points to the correct dict and value can be
# inserted.
d[parts[-1]] = value if value != '' else None
return data | [
"def",
"extract_arguments",
"(",
"args",
",",
"prefix",
"=",
"DATA_PREFIX",
")",
":",
"data",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"args",
".",
"__dict__",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"prefix",
")",
"and",
"value",
"is",
"not",
"None",
":",
"parts",
"=",
"key",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
".",
"split",
"(",
"'__'",
")",
"# Think of `d` as a pointer into the resulting nested dictionary.",
"# The `for` loop iterates over all parts of the key except the last",
"# to find the proper dict into which the value should be inserted.",
"# If the subdicts do not exist, they are created.",
"d",
"=",
"data",
"for",
"p",
"in",
"parts",
"[",
":",
"-",
"1",
"]",
":",
"assert",
"p",
"not",
"in",
"d",
"or",
"isinstance",
"(",
"d",
"[",
"p",
"]",
",",
"dict",
")",
"d",
"=",
"d",
".",
"setdefault",
"(",
"p",
",",
"{",
"}",
")",
"# At this point `d` points to the correct dict and value can be",
"# inserted.",
"d",
"[",
"parts",
"[",
"-",
"1",
"]",
"]",
"=",
"value",
"if",
"value",
"!=",
"''",
"else",
"None",
"return",
"data"
] | Return a dict of arguments created by `add_parser_arguments`.
If the key in `args` contains two underscores, a nested dictionary will be
created. Only keys starting with the given prefix are examined. The prefix is
stripped away and does not appear in the result. | [
"Return",
"a",
"dict",
"of",
"arguments",
"created",
"by",
"add_parser_arguments",
"."
] | 7236fd8b72e675ebb321bbe337289d9fbeb6119f | https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/plugin_helpers.py#L172-L194 | train |
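The double-underscore nesting is easiest to see on a hand-built namespace (a stand-in for what `add_parser_arguments` would produce; `DATA_PREFIX` is assumed to be the module's prefix constant):

```python
import argparse

ns = argparse.Namespace()
setattr(ns, DATA_PREFIX + 'name', 'rhel-7')
setattr(ns, DATA_PREFIX + 'owner__email', 'alice@example.com')
setattr(ns, DATA_PREFIX + 'owner__irc', '')  # empty string becomes None
setattr(ns, 'unrelated', 'ignored')          # no prefix, so it is skipped

print(extract_arguments(ns))
# {'name': 'rhel-7', 'owner': {'email': 'alice@example.com', 'irc': None}}
```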
glormph/msstitch | src/app/actions/mslookup/searchspace.py | create_searchspace | def create_searchspace(lookup, fastafn, proline_cut=False,
reverse_seqs=True, do_trypsinize=True):
"""Given a FASTA database, proteins are trypsinized and resulting peptides
stored in a database or dict for lookups"""
allpeps = []
for record in SeqIO.parse(fastafn, 'fasta'):
if do_trypsinize:
pepseqs = trypsinize(record.seq, proline_cut)
else:
pepseqs = [record.seq]
# Exchange all leucines to isoleucines because MS can't differentiate them
pepseqs = [(str(pep).replace('L', 'I'),) for pep in pepseqs]
allpeps.extend(pepseqs)
if len(allpeps) > 1000000: # more than x peps, write to SQLite
lookup.write_peps(allpeps, reverse_seqs)
allpeps = []
# write remaining peps to sqlite
lookup.write_peps(allpeps, reverse_seqs)
lookup.index_peps(reverse_seqs)
lookup.close_connection() | python | def create_searchspace(lookup, fastafn, proline_cut=False,
reverse_seqs=True, do_trypsinize=True):
"""Given a FASTA database, proteins are trypsinized and resulting peptides
stored in a database or dict for lookups"""
allpeps = []
for record in SeqIO.parse(fastafn, 'fasta'):
if do_trypsinize:
pepseqs = trypsinize(record.seq, proline_cut)
else:
pepseqs = [record.seq]
# Exchange all leucines to isoleucines because MS can't differentiate them
pepseqs = [(str(pep).replace('L', 'I'),) for pep in pepseqs]
allpeps.extend(pepseqs)
if len(allpeps) > 1000000: # more than x peps, write to SQLite
lookup.write_peps(allpeps, reverse_seqs)
allpeps = []
# write remaining peps to sqlite
lookup.write_peps(allpeps, reverse_seqs)
lookup.index_peps(reverse_seqs)
lookup.close_connection() | [
"def",
"create_searchspace",
"(",
"lookup",
",",
"fastafn",
",",
"proline_cut",
"=",
"False",
",",
"reverse_seqs",
"=",
"True",
",",
"do_trypsinize",
"=",
"True",
")",
":",
"allpeps",
"=",
"[",
"]",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"fastafn",
",",
"'fasta'",
")",
":",
"if",
"do_trypsinize",
":",
"pepseqs",
"=",
"trypsinize",
"(",
"record",
".",
"seq",
",",
"proline_cut",
")",
"else",
":",
"pepseqs",
"=",
"[",
"record",
".",
"seq",
"]",
"# Exchange all leucines to isoleucines because MS can't differ",
"pepseqs",
"=",
"[",
"(",
"str",
"(",
"pep",
")",
".",
"replace",
"(",
"'L'",
",",
"'I'",
")",
",",
")",
"for",
"pep",
"in",
"pepseqs",
"]",
"allpeps",
".",
"extend",
"(",
"pepseqs",
")",
"if",
"len",
"(",
"allpeps",
")",
">",
"1000000",
":",
"# more than x peps, write to SQLite",
"lookup",
".",
"write_peps",
"(",
"allpeps",
",",
"reverse_seqs",
")",
"allpeps",
"=",
"[",
"]",
"# write remaining peps to sqlite",
"lookup",
".",
"write_peps",
"(",
"allpeps",
",",
"reverse_seqs",
")",
"lookup",
".",
"index_peps",
"(",
"reverse_seqs",
")",
"lookup",
".",
"close_connection",
"(",
")"
] | Given a FASTA database, proteins are trypsinized and resulting peptides
stored in a database or dict for lookups | [
"Given",
"a",
"FASTA",
"database",
"proteins",
"are",
"trypsinized",
"and",
"resulting",
"peptides",
"stored",
"in",
"a",
"database",
"or",
"dict",
"for",
"lookups"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/searchspace.py#L24-L43 | train |
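A rough sketch of the batching contract the function relies on, with the Bio.SeqIO parse replaced by an in-memory list and a stand-in lookup object (the L→I normalization follows the row above; everything named `Fake*` or `normalize` is hypothetical):

```python
class FakeLookup:
    """Stand-in exposing the write/index/close surface the row assumes."""
    def __init__(self):
        self.rows = []
    def write_peps(self, peps, reverse_seqs):
        self.rows.extend(peps)
    def index_peps(self, reverse_seqs):
        pass
    def close_connection(self):
        pass

def normalize(pepseqs):
    # MS cannot distinguish leucine from isoleucine, so collapse L -> I
    return [(str(pep).replace('L', 'I'),) for pep in pepseqs]

lookup = FakeLookup()
for pepseqs in (['PEPTLDEK'], ['LLK', 'AR']):  # pretend trypsinize() output
    lookup.write_peps(normalize(pepseqs), reverse_seqs=True)
lookup.index_peps(reverse_seqs=True)
lookup.close_connection()
print(lookup.rows)  # [('PEPTIDEK',), ('IIK',), ('AR',)]
```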
Erotemic/utool | utool/util_hash.py | hashid_arr | def hashid_arr(arr, label='arr', hashlen=16):
""" newer version of hashstr_arr2 """
hashstr = hash_data(arr)[0:hashlen]
if isinstance(arr, (list, tuple)):
shapestr = len(arr)
else:
shapestr = ','.join(list(map(str, arr.shape)))
hashid = '{}-{}-{}'.format(label, shapestr, hashstr)
return hashid | python | def hashid_arr(arr, label='arr', hashlen=16):
""" newer version of hashstr_arr2 """
hashstr = hash_data(arr)[0:hashlen]
if isinstance(arr, (list, tuple)):
shapestr = len(arr)
else:
shapestr = ','.join(list(map(str, arr.shape)))
hashid = '{}-{}-{}'.format(label, shapestr, hashstr)
return hashid | [
"def",
"hashid_arr",
"(",
"arr",
",",
"label",
"=",
"'arr'",
",",
"hashlen",
"=",
"16",
")",
":",
"hashstr",
"=",
"hash_data",
"(",
"arr",
")",
"[",
"0",
":",
"hashlen",
"]",
"if",
"isinstance",
"(",
"arr",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"shapestr",
"=",
"len",
"(",
"arr",
")",
"else",
":",
"shapestr",
"=",
"','",
".",
"join",
"(",
"list",
"(",
"map",
"(",
"str",
",",
"arr",
".",
"shape",
")",
")",
")",
"hashid",
"=",
"'{}-{}-{}'",
".",
"format",
"(",
"label",
",",
"shapestr",
",",
"hashstr",
")",
"return",
"hashid"
] | newer version of hashstr_arr2 | [
"newer",
"version",
"of",
"hashstr_arr2"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L163-L171 | train |
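The one-line docstring hides the output shape: `<label>-<shape>-<truncated hash>`, where lists report their length and arrays their comma-joined shape. A quick sketch (assuming `hashid_arr` and numpy are available; the hash characters shown are placeholders):

```python
import numpy as np

arr = np.arange(6).reshape(2, 3)
print(hashid_arr(arr, label='feat'))  # e.g. 'feat-2,3-<16 hash chars>'
print(hashid_arr([1, 2, 3]))          # lists use len(): 'arr-3-<16 hash chars>'
```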
Erotemic/utool | utool/util_hash.py | _update_hasher | def _update_hasher(hasher, data):
"""
This is the clear winner over the generate version.
Used by hash_data
Ignore:
import utool
rng = np.random.RandomState(0)
# str1 = rng.rand(0).dumps()
str1 = b'SEP'
str2 = rng.rand(10000).dumps()
for timer in utool.Timerit(100, label='twocall'):
hasher = hashlib.sha256()
with timer:
hasher.update(str1)
hasher.update(str2)
a = hasher.hexdigest()
for timer in utool.Timerit(100, label='concat'):
hasher = hashlib.sha256()
with timer:
hasher.update(str1 + str2)
b = hasher.hexdigest()
assert a == b
# CONCLUSION: Faster to concat in case of prefixes and seps
nested_data = {'1': [rng.rand(100), '2', '3'],
'2': ['1', '2', '3', '4', '5'],
'3': [('1', '2'), ('3', '4'), ('5', '6')]}
data = list(nested_data.values())
for timer in utool.Timerit(1000, label='cat-generate'):
hasher = hashlib.sha256()
with timer:
hasher.update(b''.join(_bytes_generator(data)))
for timer in utool.Timerit(1000, label='inc-generate'):
hasher = hashlib.sha256()
with timer:
for b in _bytes_generator(data):
hasher.update(b)
for timer in utool.Timerit(1000, label='inc-generate'):
hasher = hashlib.sha256()
with timer:
for b in _bytes_generator(data):
hasher.update(b)
for timer in utool.Timerit(1000, label='chunk-inc-generate'):
hasher = hashlib.sha256()
import ubelt as ub
with timer:
for chunk in ub.chunks(_bytes_generator(data), 5):
hasher.update(b''.join(chunk))
for timer in utool.Timerit(1000, label='inc-update'):
hasher = hashlib.sha256()
with timer:
_update_hasher(hasher, data)
data = ut.lorium_ipsum()
hash_data(data)
ut.hashstr27(data)
%timeit hash_data(data)
%timeit ut.hashstr27(repr(data))
for timer in utool.Timerit(100, label='twocall'):
hasher = hashlib.sha256()
with timer:
hash_data(data)
hasher = hashlib.sha256()
hasher.update(memoryview(np.array([1])))
print(hasher.hexdigest())
hasher = hashlib.sha256()
hasher.update(np.array(['1'], dtype=object))
print(hasher.hexdigest())
"""
if isinstance(data, (tuple, list, zip)):
needs_iteration = True
elif (util_type.HAVE_NUMPY and isinstance(data, np.ndarray) and
data.dtype.kind == 'O'):
# ndarrays of objects cannot be hashed directly.
needs_iteration = True
else:
needs_iteration = False
if needs_iteration:
# try to nest quickly without recursive calls
SEP = b'SEP'
iter_prefix = b'ITER'
# if isinstance(data, tuple):
# iter_prefix = b'TUP'
# else:
# iter_prefix = b'LIST'
iter_ = iter(data)
hasher.update(iter_prefix)
try:
for item in iter_:
prefix, hashable = _covert_to_hashable(data)
binary_data = SEP + prefix + hashable
# b''.join([SEP, prefix, hashable])
hasher.update(binary_data)
except TypeError:
# need to use recursive calls
# Update based on current item
_update_hasher(hasher, item)
for item in iter_:
# Ensure the items have a spacer between them
hasher.update(SEP)
_update_hasher(hasher, item)
else:
prefix, hashable = _covert_to_hashable(data)
binary_data = prefix + hashable
# b''.join([prefix, hashable])
hasher.update(binary_data) | python | def _update_hasher(hasher, data):
"""
This is the clear winner over the generate version.
Used by hash_data
Ignore:
import utool
rng = np.random.RandomState(0)
# str1 = rng.rand(0).dumps()
str1 = b'SEP'
str2 = rng.rand(10000).dumps()
for timer in utool.Timerit(100, label='twocall'):
hasher = hashlib.sha256()
with timer:
hasher.update(str1)
hasher.update(str2)
a = hasher.hexdigest()
for timer in utool.Timerit(100, label='concat'):
hasher = hashlib.sha256()
with timer:
hasher.update(str1 + str2)
b = hasher.hexdigest()
assert a == b
# CONCLUSION: Faster to concat in case of prefixes and seps
nested_data = {'1': [rng.rand(100), '2', '3'],
'2': ['1', '2', '3', '4', '5'],
'3': [('1', '2'), ('3', '4'), ('5', '6')]}
data = list(nested_data.values())
for timer in utool.Timerit(1000, label='cat-generate'):
hasher = hashlib.sha256()
with timer:
hasher.update(b''.join(_bytes_generator(data)))
for timer in utool.Timerit(1000, label='inc-generate'):
hasher = hashlib.sha256()
with timer:
for b in _bytes_generator(data):
hasher.update(b)
for timer in utool.Timerit(1000, label='inc-generate'):
hasher = hashlib.sha256()
with timer:
for b in _bytes_generator(data):
hasher.update(b)
for timer in utool.Timerit(1000, label='chunk-inc-generate'):
hasher = hashlib.sha256()
import ubelt as ub
with timer:
for chunk in ub.chunks(_bytes_generator(data), 5):
hasher.update(b''.join(chunk))
for timer in utool.Timerit(1000, label='inc-update'):
hasher = hashlib.sha256()
with timer:
_update_hasher(hasher, data)
data = ut.lorium_ipsum()
hash_data(data)
ut.hashstr27(data)
%timeit hash_data(data)
%timeit ut.hashstr27(repr(data))
for timer in utool.Timerit(100, label='twocall'):
hasher = hashlib.sha256()
with timer:
hash_data(data)
hasher = hashlib.sha256()
hasher.update(memoryview(np.array([1])))
print(hasher.hexdigest())
hasher = hashlib.sha256()
hasher.update(np.array(['1'], dtype=object))
print(hasher.hexdigest())
"""
if isinstance(data, (tuple, list, zip)):
needs_iteration = True
elif (util_type.HAVE_NUMPY and isinstance(data, np.ndarray) and
data.dtype.kind == 'O'):
# ndarrays of objects cannot be hashed directly.
needs_iteration = True
else:
needs_iteration = False
if needs_iteration:
# try to nest quickly without recursive calls
SEP = b'SEP'
iter_prefix = b'ITER'
# if isinstance(data, tuple):
# iter_prefix = b'TUP'
# else:
# iter_prefix = b'LIST'
iter_ = iter(data)
hasher.update(iter_prefix)
try:
for item in iter_:
prefix, hashable = _covert_to_hashable(data)
binary_data = SEP + prefix + hashable
# b''.join([SEP, prefix, hashable])
hasher.update(binary_data)
except TypeError:
# need to use recursive calls
# Update based on current item
_update_hasher(hasher, item)
for item in iter_:
# Ensure the items have a spacer between them
hasher.update(SEP)
_update_hasher(hasher, item)
else:
prefix, hashable = _covert_to_hashable(data)
binary_data = prefix + hashable
# b''.join([prefix, hashable])
hasher.update(binary_data) | [
"def",
"_update_hasher",
"(",
"hasher",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"(",
"tuple",
",",
"list",
",",
"zip",
")",
")",
":",
"needs_iteration",
"=",
"True",
"elif",
"(",
"util_type",
".",
"HAVE_NUMPY",
"and",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"and",
"data",
".",
"dtype",
".",
"kind",
"==",
"'O'",
")",
":",
"# ndarrays of objects cannot be hashed directly.",
"needs_iteration",
"=",
"True",
"else",
":",
"needs_iteration",
"=",
"False",
"if",
"needs_iteration",
":",
"# try to nest quickly without recursive calls",
"SEP",
"=",
"b'SEP'",
"iter_prefix",
"=",
"b'ITER'",
"# if isinstance(data, tuple):",
"# iter_prefix = b'TUP'",
"# else:",
"# iter_prefix = b'LIST'",
"iter_",
"=",
"iter",
"(",
"data",
")",
"hasher",
".",
"update",
"(",
"iter_prefix",
")",
"try",
":",
"for",
"item",
"in",
"iter_",
":",
"prefix",
",",
"hashable",
"=",
"_covert_to_hashable",
"(",
"data",
")",
"binary_data",
"=",
"SEP",
"+",
"prefix",
"+",
"hashable",
"# b''.join([SEP, prefix, hashable])",
"hasher",
".",
"update",
"(",
"binary_data",
")",
"except",
"TypeError",
":",
"# need to use recursive calls",
"# Update based on current item",
"_update_hasher",
"(",
"hasher",
",",
"item",
")",
"for",
"item",
"in",
"iter_",
":",
"# Ensure the items have a spacer between them",
"hasher",
".",
"update",
"(",
"SEP",
")",
"_update_hasher",
"(",
"hasher",
",",
"item",
")",
"else",
":",
"prefix",
",",
"hashable",
"=",
"_covert_to_hashable",
"(",
"data",
")",
"binary_data",
"=",
"prefix",
"+",
"hashable",
"# b''.join([prefix, hashable])",
"hasher",
".",
"update",
"(",
"binary_data",
")"
] | This is the clear winner over the generate version.
Used by hash_data
Ignore:
import utool
rng = np.random.RandomState(0)
# str1 = rng.rand(0).dumps()
str1 = b'SEP'
str2 = rng.rand(10000).dumps()
for timer in utool.Timerit(100, label='twocall'):
hasher = hashlib.sha256()
with timer:
hasher.update(str1)
hasher.update(str2)
a = hasher.hexdigest()
for timer in utool.Timerit(100, label='concat'):
hasher = hashlib.sha256()
with timer:
hasher.update(str1 + str2)
b = hasher.hexdigest()
assert a == b
# CONCLUSION: Faster to concat in case of prefixes and seps
nested_data = {'1': [rng.rand(100), '2', '3'],
'2': ['1', '2', '3', '4', '5'],
'3': [('1', '2'), ('3', '4'), ('5', '6')]}
data = list(nested_data.values())
for timer in utool.Timerit(1000, label='cat-generate'):
hasher = hashlib.sha256()
with timer:
hasher.update(b''.join(_bytes_generator(data)))
for timer in utool.Timerit(1000, label='inc-generate'):
hasher = hashlib.sha256()
with timer:
for b in _bytes_generator(data):
hasher.update(b)
for timer in utool.Timerit(1000, label='inc-generate'):
hasher = hashlib.sha256()
with timer:
for b in _bytes_generator(data):
hasher.update(b)
for timer in utool.Timerit(1000, label='chunk-inc-generate'):
hasher = hashlib.sha256()
import ubelt as ub
with timer:
for chunk in ub.chunks(_bytes_generator(data), 5):
hasher.update(b''.join(chunk))
for timer in utool.Timerit(1000, label='inc-update'):
hasher = hashlib.sha256()
with timer:
_update_hasher(hasher, data)
data = ut.lorium_ipsum()
hash_data(data)
ut.hashstr27(data)
%timeit hash_data(data)
%timeit ut.hashstr27(repr(data))
for timer in utool.Timerit(100, label='twocall'):
hasher = hashlib.sha256()
with timer:
hash_data(data)
hasher = hashlib.sha256()
hasher.update(memoryview(np.array([1])))
print(hasher.hexdigest())
hasher = hashlib.sha256()
hasher.update(np.array(['1'], dtype=object))
print(hasher.hexdigest()) | [
"This",
"is",
"the",
"clear",
"winner",
"over",
"the",
"generate",
"version",
".",
"Used",
"by",
"hash_data"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L241-L358 | train |
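The `Ignore` benchmarks above conclude that concatenating the separator and prefix before a single `update()` call beats two calls. The correctness property that makes the two strategies interchangeable is that a hash digest depends only on the concatenated byte stream, not on how it was chunked:

```python
import hashlib

a = hashlib.sha256()
a.update(b'SEP')
a.update(b'payload')

b = hashlib.sha256()
b.update(b'SEP' + b'payload')

# Same byte stream, same digest -- only the call overhead differs.
assert a.hexdigest() == b.hexdigest()
print(a.hexdigest())
```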
Erotemic/utool | utool/util_hash.py | combine_hashes | def combine_hashes(bytes_list, hasher=None):
"""
Only works on bytes
Example:
>>> # DISABLE_DOCTEST
>>> x = [b('1111'), b('2222')]
>>> y = [b('11'), b('11'), b('22'), b('22')]
>>> bytes_list = y
>>> out1 = ut.combine_hashes(x, hashlib.sha1())
>>> hasher = hashlib.sha1()
>>> out2 = ut.combine_hashes(y, hasher)
>>> bytes_ = out2
>>> assert hasher.hexdigest() == freeze_hash_bytes(hasher.digest())
>>> assert convert_bytes_to_bigbase(hasher.digest()) == convert_hexstr_to_bigbase(hasher.hexdigest())
>>> assert out1 != out2
>>> print('out1 = %r' % (out1,))
>>> print('out2 = %r' % (out2,))
"""
if hasher is None:
hasher = hashlib.sha256()
for b in bytes_list:
hasher.update(b)
hasher.update(SEP_BYTE)
return hasher.digest() | python | def combine_hashes(bytes_list, hasher=None):
"""
Only works on bytes
Example:
>>> # DISABLE_DOCTEST
>>> x = [b('1111'), b('2222')]
>>> y = [b('11'), b('11'), b('22'), b('22')]
>>> bytes_list = y
>>> out1 = ut.combine_hashes(x, hashlib.sha1())
>>> hasher = hashlib.sha1()
>>> out2 = ut.combine_hashes(y, hasher)
>>> bytes_ = out2
>>> assert hasher.hexdigest() == freeze_hash_bytes(hasher.digest())
>>> assert convert_bytes_to_bigbase(hasher.digest()) == convert_hexstr_to_bigbase(hasher.hexdigest())
>>> assert out1 != out2
>>> print('out1 = %r' % (out1,))
>>> print('out2 = %r' % (out2,))
"""
if hasher is None:
hasher = hashlib.sha256()
for b in bytes_list:
hasher.update(b)
hasher.update(SEP_BYTE)
return hasher.digest() | [
"def",
"combine_hashes",
"(",
"bytes_list",
",",
"hasher",
"=",
"None",
")",
":",
"if",
"hasher",
"is",
"None",
":",
"hasher",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"for",
"b",
"in",
"bytes_list",
":",
"hasher",
".",
"update",
"(",
"b",
")",
"hasher",
".",
"update",
"(",
"SEP_BYTE",
")",
"return",
"hasher",
".",
"digest",
"(",
")"
] | Only works on bytes
Example:
>>> # DISABLE_DOCTEST
>>> x = [b('1111'), b('2222')]
>>> y = [b('11'), b('11'), b('22'), b('22')]
>>> bytes_list = y
>>> out1 = ut.combine_hashes(x, hashlib.sha1())
>>> hasher = hashlib.sha1()
>>> out2 = ut.combine_hashes(y, hasher)
>>> bytes_ = out2
>>> assert hasher.hexdigest() == freeze_hash_bytes(hasher.digest())
>>> assert convert_bytes_to_bigbase(hasher.digest()) == convert_hexstr_to_bigbase(hasher.hexdigest())
>>> assert out1 != out2
>>> print('out1 = %r' % (out1,))
>>> print('out2 = %r' % (out2,)) | [
"Only",
"works",
"on",
"bytes"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L410-L434 | train |
Erotemic/utool | utool/util_hash.py | hash_data | def hash_data(data, hashlen=None, alphabet=None):
r"""
Get a unique hash depending on the state of the data.
Args:
data (object): any sort of loosely organized data
hashlen (None): (default = None)
alphabet (None): (default = None)
Returns:
str: text - hash string
CommandLine:
python -m utool.util_hash hash_data
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> counter = [0]
>>> failed = []
>>> def check_hash(input_, want=None):
>>> count = counter[0] = counter[0] + 1
>>> got = ut.hash_data(input_)
>>> print('({}) {}'.format(count, got))
>>> if want is not None and not got.startswith(want):
>>> failed.append((got, input_, count, want))
>>> check_hash('1', 'wuvrng')
>>> check_hash(['1'], 'dekbfpby')
>>> check_hash(tuple(['1']), 'dekbfpby')
>>> check_hash(b'12', 'marreflbv')
>>> check_hash([b'1', b'2'], 'nwfs')
>>> check_hash(['1', '2', '3'], 'arfrp')
>>> check_hash(['1', np.array([1,2,3]), '3'], 'uyqwcq')
>>> check_hash('123', 'ehkgxk')
>>> check_hash(zip([1, 2, 3], [4, 5, 6]), 'mjcpwa')
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> check_hash(rng.rand(100000), 'bdwosuey')
>>> for got, input_, count, want in failed:
>>> print('failed {} on {}'.format(count, input_))
>>> print('got={}, want={}'.format(got, want))
>>> assert not failed
"""
if alphabet is None:
alphabet = ALPHABET_27
if hashlen is None:
hashlen = HASH_LEN2
if isinstance(data, stringlike) and len(data) == 0:
# Make a special hash for empty data
text = (alphabet[0] * hashlen)
else:
hasher = hashlib.sha512()
_update_hasher(hasher, data)
# Get a 128 character hex string
text = hasher.hexdigest()
# Shorten length of string (by increasing base)
hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet))
# Truncate
text = hashstr2[:hashlen]
return text | python | def hash_data(data, hashlen=None, alphabet=None):
r"""
Get a unique hash depending on the state of the data.
Args:
data (object): any sort of loosely organized data
hashlen (None): (default = None)
alphabet (None): (default = None)
Returns:
str: text - hash string
CommandLine:
python -m utool.util_hash hash_data
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> counter = [0]
>>> failed = []
>>> def check_hash(input_, want=None):
>>> count = counter[0] = counter[0] + 1
>>> got = ut.hash_data(input_)
>>> print('({}) {}'.format(count, got))
>>> if want is not None and not got.startswith(want):
>>> failed.append((got, input_, count, want))
>>> check_hash('1', 'wuvrng')
>>> check_hash(['1'], 'dekbfpby')
>>> check_hash(tuple(['1']), 'dekbfpby')
>>> check_hash(b'12', 'marreflbv')
>>> check_hash([b'1', b'2'], 'nwfs')
>>> check_hash(['1', '2', '3'], 'arfrp')
>>> check_hash(['1', np.array([1,2,3]), '3'], 'uyqwcq')
>>> check_hash('123', 'ehkgxk')
>>> check_hash(zip([1, 2, 3], [4, 5, 6]), 'mjcpwa')
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> check_hash(rng.rand(100000), 'bdwosuey')
>>> for got, input_, count, want in failed:
>>> print('failed {} on {}'.format(count, input_))
>>> print('got={}, want={}'.format(got, want))
>>> assert not failed
"""
if alphabet is None:
alphabet = ALPHABET_27
if hashlen is None:
hashlen = HASH_LEN2
if isinstance(data, stringlike) and len(data) == 0:
# Make a special hash for empty data
text = (alphabet[0] * hashlen)
else:
hasher = hashlib.sha512()
_update_hasher(hasher, data)
# Get a 128 character hex string
text = hasher.hexdigest()
# Shorten length of string (by increasing base)
hashstr2 = convert_hexstr_to_bigbase(text, alphabet, bigbase=len(alphabet))
# Truncate
text = hashstr2[:hashlen]
return text | [
"def",
"hash_data",
"(",
"data",
",",
"hashlen",
"=",
"None",
",",
"alphabet",
"=",
"None",
")",
":",
"if",
"alphabet",
"is",
"None",
":",
"alphabet",
"=",
"ALPHABET_27",
"if",
"hashlen",
"is",
"None",
":",
"hashlen",
"=",
"HASH_LEN2",
"if",
"isinstance",
"(",
"data",
",",
"stringlike",
")",
"and",
"len",
"(",
"data",
")",
"==",
"0",
":",
"# Make a special hash for empty data",
"text",
"=",
"(",
"alphabet",
"[",
"0",
"]",
"*",
"hashlen",
")",
"else",
":",
"hasher",
"=",
"hashlib",
".",
"sha512",
"(",
")",
"_update_hasher",
"(",
"hasher",
",",
"data",
")",
"# Get a 128 character hex string",
"text",
"=",
"hasher",
".",
"hexdigest",
"(",
")",
"# Shorten length of string (by increasing base)",
"hashstr2",
"=",
"convert_hexstr_to_bigbase",
"(",
"text",
",",
"alphabet",
",",
"bigbase",
"=",
"len",
"(",
"alphabet",
")",
")",
"# Truncate",
"text",
"=",
"hashstr2",
"[",
":",
"hashlen",
"]",
"return",
"text"
] | r"""
Get a unique hash depending on the state of the data.
Args:
data (object): any sort of loosely organized data
hashlen (None): (default = None)
alphabet (None): (default = None)
Returns:
str: text - hash string
CommandLine:
python -m utool.util_hash hash_data
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> counter = [0]
>>> failed = []
>>> def check_hash(input_, want=None):
>>> count = counter[0] = counter[0] + 1
>>> got = ut.hash_data(input_)
>>> print('({}) {}'.format(count, got))
>>> if want is not None and not got.startswith(want):
>>> failed.append((got, input_, count, want))
>>> check_hash('1', 'wuvrng')
>>> check_hash(['1'], 'dekbfpby')
>>> check_hash(tuple(['1']), 'dekbfpby')
>>> check_hash(b'12', 'marreflbv')
>>> check_hash([b'1', b'2'], 'nwfs')
>>> check_hash(['1', '2', '3'], 'arfrp')
>>> check_hash(['1', np.array([1,2,3]), '3'], 'uyqwcq')
>>> check_hash('123', 'ehkgxk')
>>> check_hash(zip([1, 2, 3], [4, 5, 6]), 'mjcpwa')
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> check_hash(rng.rand(100000), 'bdwosuey')
>>> for got, input_, count, want in failed:
>>> print('failed {} on {}'.format(count, input_))
>>> print('got={}, want={}'.format(got, want))
>>> assert not failed | [
"r",
"Get",
"a",
"unique",
"hash",
"depending",
"on",
"the",
"state",
"of",
"the",
"data",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L438-L498 | train |
Erotemic/utool | utool/util_hash.py | convert_hexstr_to_bigbase | def convert_hexstr_to_bigbase(hexstr, alphabet=ALPHABET, bigbase=BIGBASE):
r"""
Packs a long hexstr into a shorter length string with a larger base
Ignore:
# Determine the length savings with lossless conversion
import sympy as sy
consts = dict(hexbase=16, hexlen=256, bigbase=27)
symbols = sy.symbols('hexbase, hexlen, bigbase, newlen')
haexbase, hexlen, bigbase, newlen = symbols
eqn = sy.Eq(16 ** hexlen, bigbase ** newlen)
newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
print('newlen_ans = %r' % (newlen_ans,))
# for a 27 char alphabet we can get 216
print('Required length for lossless conversion len2 = %r' % (len2,))
def info(base, len):
bits = base ** len
print('base = %r' % (base,))
print('len = %r' % (len,))
print('bits = %r' % (bits,))
info(16, 256)
info(27, 16)
info(27, 64)
info(27, 216)
"""
x = int(hexstr, 16) # first convert to base 16
if x == 0:
return '0'
sign = 1 if x > 0 else -1
x *= sign
digits = []
while x:
digits.append(alphabet[x % bigbase])
x //= bigbase
if sign < 0:
digits.append('-')
digits.reverse()
newbase_str = ''.join(digits)
return newbase_str | python | def convert_hexstr_to_bigbase(hexstr, alphabet=ALPHABET, bigbase=BIGBASE):
r"""
Packs a long hexstr into a shorter length string with a larger base
Ignore:
# Determine the length savings with lossless conversion
import sympy as sy
consts = dict(hexbase=16, hexlen=256, bigbase=27)
symbols = sy.symbols('hexbase, hexlen, bigbase, newlen')
haexbase, hexlen, bigbase, newlen = symbols
eqn = sy.Eq(16 ** hexlen, bigbase ** newlen)
newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
print('newlen_ans = %r' % (newlen_ans,))
# for a 27 char alphabet we can get 216
print('Required length for lossless conversion len2 = %r' % (len2,))
def info(base, len):
bits = base ** len
print('base = %r' % (base,))
print('len = %r' % (len,))
print('bits = %r' % (bits,))
info(16, 256)
info(27, 16)
info(27, 64)
info(27, 216)
"""
x = int(hexstr, 16) # first convert to base 16
if x == 0:
return '0'
sign = 1 if x > 0 else -1
x *= sign
digits = []
while x:
digits.append(alphabet[x % bigbase])
x //= bigbase
if sign < 0:
digits.append('-')
digits.reverse()
newbase_str = ''.join(digits)
return newbase_str | [
"def",
"convert_hexstr_to_bigbase",
"(",
"hexstr",
",",
"alphabet",
"=",
"ALPHABET",
",",
"bigbase",
"=",
"BIGBASE",
")",
":",
"x",
"=",
"int",
"(",
"hexstr",
",",
"16",
")",
"# first convert to base 16",
"if",
"x",
"==",
"0",
":",
"return",
"'0'",
"sign",
"=",
"1",
"if",
"x",
">",
"0",
"else",
"-",
"1",
"x",
"*=",
"sign",
"digits",
"=",
"[",
"]",
"while",
"x",
":",
"digits",
".",
"append",
"(",
"alphabet",
"[",
"x",
"%",
"bigbase",
"]",
")",
"x",
"//=",
"bigbase",
"if",
"sign",
"<",
"0",
":",
"digits",
".",
"append",
"(",
"'-'",
")",
"digits",
".",
"reverse",
"(",
")",
"newbase_str",
"=",
"''",
".",
"join",
"(",
"digits",
")",
"return",
"newbase_str"
] | r"""
Packs a long hexstr into a shorter length string with a larger base
Ignore:
# Determine the length savings with lossless conversion
import sympy as sy
consts = dict(hexbase=16, hexlen=256, bigbase=27)
symbols = sy.symbols('hexbase, hexlen, bigbase, newlen')
haexbase, hexlen, bigbase, newlen = symbols
eqn = sy.Eq(16 ** hexlen, bigbase ** newlen)
newlen_ans = sy.solve(eqn, newlen)[0].subs(consts).evalf()
print('newlen_ans = %r' % (newlen_ans,))
# for a 27 char alphabet we can get 216
print('Required length for lossless conversion len2 = %r' % (len2,))
def info(base, len):
bits = base ** len
print('base = %r' % (base,))
print('len = %r' % (len,))
print('bits = %r' % (bits,))
info(16, 256)
info(27, 16)
info(27, 64)
info(27, 216) | [
"r",
"Packs",
"a",
"long",
"hexstr",
"into",
"a",
"shorter",
"length",
"string",
"with",
"a",
"larger",
"base"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L766-L806 | train |
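Since the function is just repeated `divmod` by the base, a base-16 alphabet makes for an easy round-trip sanity check (the hexstr here is arbitrary; `convert_hexstr_to_bigbase` is the function from the row above):

```python
ALPHABET_16 = '0123456789abcdef'

s = convert_hexstr_to_bigbase('00ff', alphabet=ALPHABET_16, bigbase=16)
print(s)                # 'ff' -- leading zeros drop out of the integer
print(int('00ff', 16))  # 255, the value both spellings encode
```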
Erotemic/utool | utool/util_hash.py | get_file_hash | def get_file_hash(fpath, blocksize=65536, hasher=None, stride=1,
hexdigest=False):
r"""
For better hashes use hasher=hashlib.sha256, and keep stride=1
Args:
fpath (str): file path string
blocksize (int): 2 ** 16. Affects speed of reading file
hasher (None): defaults to sha1 for fast (but insecure) hashing
stride (int): strides > 1 skip data to hash, useful for faster
hashing, but less accurate; also makes the hash dependent on
blocksize.
References:
http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
http://stackoverflow.com/questions/5001893/when-should-i-use-sha-1-and-when-should-i-use-sha-2
CommandLine:
python -m utool.util_hash --test-get_file_hash
python -m utool.util_hash --test-get_file_hash:0
python -m utool.util_hash --test-get_file_hash:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> #blocksize = 65536 # 2 ** 16
>>> blocksize = 2 ** 16
>>> hasher = None
>>> stride = 1
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'7\x07B\x0eX<sRu\xa2\x90P\xda\xb2\x84?\x81?\xa9\xd9'
'\x13\x9b\xf6\x0f\xa3QQ \xd7"$\xe9m\x05\x9e\x81\xf6\xf2v\xe4'
'\x16\x00\x80Xx\x8c-H\xcdP\xf6\x02\x9frl\xbf\x99VQ\xb5'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> #fpath = ut.grab_file_url('http://en.wikipedia.org/wiki/List_of_comets_by_type')
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'tmp.txt')
>>> ut.write_to(fpath, ut.lorium_ipsum())
>>> blocksize = 2 ** 3
>>> hasher = None
>>> stride = 2
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'5KP\xcf>R\xf6\xffO:L\xac\x9c\xd3V+\x0e\xf6\xe1n'
Ignore:
file_ = open(fpath, 'rb')
"""
if hasher is None:
hasher = hashlib.sha1()
with open(fpath, 'rb') as file_:
buf = file_.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
if stride > 1:
file_.seek(blocksize * (stride - 1), 1) # skip blocks
buf = file_.read(blocksize)
if hexdigest:
return hasher.hexdigest()
else:
return hasher.digest() | python | def get_file_hash(fpath, blocksize=65536, hasher=None, stride=1,
hexdigest=False):
r"""
For better hashes use hasher=hashlib.sha256, and keep stride=1
Args:
fpath (str): file path string
blocksize (int): 2 ** 16. Affects speed of reading file
hasher (None): defaults to sha1 for fast (but insecure) hashing
stride (int): strides > 1 skip data to hash, useful for faster
hashing, but less accurate; also makes the hash dependent on
blocksize.
References:
http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
http://stackoverflow.com/questions/5001893/when-should-i-use-sha-1-and-when-should-i-use-sha-2
CommandLine:
python -m utool.util_hash --test-get_file_hash
python -m utool.util_hash --test-get_file_hash:0
python -m utool.util_hash --test-get_file_hash:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> #blocksize = 65536 # 2 ** 16
>>> blocksize = 2 ** 16
>>> hasher = None
>>> stride = 1
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'7\x07B\x0eX<sRu\xa2\x90P\xda\xb2\x84?\x81?\xa9\xd9'
'\x13\x9b\xf6\x0f\xa3QQ \xd7"$\xe9m\x05\x9e\x81\xf6\xf2v\xe4'
'\x16\x00\x80Xx\x8c-H\xcdP\xf6\x02\x9frl\xbf\x99VQ\xb5'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> #fpath = ut.grab_file_url('http://en.wikipedia.org/wiki/List_of_comets_by_type')
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'tmp.txt')
>>> ut.write_to(fpath, ut.lorium_ipsum())
>>> blocksize = 2 ** 3
>>> hasher = None
>>> stride = 2
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'5KP\xcf>R\xf6\xffO:L\xac\x9c\xd3V+\x0e\xf6\xe1n'
Ignore:
file_ = open(fpath, 'rb')
"""
if hasher is None:
hasher = hashlib.sha1()
with open(fpath, 'rb') as file_:
buf = file_.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
if stride > 1:
file_.seek(blocksize * (stride - 1), 1) # skip blocks
buf = file_.read(blocksize)
if hexdigest:
return hasher.hexdigest()
else:
return hasher.digest() | [
"def",
"get_file_hash",
"(",
"fpath",
",",
"blocksize",
"=",
"65536",
",",
"hasher",
"=",
"None",
",",
"stride",
"=",
"1",
",",
"hexdigest",
"=",
"False",
")",
":",
"if",
"hasher",
"is",
"None",
":",
"hasher",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"with",
"open",
"(",
"fpath",
",",
"'rb'",
")",
"as",
"file_",
":",
"buf",
"=",
"file_",
".",
"read",
"(",
"blocksize",
")",
"while",
"len",
"(",
"buf",
")",
">",
"0",
":",
"hasher",
".",
"update",
"(",
"buf",
")",
"if",
"stride",
">",
"1",
":",
"file_",
".",
"seek",
"(",
"blocksize",
"*",
"(",
"stride",
"-",
"1",
")",
",",
"1",
")",
"# skip blocks",
"buf",
"=",
"file_",
".",
"read",
"(",
"blocksize",
")",
"if",
"hexdigest",
":",
"return",
"hasher",
".",
"hexdigest",
"(",
")",
"else",
":",
"return",
"hasher",
".",
"digest",
"(",
")"
] | r"""
For better hashes use hasher=hashlib.sha256, and keep stride=1
Args:
fpath (str): file path string
blocksize (int): 2 ** 16. Affects speed of reading file
hasher (None): defaults to sha1 for fast (but insecure) hashing
stride (int): strides > 1 skip data to hash, useful for faster
hashing, but less accurate; also makes the hash dependent on
blocksize.
References:
http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
http://stackoverflow.com/questions/5001893/when-should-i-use-sha-1-and-when-should-i-use-sha-2
CommandLine:
python -m utool.util_hash --test-get_file_hash
python -m utool.util_hash --test-get_file_hash:0
python -m utool.util_hash --test-get_file_hash:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> #blocksize = 65536 # 2 ** 16
>>> blocksize = 2 ** 16
>>> hasher = None
>>> stride = 1
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'7\x07B\x0eX<sRu\xa2\x90P\xda\xb2\x84?\x81?\xa9\xd9'
'\x13\x9b\xf6\x0f\xa3QQ \xd7"$\xe9m\x05\x9e\x81\xf6\xf2v\xe4'
'\x16\x00\x80Xx\x8c-H\xcdP\xf6\x02\x9frl\xbf\x99VQ\xb5'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> #fpath = ut.grab_file_url('http://en.wikipedia.org/wiki/List_of_comets_by_type')
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'tmp.txt')
>>> ut.write_to(fpath, ut.lorium_ipsum())
>>> blocksize = 2 ** 3
>>> hasher = None
>>> stride = 2
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'5KP\xcf>R\xf6\xffO:L\xac\x9c\xd3V+\x0e\xf6\xe1n'
Ignore:
file_ = open(fpath, 'rb') | [
"r",
"For",
"better",
"hashes",
"use",
"hasher",
"=",
"hashlib",
".",
"sha256",
"and",
"keep",
"stride",
"=",
"1"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L829-L897 | train |
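A sketch of the stride mechanic (file contents and sizes here are arbitrary): with `stride > 1` the seek skips `blocksize * (stride - 1)` bytes after each read, so which bytes get hashed — and hence the digest — depends on the blocksize:

```python
import tempfile

data = b''.join(i.to_bytes(4, 'big') for i in range(2048))  # non-repeating
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(data)
    fpath = f.name

full = get_file_hash(fpath, hexdigest=True)
a = get_file_hash(fpath, blocksize=64, stride=2, hexdigest=True)
b = get_file_hash(fpath, blocksize=128, stride=2, hexdigest=True)
# Different blocksizes sample different bytes once stride > 1:
print(full == a, a == b)  # False False
```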
Erotemic/utool | utool/util_hash.py | get_file_uuid | def get_file_uuid(fpath, hasher=None, stride=1):
""" Creates a uuid from the hash of a file
"""
if hasher is None:
hasher = hashlib.sha1() # 20 bytes of output
#hasher = hashlib.sha256() # 32 bytes of output
# sha1 produces a 20 byte hash
hashbytes_20 = get_file_hash(fpath, hasher=hasher, stride=stride)
# sha1 produces 20 bytes, but UUID requires 16 bytes
hashbytes_16 = hashbytes_20[0:16]
uuid_ = uuid.UUID(bytes=hashbytes_16)
return uuid_ | python | def get_file_uuid(fpath, hasher=None, stride=1):
""" Creates a uuid from the hash of a file
"""
if hasher is None:
hasher = hashlib.sha1() # 20 bytes of output
#hasher = hashlib.sha256() # 32 bytes of output
# sha1 produces a 20 byte hash
hashbytes_20 = get_file_hash(fpath, hasher=hasher, stride=stride)
# sha1 produces 20 bytes, but UUID requires 16 bytes
hashbytes_16 = hashbytes_20[0:16]
uuid_ = uuid.UUID(bytes=hashbytes_16)
return uuid_ | [
"def",
"get_file_uuid",
"(",
"fpath",
",",
"hasher",
"=",
"None",
",",
"stride",
"=",
"1",
")",
":",
"if",
"hasher",
"is",
"None",
":",
"hasher",
"=",
"hashlib",
".",
"sha1",
"(",
")",
"# 20 bytes of output",
"#hasher = hashlib.sha256() # 32 bytes of output",
"# sha1 produces a 20 byte hash",
"hashbytes_20",
"=",
"get_file_hash",
"(",
"fpath",
",",
"hasher",
"=",
"hasher",
",",
"stride",
"=",
"stride",
")",
"# sha1 produces 20 bytes, but UUID requires 16 bytes",
"hashbytes_16",
"=",
"hashbytes_20",
"[",
"0",
":",
"16",
"]",
"uuid_",
"=",
"uuid",
".",
"UUID",
"(",
"bytes",
"=",
"hashbytes_16",
")",
"return",
"uuid_"
] | Creates a uuid from the hash of a file | [
"Creates",
"a",
"uuid",
"from",
"the",
"hash",
"of",
"a",
"file"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L968-L979 | train |
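The truncation trick in isolation, using only the standard library: a UUID needs exactly 16 bytes, sha1 produces 20, and the tail is simply dropped:

```python
import hashlib
import uuid

digest = hashlib.sha1(b'file contents here').digest()
print(len(digest))                   # 20
print(uuid.UUID(bytes=digest[:16]))  # deterministic UUID for the content
```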
Erotemic/utool | utool/util_hash.py | combine_uuids | def combine_uuids(uuids, ordered=True, salt=''):
"""
Creates a uuid that specifies a group of UUIDS
Args:
uuids (list): list of uuid objects
ordered (bool): if False uuid order changes the resulting combined uuid
otherwise the uuids are considered an orderless set
salt (str): salts the resulting hash
Returns:
uuid.UUID: combined uuid
CommandLine:
python -m utool.util_hash --test-combine_uuids
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> uuids = [hashable_to_uuid('one'), hashable_to_uuid('two'),
>>> hashable_to_uuid('three')]
>>> combo1 = combine_uuids(uuids, ordered=True)
>>> combo2 = combine_uuids(uuids[::-1], ordered=True)
>>> combo3 = combine_uuids(uuids, ordered=False)
>>> combo4 = combine_uuids(uuids[::-1], ordered=False)
>>> result = ut.repr4([combo1, combo2, combo3, combo4], nobr=True)
>>> print(result)
UUID('83ee781f-8646-ccba-0ed8-13842825c12a'),
UUID('52bbb33f-612e-2ab8-a62c-2f46e5b1edc8'),
UUID('945cadab-e834-e581-0f74-62f106d20d81'),
UUID('945cadab-e834-e581-0f74-62f106d20d81'),
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> uuids = [uuid.UUID('5ff6b34e-7d8f-ef32-5fad-489266acd2ae'),
>>> uuid.UUID('f2400146-ec12-950b-1489-668228e155a8'),
>>> uuid.UUID('037d6f31-8c73-f961-1fe4-d616442a1e86'),
>>> uuid.UUID('ca45d6e2-e648-09cc-a49e-e71c6fa3b3f3')]
>>> ordered = True
>>> salt = u''
>>> result = combine_uuids(uuids, ordered, salt)
>>> print(result)
1dabc66b-b564-676a-99b4-5cae7a9e7294
"""
if len(uuids) == 0:
return get_zero_uuid()
elif len(uuids) == 1:
return uuids[0]
else:
if not ordered:
uuids = sorted(uuids)
sep_str = '-'
sep_byte = six.binary_type(six.b(sep_str))
pref = six.binary_type(six.b('{}{}{}'.format(salt, sep_str, len(uuids))))
combined_bytes = pref + sep_byte.join([u.bytes for u in uuids])
combined_uuid = hashable_to_uuid(combined_bytes)
return combined_uuid | python | def combine_uuids(uuids, ordered=True, salt=''):
"""
Creates a uuid that specifies a group of UUIDS
Args:
uuids (list): list of uuid objects
ordered (bool): if False uuid order changes the resulting combined uuid
otherwise the uuids are considered an orderless set
salt (str): salts the resulting hash
Returns:
uuid.UUID: combined uuid
CommandLine:
python -m utool.util_hash --test-combine_uuids
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> uuids = [hashable_to_uuid('one'), hashable_to_uuid('two'),
>>> hashable_to_uuid('three')]
>>> combo1 = combine_uuids(uuids, ordered=True)
>>> combo2 = combine_uuids(uuids[::-1], ordered=True)
>>> combo3 = combine_uuids(uuids, ordered=False)
>>> combo4 = combine_uuids(uuids[::-1], ordered=False)
>>> result = ut.repr4([combo1, combo2, combo3, combo4], nobr=True)
>>> print(result)
UUID('83ee781f-8646-ccba-0ed8-13842825c12a'),
UUID('52bbb33f-612e-2ab8-a62c-2f46e5b1edc8'),
UUID('945cadab-e834-e581-0f74-62f106d20d81'),
UUID('945cadab-e834-e581-0f74-62f106d20d81'),
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> uuids = [uuid.UUID('5ff6b34e-7d8f-ef32-5fad-489266acd2ae'),
>>> uuid.UUID('f2400146-ec12-950b-1489-668228e155a8'),
>>> uuid.UUID('037d6f31-8c73-f961-1fe4-d616442a1e86'),
>>> uuid.UUID('ca45d6e2-e648-09cc-a49e-e71c6fa3b3f3')]
>>> ordered = True
>>> salt = u''
>>> result = combine_uuids(uuids, ordered, salt)
>>> print(result)
1dabc66b-b564-676a-99b4-5cae7a9e7294
"""
if len(uuids) == 0:
return get_zero_uuid()
elif len(uuids) == 1:
return uuids[0]
else:
if not ordered:
uuids = sorted(uuids)
sep_str = '-'
sep_byte = six.binary_type(six.b(sep_str))
pref = six.binary_type(six.b('{}{}{}'.format(salt, sep_str, len(uuids))))
combined_bytes = pref + sep_byte.join([u.bytes for u in uuids])
combined_uuid = hashable_to_uuid(combined_bytes)
return combined_uuid | [
"def",
"combine_uuids",
"(",
"uuids",
",",
"ordered",
"=",
"True",
",",
"salt",
"=",
"''",
")",
":",
"if",
"len",
"(",
"uuids",
")",
"==",
"0",
":",
"return",
"get_zero_uuid",
"(",
")",
"elif",
"len",
"(",
"uuids",
")",
"==",
"1",
":",
"return",
"uuids",
"[",
"0",
"]",
"else",
":",
"if",
"not",
"ordered",
":",
"uuids",
"=",
"sorted",
"(",
"uuids",
")",
"sep_str",
"=",
"'-'",
"sep_byte",
"=",
"six",
".",
"binary_type",
"(",
"six",
".",
"b",
"(",
"sep_str",
")",
")",
"pref",
"=",
"six",
".",
"binary_type",
"(",
"six",
".",
"b",
"(",
"'{}{}{}'",
".",
"format",
"(",
"salt",
",",
"sep_str",
",",
"len",
"(",
"uuids",
")",
")",
")",
")",
"combined_bytes",
"=",
"pref",
"+",
"sep_byte",
".",
"join",
"(",
"[",
"u",
".",
"bytes",
"for",
"u",
"in",
"uuids",
"]",
")",
"combined_uuid",
"=",
"hashable_to_uuid",
"(",
"combined_bytes",
")",
"return",
"combined_uuid"
] | Creates a uuid that specifies a group of UUIDS
Args:
uuids (list): list of uuid objects
ordered (bool): if True uuid order changes the resulting combined uuid
otherwise the uuids are considered an orderless set
salt (str): salts the resulting hash
Returns:
uuid.UUID: combined uuid
CommandLine:
python -m utool.util_hash --test-combine_uuids
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> uuids = [hashable_to_uuid('one'), hashable_to_uuid('two'),
>>> hashable_to_uuid('three')]
>>> combo1 = combine_uuids(uuids, ordered=True)
>>> combo2 = combine_uuids(uuids[::-1], ordered=True)
>>> combo3 = combine_uuids(uuids, ordered=False)
>>> combo4 = combine_uuids(uuids[::-1], ordered=False)
>>> result = ut.repr4([combo1, combo2, combo3, combo4], nobr=True)
>>> print(result)
UUID('83ee781f-8646-ccba-0ed8-13842825c12a'),
UUID('52bbb33f-612e-2ab8-a62c-2f46e5b1edc8'),
UUID('945cadab-e834-e581-0f74-62f106d20d81'),
UUID('945cadab-e834-e581-0f74-62f106d20d81'),
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> import utool as ut
>>> uuids = [uuid.UUID('5ff6b34e-7d8f-ef32-5fad-489266acd2ae'),
>>> uuid.UUID('f2400146-ec12-950b-1489-668228e155a8'),
>>> uuid.UUID('037d6f31-8c73-f961-1fe4-d616442a1e86'),
>>> uuid.UUID('ca45d6e2-e648-09cc-a49e-e71c6fa3b3f3')]
>>> ordered = True
>>> salt = u''
>>> result = combine_uuids(uuids, ordered, salt)
>>> print(result)
1dabc66b-b564-676a-99b4-5cae7a9e7294 | [
"Creates",
"a",
"uuid",
"that",
"specifies",
"a",
"group",
"of",
"UUIDS"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_hash.py#L1028-L1087 | train |
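A minimal standalone sketch of the same combination scheme, assuming a SHA-1-based bytes-to-UUID step (utool's real hashable_to_uuid may hash differently, so the exact output values above are not reproduced here):

import hashlib
import uuid

def bytes_to_uuid(data):
    # Illustrative stand-in for hashable_to_uuid: keep the first 16 bytes
    # of a SHA-1 digest as the UUID payload.
    return uuid.UUID(bytes=hashlib.sha1(data).digest()[:16])

def combine(uuids, ordered=True, salt=''):
    if len(uuids) == 0:
        return uuid.UUID(int=0)        # zero uuid for an empty group
    if len(uuids) == 1:
        return uuids[0]
    if not ordered:
        uuids = sorted(uuids)          # canonicalize: order no longer matters
    prefix = '{}-{}'.format(salt, len(uuids)).encode('utf-8')
    combined = prefix + b'-'.join(u.bytes for u in uuids)
    return bytes_to_uuid(combined)

a = uuid.uuid5(uuid.NAMESPACE_DNS, 'one')
b = uuid.uuid5(uuid.NAMESPACE_DNS, 'two')
assert combine([a, b], ordered=False) == combine([b, a], ordered=False)
assert combine([a, b], ordered=True) != combine([b, a], ordered=True)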
dsoprea/NsqSpinner | nsq/master.py | Master.__start_connection | def __start_connection(self, context, node, ccallbacks=None):
"""Start a new connection, and manage it from a new greenlet."""
_logger.debug("Creating connection object: CONTEXT=[%s] NODE=[%s]",
context, node)
c = nsq.connection.Connection(
context,
node,
self.__identify,
self.__message_handler,
self.__quit_ev,
ccallbacks,
ignore_quit=self.__connection_ignore_quit)
g = gevent.spawn(c.run)
# Now, wait for the thread to finish the connection.
timeout_s = nsq.config.client.NEW_CONNECTION_NEGOTIATE_TIMEOUT_S
if c.connected_ev.wait(timeout_s) is False:
_logger.error("New connection to server [%s] timed-out. Cleaning-"
"up thread.", node)
g.kill()
g.join()
# We'll try again on the next audit.
raise EnvironmentError("Connection to server [%s] failed." %
(node,))
self.__connections.append((node, c, g)) | python | def __start_connection(self, context, node, ccallbacks=None):
"""Start a new connection, and manage it from a new greenlet."""
_logger.debug("Creating connection object: CONTEXT=[%s] NODE=[%s]",
context, node)
c = nsq.connection.Connection(
context,
node,
self.__identify,
self.__message_handler,
self.__quit_ev,
ccallbacks,
ignore_quit=self.__connection_ignore_quit)
g = gevent.spawn(c.run)
# Now, wait for the thread to finish the connection.
timeout_s = nsq.config.client.NEW_CONNECTION_NEGOTIATE_TIMEOUT_S
if c.connected_ev.wait(timeout_s) is False:
_logger.error("New connection to server [%s] timed-out. Cleaning-"
"up thread.", node)
g.kill()
g.join()
# We'll try again on the next audit.
raise EnvironmentError("Connection to server [%s] failed." %
(node,))
self.__connections.append((node, c, g)) | [
"def",
"__start_connection",
"(",
"self",
",",
"context",
",",
"node",
",",
"ccallbacks",
"=",
"None",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Creating connection object: CONTEXT=[%s] NODE=[%s]\"",
",",
"context",
",",
"node",
")",
"c",
"=",
"nsq",
".",
"connection",
".",
"Connection",
"(",
"context",
",",
"node",
",",
"self",
".",
"__identify",
",",
"self",
".",
"__message_handler",
",",
"self",
".",
"__quit_ev",
",",
"ccallbacks",
",",
"ignore_quit",
"=",
"self",
".",
"__connection_ignore_quit",
")",
"g",
"=",
"gevent",
".",
"spawn",
"(",
"c",
".",
"run",
")",
"# Now, wait for the thread to finish the connection.",
"timeout_s",
"=",
"nsq",
".",
"config",
".",
"client",
".",
"NEW_CONNECTION_NEGOTIATE_TIMEOUT_S",
"if",
"c",
".",
"connected_ev",
".",
"wait",
"(",
"timeout_s",
")",
"is",
"False",
":",
"_logger",
".",
"error",
"(",
"\"New connection to server [%s] timed-out. Cleaning-\"",
"\"up thread.\"",
",",
"node",
")",
"g",
".",
"kill",
"(",
")",
"g",
".",
"join",
"(",
")",
"# We'll try again on the next audit.",
"raise",
"EnvironmentError",
"(",
"\"Connection to server [%s] failed.\"",
"%",
"(",
"node",
",",
")",
")",
"self",
".",
"__connections",
".",
"append",
"(",
"(",
"node",
",",
"c",
",",
"g",
")",
")"
] | Start a new connection, and manage it from a new greenlet. | [
"Start",
"a",
"new",
"connection",
"and",
"manage",
"it",
"from",
"a",
"new",
"greenlet",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L44-L76 | train |
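The spawn-then-wait shape above is plain gevent and can be exercised on its own; this sketch uses made-up worker names rather than NsqSpinner's connection API:

import gevent
from gevent.event import Event

def start_worker(run, connected_ev, timeout_s=1.0):
    g = gevent.spawn(run)
    # Event.wait() returns False on timeout instead of raising.
    if connected_ev.wait(timeout_s) is False:
        g.kill()    # raises GreenletExit inside the stuck worker
        g.join()
        raise EnvironmentError('worker failed to connect in time')
    return g

ev = Event()

def fake_connect():
    gevent.sleep(0.1)   # pretend to negotiate the protocol
    ev.set()            # signal "connected" back to the spawner
    gevent.sleep(10)    # then keep servicing the connection

g = start_worker(fake_connect, ev)
print('connected; worker still running:', not g.ready())   # True
g.kill()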
dsoprea/NsqSpinner | nsq/master.py | Master.__audit_connections | def __audit_connections(self, ccallbacks):
"""Monitor state of all connections, and utility of all servers."""
while self.__quit_ev.is_set() is False:
# Remove any connections that are dead.
self.__connections = filter(
lambda (n, c, g): not g.ready(),
self.__connections)
connected_node_couplets_s = set([
(c.managed_connection.context, node)
for (node, c, g)
in self.__connections])
# Warn if there are any still-active connections that are no longer
# being advertised (probably where we were given some lookup servers
# that have dropped this particular *nsqd* server).
lingering_nodes_s = connected_node_couplets_s - \
self.__node_couplets_s
if lingering_nodes_s:
_logger.warning("Server(s) are connected but no longer "
"advertised: %s", lingering_nodes_s)
# Connect any servers that don't currently have a connection.
unused_nodes_s = self.__node_couplets_s - connected_node_couplets_s
for (context, node) in unused_nodes_s:
_logger.info("Trying to connect unconnected server: "
"CONTEXT=[%s] NODE=[%s]", context, node)
self.__start_connection(context, node, ccallbacks)
else:
# Are there both no unused servers and no connected servers?
if not connected_node_couplets_s:
_logger.error("All servers have gone away. Stopping "
"client.")
# Clear our list of servers, and squash the "no servers!"
# error so that we can shut things down in the right order.
try:
self.set_servers([])
except EnvironmentError:
pass
self.__quit_ev.set()
return
interval_s = \
nsq.config.client.GRANULAR_CONNECTION_AUDIT_SLEEP_STEP_TIME_S
audit_wait_s = float(nsq.config.client.CONNECTION_AUDIT_WAIT_S)
while audit_wait_s > 0 and\
self.__quit_ev.is_set() is False:
gevent.sleep(interval_s)
audit_wait_s -= interval_s | python | def __audit_connections(self, ccallbacks):
"""Monitor state of all connections, and utility of all servers."""
while self.__quit_ev.is_set() is False:
# Remove any connections that are dead.
self.__connections = filter(
lambda (n, c, g): not g.ready(),
self.__connections)
connected_node_couplets_s = set([
(c.managed_connection.context, node)
for (node, c, g)
in self.__connections])
# Warn if there are any still-active connections that are no longer
# being advertised (probably where we were given some lookup servers
# that have dropped this particular *nsqd* server).
lingering_nodes_s = connected_node_couplets_s - \
self.__node_couplets_s
if lingering_nodes_s:
_logger.warning("Server(s) are connected but no longer "
"advertised: %s", lingering_nodes_s)
# Connect any servers that don't currently have a connection.
unused_nodes_s = self.__node_couplets_s - connected_node_couplets_s
for (context, node) in unused_nodes_s:
_logger.info("Trying to connect unconnected server: "
"CONTEXT=[%s] NODE=[%s]", context, node)
self.__start_connection(context, node, ccallbacks)
else:
# Are there both no unused servers and no connected servers?
if not connected_node_couplets_s:
_logger.error("All servers have gone away. Stopping "
"client.")
# Clear our list of servers, and squash the "no servers!"
# error so that we can shut things down in the right order.
try:
self.set_servers([])
except EnvironmentError:
pass
self.__quit_ev.set()
return
interval_s = \
nsq.config.client.GRANULAR_CONNECTION_AUDIT_SLEEP_STEP_TIME_S
audit_wait_s = float(nsq.config.client.CONNECTION_AUDIT_WAIT_S)
while audit_wait_s > 0 and\
self.__quit_ev.is_set() is False:
gevent.sleep(interval_s)
audit_wait_s -= interval_s | [
"def",
"__audit_connections",
"(",
"self",
",",
"ccallbacks",
")",
":",
"while",
"self",
".",
"__quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
":",
"# Remove any connections that are dead.",
"self",
".",
"__connections",
"=",
"filter",
"(",
"lambda",
"(",
"n",
",",
"c",
",",
"g",
")",
":",
"not",
"g",
".",
"ready",
"(",
")",
",",
"self",
".",
"__connections",
")",
"connected_node_couplets_s",
"=",
"set",
"(",
"[",
"(",
"c",
".",
"managed_connection",
".",
"context",
",",
"node",
")",
"for",
"(",
"node",
",",
"c",
",",
"g",
")",
"in",
"self",
".",
"__connections",
"]",
")",
"# Warn if there are any still-active connections that are no longer ",
"# being advertised (probably where we were given some lookup servers ",
"# that have dropped this particular *nsqd* server).",
"lingering_nodes_s",
"=",
"connected_node_couplets_s",
"-",
"self",
".",
"__node_couplets_s",
"if",
"lingering_nodes_s",
":",
"_logger",
".",
"warning",
"(",
"\"Server(s) are connected but no longer \"",
"\"advertised: %s\"",
",",
"lingering_nodes_s",
")",
"# Connect any servers that don't currently have a connection.",
"unused_nodes_s",
"=",
"self",
".",
"__node_couplets_s",
"-",
"connected_node_couplets_s",
"for",
"(",
"context",
",",
"node",
")",
"in",
"unused_nodes_s",
":",
"_logger",
".",
"info",
"(",
"\"Trying to connect unconnected server: \"",
"\"CONTEXT=[%s] NODE=[%s]\"",
",",
"context",
",",
"node",
")",
"self",
".",
"__start_connection",
"(",
"context",
",",
"node",
",",
"ccallbacks",
")",
"else",
":",
"# Are there both no unused servers and no connected servers?",
"if",
"not",
"connected_node_couplets_s",
":",
"_logger",
".",
"error",
"(",
"\"All servers have gone away. Stopping \"",
"\"client.\"",
")",
"# Clear our list of servers, and squash the \"no servers!\" ",
"# error so that we can shut things down in the right order.",
"try",
":",
"self",
".",
"set_servers",
"(",
"[",
"]",
")",
"except",
"EnvironmentError",
":",
"pass",
"self",
".",
"__quit_ev",
".",
"set",
"(",
")",
"return",
"interval_s",
"=",
"nsq",
".",
"config",
".",
"client",
".",
"GRANULAR_CONNECTION_AUDIT_SLEEP_STEP_TIME_S",
"audit_wait_s",
"=",
"float",
"(",
"nsq",
".",
"config",
".",
"client",
".",
"CONNECTION_AUDIT_WAIT_S",
")",
"while",
"audit_wait_s",
">",
"0",
"and",
"self",
".",
"__quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
":",
"gevent",
".",
"sleep",
"(",
"interval_s",
")",
"audit_wait_s",
"-=",
"interval_s"
] | Monitor state of all connections, and utility of all servers. | [
"Monitor",
"state",
"of",
"all",
"connections",
"and",
"utility",
"of",
"all",
"servers",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L111-L170 | train |
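Stripped of the gevent plumbing, the audit body is set reconciliation between advertised and connected (context, node) pairs; the node names below are fabricated:

desired = {('ctx', 'nsqd-a:4150'), ('ctx', 'nsqd-b:4150')}   # advertised
actual = {('ctx', 'nsqd-b:4150'), ('ctx', 'nsqd-c:4150')}    # connected

lingering = actual - desired   # connected but no longer advertised -> warn
missing = desired - actual     # advertised but not connected -> reconnect

print('warn about:', lingering)   # {('ctx', 'nsqd-c:4150')}
print('reconnect:', missing)      # {('ctx', 'nsqd-a:4150')}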
dsoprea/NsqSpinner | nsq/master.py | Master.__join_connections | def __join_connections(self):
"""Wait for all connections to close. There are no side-effects here.
We just want to try and leave -after- everything has closed, in
general.
"""
interval_s = nsq.config.client.CONNECTION_CLOSE_AUDIT_WAIT_S
graceful_wait_s = nsq.config.client.CONNECTION_QUIT_CLOSE_TIMEOUT_S
graceful = False
while graceful_wait_s > 0:
if not self.__connections:
break
connected_list = [c.is_connected for (n, c, g) in self.__connections]
if any(connected_list) is False:
graceful = True
break
# We need to give the greenlets periodic control, in order to finish
# up.
gevent.sleep(interval_s)
graceful_wait_s -= interval_s
if graceful is False:
connected_list = [c for (n, c, g) in self.__connections if c.is_connected]
_logger.error("We were told to terminate, but not all "
"connections were stopped: [%s]", connected_list) | python | def __join_connections(self):
"""Wait for all connections to close. There are no side-effects here.
We just want to try and leave -after- everything has closed, in
general.
"""
interval_s = nsq.config.client.CONNECTION_CLOSE_AUDIT_WAIT_S
graceful_wait_s = nsq.config.client.CONNECTION_QUIT_CLOSE_TIMEOUT_S
graceful = False
while graceful_wait_s > 0:
if not self.__connections:
break
connected_list = [c.is_connected for (n, c, g) in self.__connections]
if any(connected_list) is False:
graceful = True
break
# We need to give the greenlets periodic control, in order to finish
# up.
gevent.sleep(interval_s)
graceful_wait_s -= interval_s
if graceful is False:
connected_list = [c for (n, c, g) in self.__connections if c.is_connected]
_logger.error("We were told to terminate, but not all "
"connections were stopped: [%s]", connected_list) | [
"def",
"__join_connections",
"(",
"self",
")",
":",
"interval_s",
"=",
"nsq",
".",
"config",
".",
"client",
".",
"CONNECTION_CLOSE_AUDIT_WAIT_S",
"graceful_wait_s",
"=",
"nsq",
".",
"config",
".",
"client",
".",
"CONNECTION_QUIT_CLOSE_TIMEOUT_S",
"graceful",
"=",
"False",
"while",
"graceful_wait_s",
">",
"0",
":",
"if",
"not",
"self",
".",
"__connections",
":",
"break",
"connected_list",
"=",
"[",
"c",
".",
"is_connected",
"for",
"(",
"n",
",",
"c",
",",
"g",
")",
"in",
"self",
".",
"__connections",
"]",
"if",
"any",
"(",
"connected_list",
")",
"is",
"False",
":",
"graceful",
"=",
"True",
"break",
"# We need to give the greenlets periodic control, in order to finish ",
"# up.",
"gevent",
".",
"sleep",
"(",
"interval_s",
")",
"graceful_wait_s",
"-=",
"interval_s",
"if",
"graceful",
"is",
"False",
":",
"connected_list",
"=",
"[",
"c",
"for",
"(",
"n",
",",
"c",
",",
"g",
")",
"in",
"self",
".",
"__connections",
"if",
"c",
".",
"is_connected",
"]",
"_logger",
".",
"error",
"(",
"\"We were told to terminate, but not all \"",
"\"connections were stopped: [%s]\"",
",",
"connected_list",
")"
] | Wait for all connections to close. There are no side-effects here.
We just want to try and leave -after- everything has closed, in
general. | [
"Wait",
"for",
"all",
"connections",
"to",
"close",
".",
"There",
"are",
"no",
"side",
"-",
"effects",
"here",
".",
"We",
"just",
"want",
"to",
"try",
"and",
"leave",
"-",
"after",
"-",
"everything",
"has",
"closed",
"in",
"general",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L172-L200 | train |
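The same bounded wait can be written against any collection of is-connected probes; a plain-time sketch, with placeholder values standing in for the nsq.config constants:

import time

def join_all(probes, interval_s=0.05, budget_s=1.0):
    # Poll until every probe reports closed, or the time budget runs out.
    while budget_s > 0:
        if not any(probe() for probe in probes):
            return True              # graceful: everything closed in time
        time.sleep(interval_s)       # yield so the workers can finish up
        budget_s -= interval_s
    return False                     # timed out with connections still open

flags = iter([True, True])           # "connected" for the first two polls
probe = lambda: next(flags, False)   # then reports closed forever
print(join_all([probe]))             # True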
dsoprea/NsqSpinner | nsq/master.py | Master.__manage_connections | def __manage_connections(self, ccallbacks=None):
"""This runs as the main connection management greenlet."""
_logger.info("Running client.")
# Create message-handler.
if self.__message_handler_cls is not None:
# TODO(dustin): Move this to another thread if we can mix multithreading with coroutines.
self.__message_handler = self.__message_handler_cls(
self.__election,
ccallbacks)
# Spawn the initial connections to all of the servers.
for (context, node) in self.__node_couplets_s:
self.__start_connection(context, node, ccallbacks)
# Wait for at least one connection to the server.
self.__wait_for_one_server_connection()
# Indicate that the client is okay to pass control back to the caller.
self.__is_alive = True
self.__ready_ev.set()
# Loop, and maintain all connections. This exits when the quit event
# is set.
self.__audit_connections(ccallbacks)
# Wait for all of the connections to close. They will respond to the
# same quit event that terminates the audit loop just above.
self.__join_connections()
_logger.info("Connection management has stopped.")
self.__is_alive = False | python | def __manage_connections(self, ccallbacks=None):
"""This runs as the main connection management greenlet."""
_logger.info("Running client.")
# Create message-handler.
if self.__message_handler_cls is not None:
# TODO(dustin): Move this to another thread if we can mix multithreading with coroutines.
self.__message_handler = self.__message_handler_cls(
self.__election,
ccallbacks)
# Spawn the initial connections to all of the servers.
for (context, node) in self.__node_couplets_s:
self.__start_connection(context, node, ccallbacks)
# Wait for at least one connection to the server.
self.__wait_for_one_server_connection()
# Indicate that the client is okay to pass control back to the caller.
self.__is_alive = True
self.__ready_ev.set()
# Loop, and maintain all connections. This exits when the quit event
# is set.
self.__audit_connections(ccallbacks)
# Wait for all of the connections to close. They will respond to the
# same quit event that terminates the audit loop just above.
self.__join_connections()
_logger.info("Connection management has stopped.")
self.__is_alive = False | [
"def",
"__manage_connections",
"(",
"self",
",",
"ccallbacks",
"=",
"None",
")",
":",
"_logger",
".",
"info",
"(",
"\"Running client.\"",
")",
"# Create message-handler.",
"if",
"self",
".",
"__message_handler_cls",
"is",
"not",
"None",
":",
"# TODO(dustin): Move this to another thread if we can mix multithreading with coroutines. ",
"self",
".",
"__message_handler",
"=",
"self",
".",
"__message_handler_cls",
"(",
"self",
".",
"__election",
",",
"ccallbacks",
")",
"# Spawn the initial connections to all of the servers.",
"for",
"(",
"context",
",",
"node",
")",
"in",
"self",
".",
"__node_couplets_s",
":",
"self",
".",
"__start_connection",
"(",
"context",
",",
"node",
",",
"ccallbacks",
")",
"# Wait for at least one connection to the server.",
"self",
".",
"__wait_for_one_server_connection",
"(",
")",
"# Indicate that the client is okay to pass control back to the caller.",
"self",
".",
"__is_alive",
"=",
"True",
"self",
".",
"__ready_ev",
".",
"set",
"(",
")",
"# Loop, and maintain all connections. This exits when the quit event ",
"# is set.",
"self",
".",
"__audit_connections",
"(",
"ccallbacks",
")",
"# Wait for all of the connections to close. They will respond to the ",
"# same quit event that terminate the audit loop just above.",
"self",
".",
"__join_connections",
"(",
")",
"_logger",
".",
"info",
"(",
"\"Connection management has stopped.\"",
")",
"self",
".",
"__is_alive",
"=",
"False"
] | This runs as the main connection management greenlet. | [
"This",
"runs",
"as",
"the",
"main",
"connection",
"management",
"greenlet",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L202-L237 | train |
dsoprea/NsqSpinner | nsq/master.py | Master.set_servers | def set_servers(self, node_couplets):
"""Set the current collection of servers. The entries are 2-tuples of
contexts and nodes.
"""
node_couplets_s = set(node_couplets)
if node_couplets_s != self.__node_couplets_s:
_logger.info("Servers have changed. NEW: %s REMOVED: %s",
node_couplets_s - self.__node_couplets_s,
self.__node_couplets_s - node_couplets_s)
# Since no servers means no connection greenlets, and the discover
# greenlet is technically scheduled and not running between
# invocations, this should successfully terminate the process.
if not node_couplets_s:
raise EnvironmentError("No servers available.")
self.__node_couplets_s = node_couplets_s | python | def set_servers(self, node_couplets):
"""Set the current collection of servers. The entries are 2-tuples of
contexts and nodes.
"""
node_couplets_s = set(node_couplets)
if node_couplets_s != self.__node_couplets_s:
_logger.info("Servers have changed. NEW: %s REMOVED: %s",
node_couplets_s - self.__node_couplets_s,
self.__node_couplets_s - node_couplets_s)
# Since no servers means no connection greenlets, and the discover
# greenlet is technically scheduled and not running between
# invocations, this should successfully terminate the process.
if not node_couplets_s:
raise EnvironmentError("No servers available.")
self.__node_couplets_s = node_couplets_s | [
"def",
"set_servers",
"(",
"self",
",",
"node_couplets",
")",
":",
"node_couplets_s",
"=",
"set",
"(",
"node_couplets",
")",
"if",
"node_couplets_s",
"!=",
"self",
".",
"__node_couplets_s",
":",
"_logger",
".",
"info",
"(",
"\"Servers have changed. NEW: %s REMOVED: %s\"",
",",
"node_couplets_s",
"-",
"self",
".",
"__node_couplets_s",
",",
"self",
".",
"__node_couplets_s",
"-",
"node_couplets_s",
")",
"# Since no servers means no connection greenlets, and the discover ",
"# greenlet is technically scheduled and not running between ",
"# invocations, this should successfully terminate the process.",
"if",
"not",
"node_couplets_s",
":",
"raise",
"EnvironmentError",
"(",
"\"No servers available.\"",
")",
"self",
".",
"__node_couplets_s",
"=",
"node_couplets_s"
] | Set the current collection of servers. The entries are 2-tuples of
contexts and nodes. | [
"Set",
"the",
"current",
"collection",
"of",
"servers",
".",
"The",
"entries",
"are",
"2",
"-",
"tuples",
"of",
"contexts",
"and",
"nodes",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L239-L257 | train |
dsoprea/NsqSpinner | nsq/master.py | Master.start | def start(self, ccallbacks=None):
"""Establish and maintain connections."""
self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks)
self.__ready_ev.wait() | python | def start(self, ccallbacks=None):
"""Establish and maintain connections."""
self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks)
self.__ready_ev.wait() | [
"def",
"start",
"(",
"self",
",",
"ccallbacks",
"=",
"None",
")",
":",
"self",
".",
"__manage_g",
"=",
"gevent",
".",
"spawn",
"(",
"self",
".",
"__manage_connections",
",",
"ccallbacks",
")",
"self",
".",
"__ready_ev",
".",
"wait",
"(",
")"
] | Establish and maintain connections. | [
"Establish",
"and",
"maintain",
"connections",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L268-L272 | train |
dsoprea/NsqSpinner | nsq/master.py | Master.stop | def stop(self):
"""Stop all of the connections."""
_logger.debug("Emitting quit signal for connections.")
self.__quit_ev.set()
_logger.info("Waiting for connection manager to stop.")
self.__manage_g.join() | python | def stop(self):
"""Stop all of the connections."""
_logger.debug("Emitting quit signal for connections.")
self.__quit_ev.set()
_logger.info("Waiting for connection manager to stop.")
self.__manage_g.join() | [
"def",
"stop",
"(",
"self",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Emitting quit signal for connections.\"",
")",
"self",
".",
"__quit_ev",
".",
"set",
"(",
")",
"_logger",
".",
"info",
"(",
"\"Waiting for connection manager to stop.\"",
")",
"self",
".",
"__manage_g",
".",
"join",
"(",
")"
] | Stop all of the connections. | [
"Stop",
"all",
"of",
"the",
"connections",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/master.py#L274-L281 | train |
LEMS/pylems | lems/run.py | run | def run(file_path,include_dirs=[],dlems=False,nogui=False):
"""
Function for running from a script or shell.
"""
import argparse
args = argparse.Namespace()
args.lems_file = file_path
args.I = include_dirs
args.dlems = dlems
args.nogui = nogui
main(args=args) | python | def run(file_path,include_dirs=[],dlems=False,nogui=False):
"""
Function for running from a script or shell.
"""
import argparse
args = argparse.Namespace()
args.lems_file = file_path
args.I = include_dirs
args.dlems = dlems
args.nogui = nogui
main(args=args) | [
"def",
"run",
"(",
"file_path",
",",
"include_dirs",
"=",
"[",
"]",
",",
"dlems",
"=",
"False",
",",
"nogui",
"=",
"False",
")",
":",
"import",
"argparse",
"args",
"=",
"argparse",
".",
"Namespace",
"(",
")",
"args",
".",
"lems_file",
"=",
"file_path",
"args",
".",
"I",
"=",
"include_dirs",
"args",
".",
"dlems",
"=",
"dlems",
"args",
".",
"nogui",
"=",
"nogui",
"main",
"(",
"args",
"=",
"args",
")"
] | Function for running from a script or shell. | [
"Function",
"for",
"running",
"from",
"a",
"script",
"or",
"shell",
"."
] | 4eeb719d2f23650fe16c38626663b69b5c83818b | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/run.py#L44-L54 | train |
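The trick here is that argparse.Namespace is an ordinary attribute container, so a caller can hand main() the same object parse_args() would have produced; a standalone illustration with a made-up main():

import argparse

def main(args):
    print(args.lems_file, args.I, args.nogui)

args = argparse.Namespace()          # empty attribute bag
args.lems_file = 'model.xml'
args.I = ['include/dir']
args.dlems = False
args.nogui = True
main(args)                           # model.xml ['include/dir'] True

# Equivalent one-liner: Namespace also accepts keyword arguments directly.
main(argparse.Namespace(lems_file='model.xml', I=[], dlems=False, nogui=True))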
dsoprea/NsqSpinner | nsq/node.py | DiscoveredNode.connect | def connect(self, nice_quit_ev):
"""Connect the server. We expect this to implement backoff and all
connection logistics for servers that were discovered via a lookup
node.
"""
_logger.debug("Connecting to discovered node: [%s]", self.server_host)
stop_epoch = time.time() + \
nsq.config.client.MAXIMUM_CONNECT_ATTEMPT_PERIOD_S
timeout_s = nsq.config.client.INITIAL_CONNECT_FAIL_WAIT_S
backoff_rate = nsq.config.client.CONNECT_FAIL_WAIT_BACKOFF_RATE
while stop_epoch >= time.time() and nice_quit_ev.is_set() is False:
try:
c = self.primitive_connect()
except gevent.socket.error:
_logger.exception("Could not connect to discovered server: "
"[%s]", self.server_host)
else:
_logger.info("Discovered server-node connected: [%s]",
self.server_host)
return c
timeout_s = min(timeout_s * backoff_rate,
nsq.config.client.MAXIMUM_CONNECT_FAIL_WAIT_S)
_logger.info("Waiting for (%d) seconds before reconnecting.",
timeout_s)
gevent.sleep(timeout_s)
raise nsq.exceptions.NsqConnectGiveUpError(
"Could not connect to the nsqlookupd server: [%s]" %
(self.server_host,)) | python | def connect(self, nice_quit_ev):
"""Connect the server. We expect this to implement backoff and all
connection logistics for servers that were discovered via a lookup
node.
"""
_logger.debug("Connecting to discovered node: [%s]", self.server_host)
stop_epoch = time.time() + \
nsq.config.client.MAXIMUM_CONNECT_ATTEMPT_PERIOD_S
timeout_s = nsq.config.client.INITIAL_CONNECT_FAIL_WAIT_S
backoff_rate = nsq.config.client.CONNECT_FAIL_WAIT_BACKOFF_RATE
while stop_epoch >= time.time() and nice_quit_ev.is_set() is False:
try:
c = self.primitive_connect()
except gevent.socket.error:
_logger.exception("Could not connect to discovered server: "
"[%s]", self.server_host)
else:
_logger.info("Discovered server-node connected: [%s]",
self.server_host)
return c
timeout_s = min(timeout_s * backoff_rate,
nsq.config.client.MAXIMUM_CONNECT_FAIL_WAIT_S)
_logger.info("Waiting for (%d) seconds before reconnecting.",
timeout_s)
gevent.sleep(timeout_s)
raise nsq.exceptions.NsqConnectGiveUpError(
"Could not connect to the nsqlookupd server: [%s]" %
(self.server_host,)) | [
"def",
"connect",
"(",
"self",
",",
"nice_quit_ev",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Connecting to discovered node: [%s]\"",
",",
"self",
".",
"server_host",
")",
"stop_epoch",
"=",
"time",
".",
"time",
"(",
")",
"+",
"nsq",
".",
"config",
".",
"client",
".",
"MAXIMUM_CONNECT_ATTEMPT_PERIOD_S",
"timeout_s",
"=",
"nsq",
".",
"config",
".",
"client",
".",
"INITIAL_CONNECT_FAIL_WAIT_S",
"backoff_rate",
"=",
"nsq",
".",
"config",
".",
"client",
".",
"CONNECT_FAIL_WAIT_BACKOFF_RATE",
"while",
"stop_epoch",
">=",
"time",
".",
"time",
"(",
")",
"and",
"nice_quit_ev",
".",
"is_set",
"(",
")",
"is",
"False",
":",
"try",
":",
"c",
"=",
"self",
".",
"primitive_connect",
"(",
")",
"except",
"gevent",
".",
"socket",
".",
"error",
":",
"_logger",
".",
"exception",
"(",
"\"Could not connect to discovered server: \"",
"\"[%s]\"",
",",
"self",
".",
"server_host",
")",
"else",
":",
"_logger",
".",
"info",
"(",
"\"Discovered server-node connected: [%s]\"",
",",
"self",
".",
"server_host",
")",
"return",
"c",
"timeout_s",
"=",
"min",
"(",
"timeout_s",
"*",
"backoff_rate",
",",
"nsq",
".",
"config",
".",
"client",
".",
"MAXIMUM_CONNECT_FAIL_WAIT_S",
")",
"_logger",
".",
"info",
"(",
"\"Waiting for (%d) seconds before reconnecting.\"",
",",
"timeout_s",
")",
"gevent",
".",
"sleep",
"(",
"timeout_s",
")",
"raise",
"nsq",
".",
"exceptions",
".",
"NsqConnectGiveUpError",
"(",
"\"Could not connect to the nsqlookupd server: [%s]\"",
"%",
"(",
"self",
".",
"server_host",
",",
")",
")"
] | Connect the server. We expect this to implement backoff and all
connection logistics for servers that were discovered via a lookup
node. | [
"Connect",
"the",
"server",
".",
"We",
"expect",
"this",
"to",
"implement",
"backoff",
"and",
"all",
"connection",
"logistics",
"for",
"servers",
"that",
"were",
"discovered",
"via",
"a",
"lookup",
"node",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/node.py#L62-L98 | train |
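The retry policy is capped exponential backoff under an overall deadline; the same shape in isolation, with placeholder constants instead of the nsq.config values and OSError standing in for gevent.socket.error:

import time

def connect_with_backoff(attempt, budget_s=5.0, wait_s=0.05,
                         backoff_rate=2.0, max_wait_s=0.2):
    stop_epoch = time.time() + budget_s
    while time.time() <= stop_epoch:
        try:
            return attempt()
        except OSError:
            # Grow the wait geometrically, but never past the cap.
            wait_s = min(wait_s * backoff_rate, max_wait_s)
            time.sleep(wait_s)
    raise RuntimeError('gave up connecting within the budget')

calls = []
def flaky():
    calls.append(1)
    if len(calls) < 3:
        raise OSError('connection refused')
    return 'socket'

print(connect_with_backoff(flaky))   # 'socket', after two backoffs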
dsoprea/NsqSpinner | nsq/node.py | ServerNode.connect | def connect(self, nice_quit_ev):
"""Connect the server. We expect this to implement connection logistics
for servers that were explicitly prescribed to us.
"""
_logger.debug("Connecting to explicit server node: [%s]",
self.server_host)
# According to the docs, a nsqlookupd-discovered server should fall-out
# of the lineup immediately if it fails. If it comes back, nsqlookupd
# will give it back to us.
try:
c = self.primitive_connect()
except gevent.socket.error:
_logger.exception("Could not connect to explicit server: [%s]",
self.server_host)
raise nsq.exceptions.NsqConnectGiveUpError(
"Could not connect to the nsqd server: [%s]" %
(self.server_host,))
_logger.info("Explicit server-node connected: [%s]", self.server_host)
return c | python | def connect(self, nice_quit_ev):
"""Connect the server. We expect this to implement connection logistics
for servers that were explicitly prescribed to us.
"""
_logger.debug("Connecting to explicit server node: [%s]",
self.server_host)
# According to the docs, a nsqlookupd-discovered server should fall-out
# of the lineup immediately if it fails. If it comes back, nsqlookupd
# will give it back to us.
try:
c = self.primitive_connect()
except gevent.socket.error:
_logger.exception("Could not connect to explicit server: [%s]",
self.server_host)
raise nsq.exceptions.NsqConnectGiveUpError(
"Could not connect to the nsqd server: [%s]" %
(self.server_host,))
_logger.info("Explicit server-node connected: [%s]", self.server_host)
return c | [
"def",
"connect",
"(",
"self",
",",
"nice_quit_ev",
")",
":",
"_logger",
".",
"debug",
"(",
"\"Connecting to explicit server node: [%s]\"",
",",
"self",
".",
"server_host",
")",
"# According to the docs, a nsqlookupd-discovered server should fall-out ",
"# of the lineup immediately if it fails. If it comes back, nsqlookupd ",
"# will give it back to us.",
"try",
":",
"c",
"=",
"self",
".",
"primitive_connect",
"(",
")",
"except",
"gevent",
".",
"socket",
".",
"error",
":",
"_logger",
".",
"exception",
"(",
"\"Could not connect to explicit server: [%s]\"",
",",
"self",
".",
"server_host",
")",
"raise",
"nsq",
".",
"exceptions",
".",
"NsqConnectGiveUpError",
"(",
"\"Could not connect to the nsqd server: [%s]\"",
"%",
"(",
"self",
".",
"server_host",
",",
")",
")",
"_logger",
".",
"info",
"(",
"\"Explicit server-node connected: [%s]\"",
",",
"self",
".",
"server_host",
")",
"return",
"c"
] | Connect the server. We expect this to implement connection logistics
for servers that were explicitly prescribed to us. | [
"Connect",
"the",
"server",
".",
"We",
"expect",
"this",
"to",
"implement",
"connection",
"logistics",
"for",
"servers",
"that",
"were",
"explicitly",
"prescribed",
"to",
"us",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/node.py#L107-L130 | train |
glormph/msstitch | src/app/drivers/prottable/fdr.py | ProttableFDRDriver.prepare | def prepare(self):
"""No percolator XML for protein tables"""
self.target = self.fn
self.targetheader = reader.get_tsv_header(self.target)
self.decoyheader = reader.get_tsv_header(self.decoyfn) | python | def prepare(self):
"""No percolator XML for protein tables"""
self.target = self.fn
self.targetheader = reader.get_tsv_header(self.target)
self.decoyheader = reader.get_tsv_header(self.decoyfn) | [
"def",
"prepare",
"(",
"self",
")",
":",
"self",
".",
"target",
"=",
"self",
".",
"fn",
"self",
".",
"targetheader",
"=",
"reader",
".",
"get_tsv_header",
"(",
"self",
".",
"target",
")",
"self",
".",
"decoyheader",
"=",
"reader",
".",
"get_tsv_header",
"(",
"self",
".",
"decoyfn",
")"
] | No percolator XML for protein tables | [
"No",
"percolator",
"XML",
"for",
"protein",
"tables"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/prottable/fdr.py#L32-L36 | train |
product-definition-center/pdc-client | pdc_client/__init__.py | PDCClient.obtain_token | def obtain_token(self):
"""
Try to obtain token from all end-points that were ever used to serve the
token. If the request returns 404 NOT FOUND, retry with older version of
the URL.
"""
token_end_points = ('token/obtain',
'obtain-token',
'obtain_token')
for end_point in token_end_points:
try:
return self.auth[end_point]._(page_size=None)['token']
except BeanBagException as e:
if e.response.status_code != 404:
raise
raise Exception('Could not obtain token from any known URL.') | python | def obtain_token(self):
"""
Try to obtain token from all end-points that were ever used to serve the
token. If the request returns 404 NOT FOUND, retry with older version of
the URL.
"""
token_end_points = ('token/obtain',
'obtain-token',
'obtain_token')
for end_point in token_end_points:
try:
return self.auth[end_point]._(page_size=None)['token']
except BeanBagException as e:
if e.response.status_code != 404:
raise
raise Exception('Could not obtain token from any known URL.') | [
"def",
"obtain_token",
"(",
"self",
")",
":",
"token_end_points",
"=",
"(",
"'token/obtain'",
",",
"'obtain-token'",
",",
"'obtain_token'",
")",
"for",
"end_point",
"in",
"token_end_points",
":",
"try",
":",
"return",
"self",
".",
"auth",
"[",
"end_point",
"]",
".",
"_",
"(",
"page_size",
"=",
"None",
")",
"[",
"'token'",
"]",
"except",
"BeanBagException",
"as",
"e",
":",
"if",
"e",
".",
"response",
".",
"status_code",
"!=",
"404",
":",
"raise",
"raise",
"Exception",
"(",
"'Could not obtain token from any known URL.'",
")"
] | Try to obtain token from all end-points that were ever used to serve the
token. If the request returns 404 NOT FOUND, retry with older version of
the URL. | [
"Try",
"to",
"obtain",
"token",
"from",
"all",
"end",
"-",
"points",
"that",
"were",
"ever",
"used",
"to",
"serve",
"the",
"token",
".",
"If",
"the",
"request",
"returns",
"404",
"NOT",
"FOUND",
"retry",
"with",
"older",
"version",
"of",
"the",
"URL",
"."
] | 7236fd8b72e675ebb321bbe337289d9fbeb6119f | https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/__init__.py#L195-L210 | train |
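Reduced to its skeleton, this is try-each-endpoint-until-one-exists; fetch() below is a stand-in for the authenticated BeanBag call, with KeyError playing the role of the 404 branch:

def obtain_token(fetch,
                 end_points=('token/obtain', 'obtain-token', 'obtain_token')):
    for end_point in end_points:
        try:
            return fetch(end_point)['token']
        except KeyError:    # stand-in for a 404 NOT FOUND response
            continue
    raise Exception('Could not obtain token from any known URL.')

responses = {'obtain-token': {'token': 'abc123'}}
print(obtain_token(lambda ep: responses[ep]))   # abc123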
product-definition-center/pdc-client | pdc_client/__init__.py | _BeanBagWrapper.results | def results(self, *args, **kwargs):
"""
Return an iterator with all pages of data.
Raise NoResultsError with response if there is unexpected data.
"""
def worker():
kwargs['page'] = 1
while True:
response = self.client(*args, **kwargs)
if isinstance(response, list):
yield response
break
elif _is_page(response):
yield response['results']
if response['next']:
kwargs['page'] += 1
else:
break
else:
raise NoResultsError(response)
return itertools.chain.from_iterable(worker()) | python | def results(self, *args, **kwargs):
"""
Return an iterator with all pages of data.
Raise NoResultsError with response if there is unexpected data.
"""
def worker():
kwargs['page'] = 1
while True:
response = self.client(*args, **kwargs)
if isinstance(response, list):
yield response
break
elif _is_page(response):
yield response['results']
if response['next']:
kwargs['page'] += 1
else:
break
else:
raise NoResultsError(response)
return itertools.chain.from_iterable(worker()) | [
"def",
"results",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"worker",
"(",
")",
":",
"kwargs",
"[",
"'page'",
"]",
"=",
"1",
"while",
"True",
":",
"response",
"=",
"self",
".",
"client",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"response",
",",
"list",
")",
":",
"yield",
"response",
"break",
"elif",
"_is_page",
"(",
"response",
")",
":",
"yield",
"response",
"[",
"'results'",
"]",
"if",
"response",
"[",
"'next'",
"]",
":",
"kwargs",
"[",
"'page'",
"]",
"+=",
"1",
"else",
":",
"break",
"else",
":",
"raise",
"NoResultsError",
"(",
"response",
")",
"return",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"worker",
"(",
")",
")"
] | Return an iterator with all pages of data.
Raise NoResultsError with response if there is unexpected data. | [
"Return",
"an",
"iterator",
"with",
"all",
"pages",
"of",
"data",
".",
"Return",
"NoResultsError",
"with",
"response",
"if",
"there",
"is",
"unexpected",
"data",
"."
] | 7236fd8b72e675ebb321bbe337289d9fbeb6119f | https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/__init__.py#L331-L352 | train |
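The worker/chain pagination idiom runs fine against any page-shaped responses; a self-contained mock with two fake pages:

import itertools

PAGES = {
    1: {'results': [1, 2], 'next': 'page=2'},
    2: {'results': [3], 'next': None},
}

def fetch(page):
    return PAGES[page]

def results():
    def worker():
        page = 1
        while True:
            response = fetch(page)
            yield response['results']       # one list per page
            if response['next'] is None:
                break
            page += 1
    # Flatten the stream of pages into a lazy stream of items.
    return itertools.chain.from_iterable(worker())

print(list(results()))   # [1, 2, 3]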
glormph/msstitch | src/app/actions/headers/base.py | get_isoquant_fields | def get_isoquant_fields(pqdb=False, poolnames=False):
"""Returns a headerfield dict for isobaric quant channels. Channels are
taken from DB and there isn't a pool-independent version of this yet"""
# FIXME when is a None database passed?
if pqdb is None:
return {}
try:
channels_psms = pqdb.get_isoquant_amountpsms_channels()
except OperationalError:
# FIXME what does this catch?
return {}
quantheader, psmsheader = OrderedDict(), OrderedDict()
for chan_name, amnt_psms_name in channels_psms:
quantheader[chan_name] = poolnames
if amnt_psms_name:
psmsheader[amnt_psms_name] = poolnames
quantheader.update(psmsheader)
return quantheader | python | def get_isoquant_fields(pqdb=False, poolnames=False):
"""Returns a headerfield dict for isobaric quant channels. Channels are
taken from DB and there isn't a pool-independent version of this yet"""
# FIXME when is a None database passed?
if pqdb is None:
return {}
try:
channels_psms = pqdb.get_isoquant_amountpsms_channels()
except OperationalError:
# FIXME what does this catch?
return {}
quantheader, psmsheader = OrderedDict(), OrderedDict()
for chan_name, amnt_psms_name in channels_psms:
quantheader[chan_name] = poolnames
if amnt_psms_name:
psmsheader[amnt_psms_name] = poolnames
quantheader.update(psmsheader)
return quantheader | [
"def",
"get_isoquant_fields",
"(",
"pqdb",
"=",
"False",
",",
"poolnames",
"=",
"False",
")",
":",
"# FIXME when is a None database passed?",
"if",
"pqdb",
"is",
"None",
":",
"return",
"{",
"}",
"try",
":",
"channels_psms",
"=",
"pqdb",
".",
"get_isoquant_amountpsms_channels",
"(",
")",
"except",
"OperationalError",
":",
"# FIXME what does this catch?",
"return",
"{",
"}",
"quantheader",
",",
"psmsheader",
"=",
"OrderedDict",
"(",
")",
",",
"OrderedDict",
"(",
")",
"for",
"chan_name",
",",
"amnt_psms_name",
"in",
"channels_psms",
":",
"quantheader",
"[",
"chan_name",
"]",
"=",
"poolnames",
"if",
"amnt_psms_name",
":",
"psmsheader",
"[",
"amnt_psms_name",
"]",
"=",
"poolnames",
"quantheader",
".",
"update",
"(",
"psmsheader",
")",
"return",
"quantheader"
] | Returns a headerfield dict for isobaric quant channels. Channels are
taken from DB and there isn't a pool-independent version of this yet | [
"Returns",
"a",
"headerfield",
"dict",
"for",
"isobaric",
"quant",
"channels",
".",
"Channels",
"are",
"taken",
"from",
"DB",
"and",
"there",
"isn",
"t",
"a",
"pool",
"-",
"independent",
"version",
"of",
"this",
"yet"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/headers/base.py#L83-L100 | train |
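The header assembly reduces to two OrderedDicts merged so that amount-PSMs columns trail the channel columns; with fabricated channel names:

from collections import OrderedDict

channels_psms = [('tmt_126', 'amount_psms_126'), ('tmt_127', None)]
poolnames = ['pool1', 'pool2']

quantheader, psmsheader = OrderedDict(), OrderedDict()
for chan_name, amnt_psms_name in channels_psms:
    quantheader[chan_name] = poolnames
    if amnt_psms_name:
        psmsheader[amnt_psms_name] = poolnames
quantheader.update(psmsheader)   # PSM-count columns go after the channels
print(list(quantheader))         # ['tmt_126', 'tmt_127', 'amount_psms_126']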
trendels/gevent_inotifyx | example.py | watch_for_events | def watch_for_events():
"""Wait for events and print them to stdout."""
fd = inotify.init()
try:
wd = inotify.add_watch(fd, '/tmp', inotify.IN_CLOSE_WRITE)
while True:
for event in inotify.get_events(fd):
print("event:", event.name, event.get_mask_description())
finally:
os.close(fd) | python | def watch_for_events():
"""Wait for events and print them to stdout."""
fd = inotify.init()
try:
wd = inotify.add_watch(fd, '/tmp', inotify.IN_CLOSE_WRITE)
while True:
for event in inotify.get_events(fd):
print("event:", event.name, event.get_mask_description())
finally:
os.close(fd) | [
"def",
"watch_for_events",
"(",
")",
":",
"fd",
"=",
"inotify",
".",
"init",
"(",
")",
"try",
":",
"wd",
"=",
"inotify",
".",
"add_watch",
"(",
"fd",
",",
"'/tmp'",
",",
"inotify",
".",
"IN_CLOSE_WRITE",
")",
"while",
"True",
":",
"for",
"event",
"in",
"inotify",
".",
"get_events",
"(",
"fd",
")",
":",
"print",
"(",
"\"event:\"",
",",
"event",
".",
"name",
",",
"event",
".",
"get_mask_description",
"(",
")",
")",
"finally",
":",
"os",
".",
"close",
"(",
"fd",
")"
] | Wait for events and print them to stdout. | [
"Wait",
"for",
"events",
"and",
"print",
"them",
"to",
"stdout",
"."
] | b1e531616d150e86b13aeca450a61c66f9bbc855 | https://github.com/trendels/gevent_inotifyx/blob/b1e531616d150e86b13aeca450a61c66f9bbc855/example.py#L16-L25 | train |
ColinDuquesnoy/QCrash | qcrash/formatters/markdown.py | MardownFormatter.format_body | def format_body(self, description, sys_info=None, traceback=None):
"""
Formats the body using markdown.
:param description: Description of the issue, written by the user.
:param sys_info: Optional system information string
:param traceback: Optional traceback.
"""
body = BODY_ITEM_TEMPLATE % {
'name': 'Description', 'value': description
}
if traceback:
traceback = '\n'.join(traceback.splitlines()[-NB_LINES_MAX:])
body += BODY_ITEM_TEMPLATE % {
'name': 'Traceback', 'value': '```\n%s\n```' % traceback
}
if sys_info:
sys_info = '- %s' % '\n- '.join(sys_info.splitlines())
body += BODY_ITEM_TEMPLATE % {
'name': 'System information', 'value': sys_info
}
return body | python | def format_body(self, description, sys_info=None, traceback=None):
"""
Formats the body using markdown.
:param description: Description of the issue, written by the user.
:param sys_info: Optional system information string
:param traceback: Optional traceback.
"""
body = BODY_ITEM_TEMPLATE % {
'name': 'Description', 'value': description
}
if traceback:
traceback = '\n'.join(traceback.splitlines()[-NB_LINES_MAX:])
body += BODY_ITEM_TEMPLATE % {
'name': 'Traceback', 'value': '```\n%s\n```' % traceback
}
if sys_info:
sys_info = '- %s' % '\n- '.join(sys_info.splitlines())
body += BODY_ITEM_TEMPLATE % {
'name': 'System information', 'value': sys_info
}
return body | [
"def",
"format_body",
"(",
"self",
",",
"description",
",",
"sys_info",
"=",
"None",
",",
"traceback",
"=",
"None",
")",
":",
"body",
"=",
"BODY_ITEM_TEMPLATE",
"%",
"{",
"'name'",
":",
"'Description'",
",",
"'value'",
":",
"description",
"}",
"if",
"traceback",
":",
"traceback",
"=",
"'\\n'",
".",
"join",
"(",
"traceback",
".",
"splitlines",
"(",
")",
"[",
"-",
"NB_LINES_MAX",
":",
"]",
")",
"body",
"+=",
"BODY_ITEM_TEMPLATE",
"%",
"{",
"'name'",
":",
"'Traceback'",
",",
"'value'",
":",
"'```\\n%s\\n```'",
"%",
"traceback",
"}",
"if",
"sys_info",
":",
"sys_info",
"=",
"'- %s'",
"%",
"'\\n- '",
".",
"join",
"(",
"sys_info",
".",
"splitlines",
"(",
")",
")",
"body",
"+=",
"BODY_ITEM_TEMPLATE",
"%",
"{",
"'name'",
":",
"'System information'",
",",
"'value'",
":",
"sys_info",
"}",
"return",
"body"
] | Formats the body using markdown.
:param description: Description of the issue, written by the user.
:param sys_info: Optional system information string
:param traceback: Optional traceback. | [
"Formats",
"the",
"body",
"using",
"markdown",
"."
] | 775e1b15764e2041a8f9a08bea938e4d6ce817c7 | https://github.com/ColinDuquesnoy/QCrash/blob/775e1b15764e2041a8f9a08bea938e4d6ce817c7/qcrash/formatters/markdown.py#L21-L43 | train |
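What the expansion yields for a small input, using assumed values for BODY_ITEM_TEMPLATE and NB_LINES_MAX (neither constant is shown in this record):

BODY_ITEM_TEMPLATE = '### %(name)s\n\n%(value)s\n\n'   # assumed layout
NB_LINES_MAX = 2                                       # assumed cap

trace = 'line1\nline2\nline3\nline4'
tail = '\n'.join(trace.splitlines()[-NB_LINES_MAX:])   # keep last 2 lines
body = BODY_ITEM_TEMPLATE % {'name': 'Description', 'value': 'It crashed.'}
body += BODY_ITEM_TEMPLATE % {'name': 'Traceback', 'value': '```\n%s\n```' % tail}
print(body)   # two markdown sections; the traceback keeps only line3/line4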
garethr/cloth | src/cloth/tasks.py | list | def list():
"List EC2 name and public and private ip address"
for node in env.nodes:
print "%s (%s, %s)" % (node.tags["Name"], node.ip_address,
node.private_ip_address) | python | def list():
"List EC2 name and public and private ip address"
for node in env.nodes:
print "%s (%s, %s)" % (node.tags["Name"], node.ip_address,
node.private_ip_address) | [
"def",
"list",
"(",
")",
":",
"for",
"node",
"in",
"env",
".",
"nodes",
":",
"print",
"\"%s (%s, %s)\"",
"%",
"(",
"node",
".",
"tags",
"[",
"\"Name\"",
"]",
",",
"node",
".",
"ip_address",
",",
"node",
".",
"private_ip_address",
")"
] | List EC2 name and public and private ip address | [
"List",
"EC2",
"name",
"and",
"public",
"and",
"private",
"ip",
"address"
] | b50c7cd6b03f49a931ee55ec94212760c50694a9 | https://github.com/garethr/cloth/blob/b50c7cd6b03f49a931ee55ec94212760c50694a9/src/cloth/tasks.py#L40-L44 | train |
steveYeah/PyBomb | pybomb/clients/games_client.py | GamesClient.quick_search | def quick_search(self, name, platform=None, sort_by=None, desc=True):
"""
Quick search method that allows you to search for a game using only the
title and the platform
:param name: string
:param platform: int
:param sort_by: string
:param desc: bool
:return: pybomb.clients.Response
"""
if platform is None:
query_filter = "name:{0}".format(name)
else:
query_filter = "name:{0},platforms:{1}".format(name, platform)
search_params = {"filter": query_filter}
if sort_by is not None:
self._validate_sort_field(sort_by)
if desc:
direction = self.SORT_ORDER_DESCENDING
else:
direction = self.SORT_ORDER_ASCENDING
search_params["sort"] = "{0}:{1}".format(sort_by, direction)
response = self._query(search_params)
return response | python | def quick_search(self, name, platform=None, sort_by=None, desc=True):
"""
Quick search method that allows you to search for a game using only the
title and the platform
:param name: string
:param platform: int
:param sort_by: string
:param desc: bool
:return: pybomb.clients.Response
"""
if platform is None:
query_filter = "name:{0}".format(name)
else:
query_filter = "name:{0},platforms:{1}".format(name, platform)
search_params = {"filter": query_filter}
if sort_by is not None:
self._validate_sort_field(sort_by)
if desc:
direction = self.SORT_ORDER_DESCENDING
else:
direction = self.SORT_ORDER_ASCENDING
search_params["sort"] = "{0}:{1}".format(sort_by, direction)
response = self._query(search_params)
return response | [
"def",
"quick_search",
"(",
"self",
",",
"name",
",",
"platform",
"=",
"None",
",",
"sort_by",
"=",
"None",
",",
"desc",
"=",
"True",
")",
":",
"if",
"platform",
"is",
"None",
":",
"query_filter",
"=",
"\"name:{0}\"",
".",
"format",
"(",
"name",
")",
"else",
":",
"query_filter",
"=",
"\"name:{0},platforms:{1}\"",
".",
"format",
"(",
"name",
",",
"platform",
")",
"search_params",
"=",
"{",
"\"filter\"",
":",
"query_filter",
"}",
"if",
"sort_by",
"is",
"not",
"None",
":",
"self",
".",
"_validate_sort_field",
"(",
"sort_by",
")",
"if",
"desc",
":",
"direction",
"=",
"self",
".",
"SORT_ORDER_DESCENDING",
"else",
":",
"direction",
"=",
"self",
".",
"SORT_ORDER_ASCENDING",
"search_params",
"[",
"\"sort\"",
"]",
"=",
"\"{0}:{1}\"",
".",
"format",
"(",
"sort_by",
",",
"direction",
")",
"response",
"=",
"self",
".",
"_query",
"(",
"search_params",
")",
"return",
"response"
] | Quick search method that allows you to search for a game using only the
title and the platform
:param name: string
:param platform: int
:param sort_by: string
:param desc: bool
:return: pybomb.clients.Response | [
"Quick",
"search",
"method",
"that",
"allows",
"you",
"to",
"search",
"for",
"a",
"game",
"using",
"only",
"the",
"title",
"and",
"the",
"platform"
] | 54045d74e642f8a1c4366c24bd6a330ae3da6257 | https://github.com/steveYeah/PyBomb/blob/54045d74e642f8a1c4366c24bd6a330ae3da6257/pybomb/clients/games_client.py#L88-L118 | train |
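The filter/sort strings the method assembles can be previewed without touching the API; the platform id and field name below are only examples, not asserted GiantBomb values:

def build_params(name, platform=None, sort_by=None, desc=True):
    if platform is None:
        query_filter = 'name:{0}'.format(name)
    else:
        query_filter = 'name:{0},platforms:{1}'.format(name, platform)
    params = {'filter': query_filter}
    if sort_by is not None:
        # Mirrors the SORT_ORDER_DESCENDING/ASCENDING constants.
        params['sort'] = '{0}:{1}'.format(sort_by, 'desc' if desc else 'asc')
    return params

print(build_params('Doom', platform=94, sort_by='original_release_date'))
# {'filter': 'name:Doom,platforms:94', 'sort': 'original_release_date:desc'}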
quikmile/trellio | trellio/pinger.py | Pinger.send_ping | def send_ping(self, payload=None):
"""
Sends the ping after the interval specified when initializing
"""
yield from asyncio.sleep(self._interval)
self._handler.send_ping(payload=payload)
self._start_timer(payload=payload) | python | def send_ping(self, payload=None):
"""
Sends the ping after the interval specified when initializing
"""
yield from asyncio.sleep(self._interval)
self._handler.send_ping(payload=payload)
self._start_timer(payload=payload) | [
"def",
"send_ping",
"(",
"self",
",",
"payload",
"=",
"None",
")",
":",
"yield",
"from",
"asyncio",
".",
"sleep",
"(",
"self",
".",
"_interval",
")",
"self",
".",
"_handler",
".",
"send_ping",
"(",
"payload",
"=",
"payload",
")",
"self",
".",
"_start_timer",
"(",
"payload",
"=",
"payload",
")"
] | Sends the ping after the interval specified when initializing | [
"Sends",
"the",
"ping",
"after",
"the",
"interval",
"specified",
"when",
"initializing"
] | e8b050077562acf32805fcbb9c0c162248a23c62 | https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/pinger.py#L38-L44 | train |
quikmile/trellio | trellio/pinger.py | Pinger.pong_received | def pong_received(self, payload=None):
"""
Called when a pong is received. So the timer is cancelled
"""
if self._timer is not None:
self._timer.cancel()
self._failures = 0
asyncio.async(self.send_ping(payload=payload)) | python | def pong_received(self, payload=None):
"""
Called when a pong is received. So the timer is cancelled
"""
if self._timer is not None:
self._timer.cancel()
self._failures = 0
asyncio.async(self.send_ping(payload=payload)) | [
"def",
"pong_received",
"(",
"self",
",",
"payload",
"=",
"None",
")",
":",
"if",
"self",
".",
"_timer",
"is",
"not",
"None",
":",
"self",
".",
"_timer",
".",
"cancel",
"(",
")",
"self",
".",
"_failures",
"=",
"0",
"asyncio",
".",
"async",
"(",
"self",
".",
"send_ping",
"(",
"payload",
"=",
"payload",
")",
")"
] | Called when a pong is received. So the timer is cancelled | [
"Called",
"when",
"a",
"pong",
"is",
"received",
".",
"So",
"the",
"timer",
"is",
"cancelled"
] | e8b050077562acf32805fcbb9c0c162248a23c62 | https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/pinger.py#L46-L53 | train |
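Together the two methods form a ping, arm-timer, pong, disarm cycle; a compressed recast in modern asyncio, with trellio's handler and payload plumbing omitted:

import asyncio

class Pinger:
    def __init__(self, interval=0.05):
        self._interval = interval
        self._timer = None

    async def send_ping(self):
        await asyncio.sleep(self._interval)   # pace the pings
        print('ping')
        loop = asyncio.get_running_loop()
        # Arm a failure timer; a missing pong would let it fire.
        self._timer = loop.call_later(self._interval * 2,
                                      lambda: print('pong timed out'))
        self.pong_received()                  # simulate an instant pong

    def pong_received(self):
        if self._timer is not None:
            self._timer.cancel()              # pong arrived: disarm timer
        print('pong')

asyncio.run(Pinger().send_ping())   # prints: ping, pong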
Erotemic/utool | utool/util_type.py | is_comparable_type | def is_comparable_type(var, type_):
"""
Check to see if `var` is an instance of known compatible types for `type_`
Args:
var (?):
type_ (?):
Returns:
bool:
CommandLine:
python -m utool.util_type is_comparable_type --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> flags = []
>>> flags += [is_comparable_type(0, float)]
>>> flags += [is_comparable_type(0, np.float32)]
>>> flags += [is_comparable_type(0, np.int32)]
>>> flags += [is_comparable_type(0, int)]
>>> flags += [is_comparable_type(0.0, int)]
>>> result = ut.repr2(flags)
>>> print(result)
[True, True, True, True, False]
"""
other_types = COMPARABLE_TYPES.get(type_, type_)
return isinstance(var, other_types) | python | def is_comparable_type(var, type_):
"""
Check to see if `var` is an instance of known compatible types for `type_`
Args:
var (?):
type_ (?):
Returns:
bool:
CommandLine:
python -m utool.util_type is_comparable_type --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> flags = []
>>> flags += [is_comparable_type(0, float)]
>>> flags += [is_comparable_type(0, np.float32)]
>>> flags += [is_comparable_type(0, np.int32)]
>>> flags += [is_comparable_type(0, int)]
>>> flags += [is_comparable_type(0.0, int)]
>>> result = ut.repr2(flags)
>>> print(result)
[True, True, True, True, False]
"""
other_types = COMPARABLE_TYPES.get(type_, type_)
return isinstance(var, other_types) | [
"def",
"is_comparable_type",
"(",
"var",
",",
"type_",
")",
":",
"other_types",
"=",
"COMPARABLE_TYPES",
".",
"get",
"(",
"type_",
",",
"type_",
")",
"return",
"isinstance",
"(",
"var",
",",
"other_types",
")"
] | Check to see if `var` is an instance of known compatible types for `type_`
Args:
var (?):
type_ (?):
Returns:
bool:
CommandLine:
python -m utool.util_type is_comparable_type --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> flags = []
>>> flags += [is_comparable_type(0, float)]
>>> flags += [is_comparable_type(0, np.float32)]
>>> flags += [is_comparable_type(0, np.int32)]
>>> flags += [is_comparable_type(0, int)]
>>> flags += [is_comparable_type(0.0, int)]
>>> result = ut.repr2(flags)
>>> print(result)
[True, True, True, True, False] | [
"Check",
"to",
"see",
"if",
"var",
"is",
"an",
"instance",
"of",
"known",
"compatible",
"types",
"for",
"type_"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L104-L133 | train |
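COMPARABLE_TYPES itself is not shown in this record; a hypothetical reconstruction that reproduces the doctest's flags (utool's actual table may list different types):

import numpy as np

# Hypothetical: each key maps to the tuple of types that count as
# "comparable" with it; .get(type_, type_) falls back to type_ itself.
COMPARABLE_TYPES = {
    int: (int, np.integer),
    float: (int, float, np.integer, np.floating),
    np.float32: (int, float, np.integer, np.floating),
    np.int32: (int, np.integer),
}

def is_comparable_type(var, type_):
    return isinstance(var, COMPARABLE_TYPES.get(type_, type_))

print([is_comparable_type(0, t) for t in (float, np.float32, np.int32, int)])
# [True, True, True, True]
print(is_comparable_type(0.0, int))   # False: a float is not an int here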
Erotemic/utool | utool/util_type.py | smart_cast | def smart_cast(var, type_):
"""
casts var to type, and tries to be clever when var is a string
Args:
var (object): variable to cast
type_ (type or str): type to attempt to cast to
Returns:
object:
CommandLine:
python -m utool.util_type --exec-smart_cast
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> var = '1'
>>> type_ = 'fuzzy_subset'
>>> cast_var = smart_cast(var, type_)
>>> result = repr(cast_var)
>>> print(result)
[1]
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> cast_var = smart_cast('1', None)
>>> result = ut.repr2(cast_var)
>>> print(result)
'1'
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('(1,3)', 'eval')
>>> result = repr(cast_var)
>>> print(result)
(1, 3)
Example3:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('(1,3)', eval)
>>> result = repr(cast_var)
>>> print(result)
(1, 3)
Example4:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('1::3', slice)
>>> result = repr(cast_var)
>>> print(result)
slice(1, None, 3)
"""
#if isinstance(type_, tuple):
# for trytype in type_:
# try:
# return trytype(var)
# except Exception:
# pass
# raise TypeError('Cant figure out type=%r' % (type_,))
if type_ is None or var is None:
return var
#if not isinstance(type_, six.string_types):
try:
if issubclass(type_, type(None)):
return var
except TypeError:
pass
if is_str(var):
if type_ in VALID_BOOL_TYPES:
return bool_from_str(var)
elif type_ is slice:
args = [None if len(arg) == 0 else int(arg) for arg in var.split(':')]
return slice(*args)
elif type_ is list:
# need more intelligent parsing here
subvar_list = var.split(',')
return [smart_cast2(subvar) for subvar in subvar_list]
elif isinstance(type_, six.string_types):
if type_ == 'fuzzy_subset':
return fuzzy_subset(var)
if type_ == 'eval':
return eval(var, {}, {})
#elif type_ == 'fuzzy_int':
# return fuzzy_subset(var)
else:
raise NotImplementedError('Unknown smart type_=%r' % (type_,))
return type_(var) | python | def smart_cast(var, type_):
"""
casts var to type, and tries to be clever when var is a string
Args:
var (object): variable to cast
type_ (type or str): type to attempt to cast to
Returns:
object:
CommandLine:
python -m utool.util_type --exec-smart_cast
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> var = '1'
>>> type_ = 'fuzzy_subset'
>>> cast_var = smart_cast(var, type_)
>>> result = repr(cast_var)
>>> print(result)
[1]
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> cast_var = smart_cast('1', None)
>>> result = ut.repr2(cast_var)
>>> print(result)
'1'
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('(1,3)', 'eval')
>>> result = repr(cast_var)
>>> print(result)
(1, 3)
Example3:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('(1,3)', eval)
>>> result = repr(cast_var)
>>> print(result)
(1, 3)
Example4:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('1::3', slice)
>>> result = repr(cast_var)
>>> print(result)
slice(1, None, 3)
"""
#if isinstance(type_, tuple):
# for trytype in type_:
# try:
# return trytype(var)
# except Exception:
# pass
# raise TypeError('Cant figure out type=%r' % (type_,))
if type_ is None or var is None:
return var
#if not isinstance(type_, six.string_types):
try:
if issubclass(type_, type(None)):
return var
except TypeError:
pass
if is_str(var):
if type_ in VALID_BOOL_TYPES:
return bool_from_str(var)
elif type_ is slice:
args = [None if len(arg) == 0 else int(arg) for arg in var.split(':')]
return slice(*args)
elif type_ is list:
# need more intelligent parsing here
subvar_list = var.split(',')
return [smart_cast2(subvar) for subvar in subvar_list]
elif isinstance(type_, six.string_types):
if type_ == 'fuzzy_subset':
return fuzzy_subset(var)
if type_ == 'eval':
return eval(var, {}, {})
#elif type_ == 'fuzzy_int':
# return fuzzy_subset(var)
else:
            raise NotImplementedError('Unknown smart type_=%r' % (type_,))
return type_(var) | [
"def",
"smart_cast",
"(",
"var",
",",
"type_",
")",
":",
"#if isinstance(type_, tuple):",
"# for trytype in type_:",
"# try:",
"# return trytype(var)",
"# except Exception:",
"# pass",
"# raise TypeError('Cant figure out type=%r' % (type_,))",
"if",
"type_",
"is",
"None",
"or",
"var",
"is",
"None",
":",
"return",
"var",
"#if not isinstance(type_, six.string_types):",
"try",
":",
"if",
"issubclass",
"(",
"type_",
",",
"type",
"(",
"None",
")",
")",
":",
"return",
"var",
"except",
"TypeError",
":",
"pass",
"if",
"is_str",
"(",
"var",
")",
":",
"if",
"type_",
"in",
"VALID_BOOL_TYPES",
":",
"return",
"bool_from_str",
"(",
"var",
")",
"elif",
"type_",
"is",
"slice",
":",
"args",
"=",
"[",
"None",
"if",
"len",
"(",
"arg",
")",
"==",
"0",
"else",
"int",
"(",
"arg",
")",
"for",
"arg",
"in",
"var",
".",
"split",
"(",
"':'",
")",
"]",
"return",
"slice",
"(",
"*",
"args",
")",
"elif",
"type_",
"is",
"list",
":",
"# need more intelligent parsing here",
"subvar_list",
"=",
"var",
".",
"split",
"(",
"','",
")",
"return",
"[",
"smart_cast2",
"(",
"subvar",
")",
"for",
"subvar",
"in",
"subvar_list",
"]",
"elif",
"isinstance",
"(",
"type_",
",",
"six",
".",
"string_types",
")",
":",
"if",
"type_",
"==",
"'fuzzy_subset'",
":",
"return",
"fuzzy_subset",
"(",
"var",
")",
"if",
"type_",
"==",
"'eval'",
":",
"return",
"eval",
"(",
"var",
",",
"{",
"}",
",",
"{",
"}",
")",
"#elif type_ == 'fuzzy_int':",
"# return fuzzy_subset(var)",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Uknown smart type_=%r'",
"%",
"(",
"type_",
",",
")",
")",
"return",
"type_",
"(",
"var",
")"
] | casts var to type, and tries to be clever when var is a string
Args:
var (object): variable to cast
type_ (type or str): type to attempt to cast to
Returns:
object:
CommandLine:
python -m utool.util_type --exec-smart_cast
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> var = '1'
>>> type_ = 'fuzzy_subset'
>>> cast_var = smart_cast(var, type_)
>>> result = repr(cast_var)
>>> print(result)
[1]
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> import utool as ut
>>> cast_var = smart_cast('1', None)
>>> result = ut.repr2(cast_var)
>>> print(result)
'1'
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('(1,3)', 'eval')
>>> result = repr(cast_var)
>>> print(result)
(1, 3)
Example3:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('(1,3)', eval)
>>> result = repr(cast_var)
>>> print(result)
(1, 3)
Example4:
>>> # ENABLE_DOCTEST
>>> from utool.util_type import * # NOQA
>>> cast_var = smart_cast('1::3', slice)
>>> result = repr(cast_var)
>>> print(result)
slice(1, None, 3) | [
"casts",
"var",
"to",
"type",
"and",
"tries",
"to",
"be",
"clever",
"when",
"var",
"is",
"a",
"string"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L169-L260 | train |
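Illustrative usage of smart_cast beyond the doctests above (a sketch: assumes utool re-exports smart_cast at the package level, that bool is among VALID_BOOL_TYPES, and that smart_cast2 leaves non-numeric strings unchanged):

import utool as ut

assert ut.smart_cast('True', bool) is True                 # bool_from_str path
assert ut.smart_cast('1::3', slice) == slice(1, None, 3)   # colon-separated slice
assert ut.smart_cast('(1,3)', 'eval') == (1, 3)            # restricted eval
# each comma-separated element is deferred to smart_cast2
assert ut.smart_cast('a,b', list) == ['a', 'b']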
Erotemic/utool | utool/util_type.py | fuzzy_subset | def fuzzy_subset(str_):
"""
converts a string into an argument to list_take
"""
if str_ is None:
return str_
if ':' in str_:
return smart_cast(str_, slice)
if str_.startswith('['):
return smart_cast(str_[1:-1], list)
else:
return smart_cast(str_, list) | python | def fuzzy_subset(str_):
"""
converts a string into an argument to list_take
"""
if str_ is None:
return str_
if ':' in str_:
return smart_cast(str_, slice)
if str_.startswith('['):
return smart_cast(str_[1:-1], list)
else:
return smart_cast(str_, list) | [
"def",
"fuzzy_subset",
"(",
"str_",
")",
":",
"if",
"str_",
"is",
"None",
":",
"return",
"str_",
"if",
"':'",
"in",
"str_",
":",
"return",
"smart_cast",
"(",
"str_",
",",
"slice",
")",
"if",
"str_",
".",
"startswith",
"(",
"'['",
")",
":",
"return",
"smart_cast",
"(",
"str_",
"[",
"1",
":",
"-",
"1",
"]",
",",
"list",
")",
"else",
":",
"return",
"smart_cast",
"(",
"str_",
",",
"list",
")"
] | converts a string into an argument to list_take | [
"converts",
"a",
"string",
"into",
"an",
"argument",
"to",
"list_take"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L331-L342 | train |
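A sketch of the three input shapes fuzzy_subset dispatches on (assumes integer elements, which smart_cast parses element-wise):

from utool.util_type import fuzzy_subset

fuzzy_subset('1:4')    # contains ':'  -> slice(1, 4, None)
fuzzy_subset('[1,3]')  # bracketed csv -> [1, 3]
fuzzy_subset('1,3')    # bare csv      -> [1, 3]
fuzzy_subset(None)     # passthrough   -> None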
Erotemic/utool | utool/util_type.py | fuzzy_int | def fuzzy_int(str_):
"""
lets some special strings be interpreted as ints
"""
try:
ret = int(str_)
return ret
except Exception:
# Parse comma separated values as ints
if re.match(r'\d*,\d*,?\d*', str_):
return tuple(map(int, str_.split(',')))
# Parse range values as ints
if re.match(r'\d*:\d*:?\d*', str_):
return tuple(range(*map(int, str_.split(':'))))
raise | python | def fuzzy_int(str_):
"""
lets some special strings be interpreted as ints
"""
try:
ret = int(str_)
return ret
except Exception:
# Parse comma separated values as ints
if re.match(r'\d*,\d*,?\d*', str_):
return tuple(map(int, str_.split(',')))
# Parse range values as ints
if re.match(r'\d*:\d*:?\d*', str_):
return tuple(range(*map(int, str_.split(':'))))
raise | [
"def",
"fuzzy_int",
"(",
"str_",
")",
":",
"try",
":",
"ret",
"=",
"int",
"(",
"str_",
")",
"return",
"ret",
"except",
"Exception",
":",
"# Parse comma separated values as ints",
"if",
"re",
".",
"match",
"(",
"r'\\d*,\\d*,?\\d*'",
",",
"str_",
")",
":",
"return",
"tuple",
"(",
"map",
"(",
"int",
",",
"str_",
".",
"split",
"(",
"','",
")",
")",
")",
"# Parse range values as ints",
"if",
"re",
".",
"match",
"(",
"r'\\d*:\\d*:?\\d*'",
",",
"str_",
")",
":",
"return",
"tuple",
"(",
"range",
"(",
"*",
"map",
"(",
"int",
",",
"str_",
".",
"split",
"(",
"':'",
")",
")",
")",
")",
"raise"
] | lets some special strings be interpreted as ints | [
"lets",
"some",
"special",
"strings",
"be",
"interpreted",
"as",
"ints"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L345-L359 | train |
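The three accepted spellings, traced against the regexes above (a sketch; assumes utool is importable):

from utool.util_type import fuzzy_int

fuzzy_int('42')      # plain int        -> 42
fuzzy_int('1,2,3')   # comma separated  -> (1, 2, 3)
fuzzy_int('0:10:2')  # range syntax     -> (0, 2, 4, 6, 8)
fuzzy_int('abc')     # no pattern match -> ValueError from int() is re-raised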
Erotemic/utool | utool/util_type.py | get_type | def get_type(var):
"""
Gets types accounting for numpy
Ignore:
import utool as ut
import pandas as pd
var = np.array(['a', 'b', 'c'])
ut.get_type(var)
var = pd.Index(['a', 'b', 'c'])
ut.get_type(var)
"""
if HAVE_NUMPY and isinstance(var, np.ndarray):
if _WIN32:
# This is a weird system specific error
# https://github.com/numpy/numpy/issues/3667
type_ = var.dtype
else:
type_ = var.dtype.type
elif HAVE_PANDAS and isinstance(var, pd.Index):
if _WIN32:
type_ = var.dtype
else:
type_ = var.dtype.type
else:
type_ = type(var)
return type_ | python | def get_type(var):
"""
Gets types accounting for numpy
Ignore:
import utool as ut
import pandas as pd
var = np.array(['a', 'b', 'c'])
ut.get_type(var)
var = pd.Index(['a', 'b', 'c'])
ut.get_type(var)
"""
if HAVE_NUMPY and isinstance(var, np.ndarray):
if _WIN32:
# This is a weird system specific error
# https://github.com/numpy/numpy/issues/3667
type_ = var.dtype
else:
type_ = var.dtype.type
elif HAVE_PANDAS and isinstance(var, pd.Index):
if _WIN32:
type_ = var.dtype
else:
type_ = var.dtype.type
else:
type_ = type(var)
return type_ | [
"def",
"get_type",
"(",
"var",
")",
":",
"if",
"HAVE_NUMPY",
"and",
"isinstance",
"(",
"var",
",",
"np",
".",
"ndarray",
")",
":",
"if",
"_WIN32",
":",
"# This is a weird system specific error",
"# https://github.com/numpy/numpy/issues/3667",
"type_",
"=",
"var",
".",
"dtype",
"else",
":",
"type_",
"=",
"var",
".",
"dtype",
".",
"type",
"elif",
"HAVE_PANDAS",
"and",
"isinstance",
"(",
"var",
",",
"pd",
".",
"Index",
")",
":",
"if",
"_WIN32",
":",
"type_",
"=",
"var",
".",
"dtype",
"else",
":",
"type_",
"=",
"var",
".",
"dtype",
".",
"type",
"else",
":",
"type_",
"=",
"type",
"(",
"var",
")",
"return",
"type_"
] | Gets types accounting for numpy
Ignore:
import utool as ut
import pandas as pd
var = np.array(['a', 'b', 'c'])
ut.get_type(var)
var = pd.Index(['a', 'b', 'c'])
ut.get_type(var) | [
"Gets",
"types",
"accounting",
"for",
"numpy"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L377-L403 | train |
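A sketch of the dtype unwrapping (assumes numpy is installed; on win32 the raw dtype is returned instead of dtype.type, per the workaround above):

import numpy as np
from utool.util_type import get_type

get_type(1.5)          # plain python object -> <class 'float'>
get_type(np.zeros(3))  # ndarray             -> <class 'numpy.float64'>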
Erotemic/utool | utool/util_type.py | get_homogenous_list_type | def get_homogenous_list_type(list_):
"""
    Returns the best matching python type, even if it is an ndarray. Assumes
    all items in the list are of the same type; does not check this.
"""
# TODO Expand and make work correctly
if HAVE_NUMPY and isinstance(list_, np.ndarray):
item = list_
elif isinstance(list_, list) and len(list_) > 0:
item = list_[0]
else:
item = None
if item is not None:
if is_float(item):
type_ = float
elif is_int(item):
type_ = int
elif is_bool(item):
type_ = bool
elif is_str(item):
type_ = str
else:
type_ = get_type(item)
else:
type_ = None
return type_ | python | def get_homogenous_list_type(list_):
"""
    Returns the best matching python type, even if it is an ndarray. Assumes
    all items in the list are of the same type; does not check this.
"""
# TODO Expand and make work correctly
if HAVE_NUMPY and isinstance(list_, np.ndarray):
item = list_
elif isinstance(list_, list) and len(list_) > 0:
item = list_[0]
else:
item = None
if item is not None:
if is_float(item):
type_ = float
elif is_int(item):
type_ = int
elif is_bool(item):
type_ = bool
elif is_str(item):
type_ = str
else:
type_ = get_type(item)
else:
type_ = None
return type_ | [
"def",
"get_homogenous_list_type",
"(",
"list_",
")",
":",
"# TODO Expand and make work correctly",
"if",
"HAVE_NUMPY",
"and",
"isinstance",
"(",
"list_",
",",
"np",
".",
"ndarray",
")",
":",
"item",
"=",
"list_",
"elif",
"isinstance",
"(",
"list_",
",",
"list",
")",
"and",
"len",
"(",
"list_",
")",
">",
"0",
":",
"item",
"=",
"list_",
"[",
"0",
"]",
"else",
":",
"item",
"=",
"None",
"if",
"item",
"is",
"not",
"None",
":",
"if",
"is_float",
"(",
"item",
")",
":",
"type_",
"=",
"float",
"elif",
"is_int",
"(",
"item",
")",
":",
"type_",
"=",
"int",
"elif",
"is_bool",
"(",
"item",
")",
":",
"type_",
"=",
"bool",
"elif",
"is_str",
"(",
"item",
")",
":",
"type_",
"=",
"str",
"else",
":",
"type_",
"=",
"get_type",
"(",
"item",
")",
"else",
":",
"type_",
"=",
"None",
"return",
"type_"
] | Returns the best matching python type, even if it is an ndarray. Assumes
all items in the list are of the same type; does not check this. | [
"Returns",
"the",
"best",
"matching",
"python",
"type",
"even",
"if",
"it",
"is",
"an",
"ndarray",
"assumes",
"all",
"items",
"in",
"the",
"list",
"are",
"of",
"the",
"same",
"type",
".",
"does",
"not",
"check",
"this"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L528-L553 | train |
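A few hedged examples (only the first list item is inspected, per the code above):

from utool.util_type import get_homogenous_list_type

get_homogenous_list_type([1.5, 2.5])  # -> float
get_homogenous_list_type(['a', 'b'])  # -> str
get_homogenous_list_type([])          # -> None (nothing to inspect)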
LEMS/pylems | lems/base/stack.py | Stack.pop | def pop(self):
"""
Pops a value off the top of the stack.
@return: Value popped off the stack.
@rtype: *
@raise StackError: Raised when there is a stack underflow.
"""
if self.stack:
val = self.stack[0]
self.stack = self.stack[1:]
return val
else:
raise StackError('Stack empty') | python | def pop(self):
"""
Pops a value off the top of the stack.
@return: Value popped off the stack.
@rtype: *
@raise StackError: Raised when there is a stack underflow.
"""
if self.stack:
val = self.stack[0]
self.stack = self.stack[1:]
return val
else:
raise StackError('Stack empty') | [
"def",
"pop",
"(",
"self",
")",
":",
"if",
"self",
".",
"stack",
":",
"val",
"=",
"self",
".",
"stack",
"[",
"0",
"]",
"self",
".",
"stack",
"=",
"self",
".",
"stack",
"[",
"1",
":",
"]",
"return",
"val",
"else",
":",
"raise",
"StackError",
"(",
"'Stack empty'",
")"
] | Pops a value off the top of the stack.
@return: Value popped off the stack.
@rtype: *
@raise StackError: Raised when there is a stack underflow. | [
"Pops",
"a",
"value",
"off",
"the",
"top",
"of",
"the",
"stack",
"."
] | 4eeb719d2f23650fe16c38626663b69b5c83818b | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/base/stack.py#L36-L51 | train |
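Note that pop takes self.stack[0], so the top of the stack is the head of the underlying list. A hedged sketch (assumes Stack.push prepends to mirror this pop, and that StackError lives in lems.base.errors; neither is shown in this record):

from lems.base.stack import Stack
from lems.base.errors import StackError  # assumed import location

s = Stack()
s.push(1)
s.push(2)
assert s.pop() == 2      # last pushed comes off first
try:
    Stack().pop()
except StackError:
    pass                 # underflow raises StackError('Stack empty')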
glormph/msstitch | src/app/actions/mslookup/spectra.py | create_spectra_lookup | def create_spectra_lookup(lookup, fn_spectra):
"""Stores all spectra rt, injection time, and scan nr in db"""
to_store = []
mzmlmap = lookup.get_mzmlfile_map()
for fn, spectrum in fn_spectra:
spec_id = '{}_{}'.format(mzmlmap[fn], spectrum['scan'])
mzml_rt = round(float(spectrum['rt']), 12)
mzml_iit = round(float(spectrum['iit']), 12)
mz = float(spectrum['mz'])
to_store.append((spec_id, mzmlmap[fn], spectrum['scan'],
spectrum['charge'], mz, mzml_rt, mzml_iit))
if len(to_store) == DB_STORE_CHUNK:
lookup.store_mzmls(to_store)
to_store = []
lookup.store_mzmls(to_store)
lookup.index_mzml() | python | def create_spectra_lookup(lookup, fn_spectra):
"""Stores all spectra rt, injection time, and scan nr in db"""
to_store = []
mzmlmap = lookup.get_mzmlfile_map()
for fn, spectrum in fn_spectra:
spec_id = '{}_{}'.format(mzmlmap[fn], spectrum['scan'])
mzml_rt = round(float(spectrum['rt']), 12)
mzml_iit = round(float(spectrum['iit']), 12)
mz = float(spectrum['mz'])
to_store.append((spec_id, mzmlmap[fn], spectrum['scan'],
spectrum['charge'], mz, mzml_rt, mzml_iit))
if len(to_store) == DB_STORE_CHUNK:
lookup.store_mzmls(to_store)
to_store = []
lookup.store_mzmls(to_store)
lookup.index_mzml() | [
"def",
"create_spectra_lookup",
"(",
"lookup",
",",
"fn_spectra",
")",
":",
"to_store",
"=",
"[",
"]",
"mzmlmap",
"=",
"lookup",
".",
"get_mzmlfile_map",
"(",
")",
"for",
"fn",
",",
"spectrum",
"in",
"fn_spectra",
":",
"spec_id",
"=",
"'{}_{}'",
".",
"format",
"(",
"mzmlmap",
"[",
"fn",
"]",
",",
"spectrum",
"[",
"'scan'",
"]",
")",
"mzml_rt",
"=",
"round",
"(",
"float",
"(",
"spectrum",
"[",
"'rt'",
"]",
")",
",",
"12",
")",
"mzml_iit",
"=",
"round",
"(",
"float",
"(",
"spectrum",
"[",
"'iit'",
"]",
")",
",",
"12",
")",
"mz",
"=",
"float",
"(",
"spectrum",
"[",
"'mz'",
"]",
")",
"to_store",
".",
"append",
"(",
"(",
"spec_id",
",",
"mzmlmap",
"[",
"fn",
"]",
",",
"spectrum",
"[",
"'scan'",
"]",
",",
"spectrum",
"[",
"'charge'",
"]",
",",
"mz",
",",
"mzml_rt",
",",
"mzml_iit",
")",
")",
"if",
"len",
"(",
"to_store",
")",
"==",
"DB_STORE_CHUNK",
":",
"lookup",
".",
"store_mzmls",
"(",
"to_store",
")",
"to_store",
"=",
"[",
"]",
"lookup",
".",
"store_mzmls",
"(",
"to_store",
")",
"lookup",
".",
"index_mzml",
"(",
")"
] | Stores all spectra rt, injection time, and scan nr in db | [
"Stores",
"all",
"spectra",
"rt",
"injection",
"time",
"and",
"scan",
"nr",
"in",
"db"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/spectra.py#L4-L19 | train |
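The shape of fn_spectra consumed above (a hypothetical pair; values are strings as produced by the mzML parser and are converted with float()/round() before storage):

fn_spectra = [
    ('run01.mzML', {'scan': '2001', 'charge': '2', 'mz': '445.1200',
                    'rt': '12.5', 'iit': '30.1'}),
]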
Erotemic/utool | utool/util_assert.py | assert_raises | def assert_raises(ex_type, func, *args, **kwargs):
r"""
Checks that a function raises an error when given specific arguments.
Args:
ex_type (Exception): exception type
func (callable): live python function
CommandLine:
python -m utool.util_assert assert_raises --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_assert import * # NOQA
>>> import utool as ut
>>> ex_type = AssertionError
>>> func = len
>>> # Check that this raises an error when something else does not
>>> assert_raises(ex_type, assert_raises, ex_type, func, [])
>>> # Check this does not raise an error when something else does
>>> assert_raises(ValueError, [].index, 0)
"""
try:
func(*args, **kwargs)
except Exception as ex:
assert isinstance(ex, ex_type), (
'Raised %r but type should have been %r' % (ex, ex_type))
return True
else:
raise AssertionError('No error was raised') | python | def assert_raises(ex_type, func, *args, **kwargs):
r"""
Checks that a function raises an error when given specific arguments.
Args:
ex_type (Exception): exception type
func (callable): live python function
CommandLine:
python -m utool.util_assert assert_raises --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_assert import * # NOQA
>>> import utool as ut
>>> ex_type = AssertionError
>>> func = len
>>> # Check that this raises an error when something else does not
>>> assert_raises(ex_type, assert_raises, ex_type, func, [])
>>> # Check this does not raise an error when something else does
>>> assert_raises(ValueError, [].index, 0)
"""
try:
func(*args, **kwargs)
except Exception as ex:
assert isinstance(ex, ex_type), (
'Raised %r but type should have been %r' % (ex, ex_type))
return True
else:
raise AssertionError('No error was raised') | [
"def",
"assert_raises",
"(",
"ex_type",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"ex",
":",
"assert",
"isinstance",
"(",
"ex",
",",
"ex_type",
")",
",",
"(",
"'Raised %r but type should have been %r'",
"%",
"(",
"ex",
",",
"ex_type",
")",
")",
"return",
"True",
"else",
":",
"raise",
"AssertionError",
"(",
"'No error was raised'",
")"
] | r"""
Checks that a function raises an error when given specific arguments.
Args:
ex_type (Exception): exception type
func (callable): live python function
CommandLine:
python -m utool.util_assert assert_raises --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_assert import * # NOQA
>>> import utool as ut
>>> ex_type = AssertionError
>>> func = len
>>> # Check that this raises an error when something else does not
>>> assert_raises(ex_type, assert_raises, ex_type, func, [])
>>> # Check this does not raise an error when something else does
>>> assert_raises(ValueError, [].index, 0) | [
"r",
"Checks",
"that",
"a",
"function",
"raises",
"an",
"error",
"when",
"given",
"specific",
"arguments",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_assert.py#L26-L55 | train |
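Both outcomes in one sketch (mirrors the doctest above; assumes utool is installed):

import utool as ut

ut.assert_raises(ValueError, [].index, 0)  # passes: list.index raises ValueError
try:
    ut.assert_raises(ValueError, len, [])  # len([]) raises nothing
except AssertionError as ex:
    print(ex)                              # 'No error was raised'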
dsoprea/NsqSpinner | nsq/connection_election.py | ConnectionElection.command_for_all_connections | def command_for_all_connections(self, cb):
"""Invoke the callback with a command-object for each connection."""
for connection in self.__master.connections:
cb(connection.command) | python | def command_for_all_connections(self, cb):
"""Invoke the callback with a command-object for each connection."""
for connection in self.__master.connections:
cb(connection.command) | [
"def",
"command_for_all_connections",
"(",
"self",
",",
"cb",
")",
":",
"for",
"connection",
"in",
"self",
".",
"__master",
".",
"connections",
":",
"cb",
"(",
"connection",
".",
"command",
")"
] | Invoke the callback with a command-object for each connection. | [
"Invoke",
"the",
"callback",
"with",
"a",
"command",
"-",
"object",
"for",
"each",
"connection",
"."
] | 972237b8ddce737983bfed001fde52e5236be695 | https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/connection_election.py#L40-L44 | train |
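A hedged sketch of a broadcast callback, given an existing ConnectionElection instance named election; command.rdy(0) is an assumption here (the NSQ RDY command), since the command object's API is not shown in this record:

election.command_for_all_connections(lambda command: command.rdy(0))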
Erotemic/utool | utool/util_autogen.py | dump_autogen_code | def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None,
show_diff=None, dowrite=None):
"""
    Helper that writes a file if -w is given on the command line, otherwise
    it just prints it out. It has the option of comparing a diff to the file.
"""
import utool as ut
if dowrite is None:
dowrite = ut.get_argflag(('-w', '--write'))
if show_diff is None:
show_diff = ut.get_argflag('--diff')
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
show_diff = show_diff or num_context_lines is not None
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
if fullprint is None:
fullprint = True
if fullprint is False:
fullprint = ut.get_argflag('--print')
print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
if not dowrite:
if fullprint:
ut.print_code(autogen_text, lexer_name=codetype)
print('\nL___')
else:
print('specify --print to write to stdout')
pass
print('specify -w to write, or --diff to compare')
print('...would write to: %s' % fpath)
if show_diff:
if ut.checkpath(fpath, verbose=True):
prev_text = ut.read_from(fpath)
textdiff = ut.get_textdiff(prev_text, autogen_text,
num_context_lines=num_context_lines)
try:
ut.print_difftext(textdiff)
except UnicodeDecodeError:
import unicodedata
textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
ut.print_difftext(textdiff)
if dowrite:
print('WARNING: Not writing. Remove --diff from command line')
elif dowrite:
ut.write_to(fpath, autogen_text) | python | def dump_autogen_code(fpath, autogen_text, codetype='python', fullprint=None,
show_diff=None, dowrite=None):
"""
    Helper that writes a file if -w is given on the command line, otherwise
    it just prints it out. It has the option of comparing a diff to the file.
"""
import utool as ut
if dowrite is None:
dowrite = ut.get_argflag(('-w', '--write'))
if show_diff is None:
show_diff = ut.get_argflag('--diff')
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
show_diff = show_diff or num_context_lines is not None
num_context_lines = ut.get_argval('--diff', type_=int, default=None)
if fullprint is None:
fullprint = True
if fullprint is False:
fullprint = ut.get_argflag('--print')
print('[autogen] Autogenerated %s...\n+---\n' % (fpath,))
if not dowrite:
if fullprint:
ut.print_code(autogen_text, lexer_name=codetype)
print('\nL___')
else:
print('specify --print to write to stdout')
pass
print('specify -w to write, or --diff to compare')
print('...would write to: %s' % fpath)
if show_diff:
if ut.checkpath(fpath, verbose=True):
prev_text = ut.read_from(fpath)
textdiff = ut.get_textdiff(prev_text, autogen_text,
num_context_lines=num_context_lines)
try:
ut.print_difftext(textdiff)
except UnicodeDecodeError:
import unicodedata
textdiff = unicodedata.normalize('NFKD', textdiff).encode('ascii', 'ignore')
ut.print_difftext(textdiff)
if dowrite:
print('WARNING: Not writing. Remove --diff from command line')
elif dowrite:
ut.write_to(fpath, autogen_text) | [
"def",
"dump_autogen_code",
"(",
"fpath",
",",
"autogen_text",
",",
"codetype",
"=",
"'python'",
",",
"fullprint",
"=",
"None",
",",
"show_diff",
"=",
"None",
",",
"dowrite",
"=",
"None",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"dowrite",
"is",
"None",
":",
"dowrite",
"=",
"ut",
".",
"get_argflag",
"(",
"(",
"'-w'",
",",
"'--write'",
")",
")",
"if",
"show_diff",
"is",
"None",
":",
"show_diff",
"=",
"ut",
".",
"get_argflag",
"(",
"'--diff'",
")",
"num_context_lines",
"=",
"ut",
".",
"get_argval",
"(",
"'--diff'",
",",
"type_",
"=",
"int",
",",
"default",
"=",
"None",
")",
"show_diff",
"=",
"show_diff",
"or",
"num_context_lines",
"is",
"not",
"None",
"num_context_lines",
"=",
"ut",
".",
"get_argval",
"(",
"'--diff'",
",",
"type_",
"=",
"int",
",",
"default",
"=",
"None",
")",
"if",
"fullprint",
"is",
"None",
":",
"fullprint",
"=",
"True",
"if",
"fullprint",
"is",
"False",
":",
"fullprint",
"=",
"ut",
".",
"get_argflag",
"(",
"'--print'",
")",
"print",
"(",
"'[autogen] Autogenerated %s...\\n+---\\n'",
"%",
"(",
"fpath",
",",
")",
")",
"if",
"not",
"dowrite",
":",
"if",
"fullprint",
":",
"ut",
".",
"print_code",
"(",
"autogen_text",
",",
"lexer_name",
"=",
"codetype",
")",
"print",
"(",
"'\\nL___'",
")",
"else",
":",
"print",
"(",
"'specify --print to write to stdout'",
")",
"pass",
"print",
"(",
"'specify -w to write, or --diff to compare'",
")",
"print",
"(",
"'...would write to: %s'",
"%",
"fpath",
")",
"if",
"show_diff",
":",
"if",
"ut",
".",
"checkpath",
"(",
"fpath",
",",
"verbose",
"=",
"True",
")",
":",
"prev_text",
"=",
"ut",
".",
"read_from",
"(",
"fpath",
")",
"textdiff",
"=",
"ut",
".",
"get_textdiff",
"(",
"prev_text",
",",
"autogen_text",
",",
"num_context_lines",
"=",
"num_context_lines",
")",
"try",
":",
"ut",
".",
"print_difftext",
"(",
"textdiff",
")",
"except",
"UnicodeDecodeError",
":",
"import",
"unicodedata",
"textdiff",
"=",
"unicodedata",
".",
"normalize",
"(",
"'NFKD'",
",",
"textdiff",
")",
".",
"encode",
"(",
"'ascii'",
",",
"'ignore'",
")",
"ut",
".",
"print_difftext",
"(",
"textdiff",
")",
"if",
"dowrite",
":",
"print",
"(",
"'WARNING: Not writing. Remove --diff from command line'",
")",
"elif",
"dowrite",
":",
"ut",
".",
"write_to",
"(",
"fpath",
",",
"autogen_text",
")"
] | Helper that writes a file if -w is given on the command line, otherwise
it just prints it out. It has the option of comparing a diff to the file. | [
"Helper",
"that",
"write",
"a",
"file",
"if",
"-",
"w",
"is",
"given",
"on",
"command",
"line",
"otherwise",
"it",
"just",
"prints",
"it",
"out",
".",
"It",
"has",
"the",
"opption",
"of",
"comparing",
"a",
"diff",
"to",
"the",
"file",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L22-L69 | train |
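Driving the three modes explicitly instead of via sys.argv flags (a sketch; assumes utool and its printing dependencies are installed):

import utool as ut

autogen_text = 'x = 1\n'
ut.dump_autogen_code('_gen.py', autogen_text, dowrite=False, show_diff=False)  # preview only
ut.dump_autogen_code('_gen.py', autogen_text, show_diff=True, dowrite=False)   # diff against the existing file
ut.dump_autogen_code('_gen.py', autogen_text, dowrite=True)                    # write for real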
Erotemic/utool | utool/util_autogen.py | autofix_codeblock | def autofix_codeblock(codeblock, max_line_len=80,
aggressive=False,
very_aggressive=False,
experimental=False):
r"""
Uses autopep8 to format a block of code
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> codeblock = ut.codeblock(
'''
def func( with , some = 'Problems' ):
syntax ='Ok'
but = 'Its very messy'
if None:
# syntax might not be perfect due to being cut off
ommiting_this_line_still_works= True
''')
>>> fixed_codeblock = ut.autofix_codeblock(codeblock)
>>> print(fixed_codeblock)
"""
# FIXME idk how to remove the blank line following the function with
# autopep8. It seems to not be supported by them, but it looks bad.
import autopep8
arglist = ['--max-line-length', '80']
if aggressive:
arglist.extend(['-a'])
if very_aggressive:
arglist.extend(['-a', '-a'])
if experimental:
arglist.extend(['--experimental'])
arglist.extend([''])
autopep8_options = autopep8.parse_args(arglist)
fixed_codeblock = autopep8.fix_code(codeblock, options=autopep8_options)
return fixed_codeblock | python | def autofix_codeblock(codeblock, max_line_len=80,
aggressive=False,
very_aggressive=False,
experimental=False):
r"""
Uses autopep8 to format a block of code
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> codeblock = ut.codeblock(
'''
def func( with , some = 'Problems' ):
syntax ='Ok'
but = 'Its very messy'
if None:
# syntax might not be perfect due to being cut off
ommiting_this_line_still_works= True
''')
>>> fixed_codeblock = ut.autofix_codeblock(codeblock)
>>> print(fixed_codeblock)
"""
# FIXME idk how to remove the blank line following the function with
# autopep8. It seems to not be supported by them, but it looks bad.
import autopep8
arglist = ['--max-line-length', '80']
if aggressive:
arglist.extend(['-a'])
if very_aggressive:
arglist.extend(['-a', '-a'])
if experimental:
arglist.extend(['--experimental'])
arglist.extend([''])
autopep8_options = autopep8.parse_args(arglist)
fixed_codeblock = autopep8.fix_code(codeblock, options=autopep8_options)
return fixed_codeblock | [
"def",
"autofix_codeblock",
"(",
"codeblock",
",",
"max_line_len",
"=",
"80",
",",
"aggressive",
"=",
"False",
",",
"very_aggressive",
"=",
"False",
",",
"experimental",
"=",
"False",
")",
":",
"# FIXME idk how to remove the blank line following the function with",
"# autopep8. It seems to not be supported by them, but it looks bad.",
"import",
"autopep8",
"arglist",
"=",
"[",
"'--max-line-length'",
",",
"'80'",
"]",
"if",
"aggressive",
":",
"arglist",
".",
"extend",
"(",
"[",
"'-a'",
"]",
")",
"if",
"very_aggressive",
":",
"arglist",
".",
"extend",
"(",
"[",
"'-a'",
",",
"'-a'",
"]",
")",
"if",
"experimental",
":",
"arglist",
".",
"extend",
"(",
"[",
"'--experimental'",
"]",
")",
"arglist",
".",
"extend",
"(",
"[",
"''",
"]",
")",
"autopep8_options",
"=",
"autopep8",
".",
"parse_args",
"(",
"arglist",
")",
"fixed_codeblock",
"=",
"autopep8",
".",
"fix_code",
"(",
"codeblock",
",",
"options",
"=",
"autopep8_options",
")",
"return",
"fixed_codeblock"
] | r"""
Uses autopep8 to format a block of code
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> codeblock = ut.codeblock(
'''
def func( with , some = 'Problems' ):
syntax ='Ok'
but = 'Its very messy'
if None:
# syntax might not be perfect due to being cut off
ommiting_this_line_still_works= True
''')
>>> fixed_codeblock = ut.autofix_codeblock(codeblock)
>>> print(fixed_codeblock) | [
"r",
"Uses",
"autopep8",
"to",
"format",
"a",
"block",
"of",
"code"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L186-L223 | train |
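A sketch (requires autopep8; exact output can vary by autopep8 version). The default non-aggressive pass fixes the signature spacing but leaves arithmetic such as a+b alone, since E226 is only fixed aggressively:

import utool as ut

messy = "def f( a,b ):\n    return a+b\n"
print(ut.autofix_codeblock(messy))
# def f(a, b):
#     return a+b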
Erotemic/utool | utool/util_autogen.py | auto_docstr | def auto_docstr(modname, funcname, verbose=True, moddir=None, modpath=None, **kwargs):
r"""
called from vim. Uses strings of filename and modnames to build docstr
Args:
modname (str): name of a python module
funcname (str): name of a function in the module
Returns:
str: docstr
CommandLine:
python -m utool.util_autogen auto_docstr
python -m utool --tf auto_docstr
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_autogen import * # NOQA
>>> ut.util_autogen.rrr(verbose=False)
>>> #docstr = ut.auto_docstr('ibeis.algo.hots.smk.smk_index', 'compute_negentropy_names')
>>> modname = ut.get_argval('--modname', default='utool.util_autogen')
>>> funcname = ut.get_argval('--funcname', default='auto_docstr')
>>> moddir = ut.get_argval('--moddir', type_=str, default=None)
>>> docstr = ut.util_autogen.auto_docstr(modname, funcname)
>>> print(docstr)
"""
#import utool as ut
func, module, error_str = load_func_from_module(
modname, funcname, verbose=verbose, moddir=moddir, modpath=modpath)
if error_str is None:
try:
docstr = make_default_docstr(func, **kwargs)
except Exception as ex:
import utool as ut
error_str = ut.formatex(ex, 'Caught Error in parsing docstr', tb=True)
#ut.printex(ex)
error_str += (
'\n\nReplicateCommand:\n '
'python -m utool --tf auto_docstr '
'--modname={modname} --funcname={funcname} --moddir={moddir}').format(
modname=modname, funcname=funcname, moddir=moddir)
error_str += '\n kwargs=' + ut.repr4(kwargs)
return error_str
else:
docstr = error_str
return docstr | python | def auto_docstr(modname, funcname, verbose=True, moddir=None, modpath=None, **kwargs):
r"""
called from vim. Uses strings of filename and modnames to build docstr
Args:
modname (str): name of a python module
funcname (str): name of a function in the module
Returns:
str: docstr
CommandLine:
python -m utool.util_autogen auto_docstr
python -m utool --tf auto_docstr
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_autogen import * # NOQA
>>> ut.util_autogen.rrr(verbose=False)
>>> #docstr = ut.auto_docstr('ibeis.algo.hots.smk.smk_index', 'compute_negentropy_names')
>>> modname = ut.get_argval('--modname', default='utool.util_autogen')
>>> funcname = ut.get_argval('--funcname', default='auto_docstr')
>>> moddir = ut.get_argval('--moddir', type_=str, default=None)
>>> docstr = ut.util_autogen.auto_docstr(modname, funcname)
>>> print(docstr)
"""
#import utool as ut
func, module, error_str = load_func_from_module(
modname, funcname, verbose=verbose, moddir=moddir, modpath=modpath)
if error_str is None:
try:
docstr = make_default_docstr(func, **kwargs)
except Exception as ex:
import utool as ut
error_str = ut.formatex(ex, 'Caught Error in parsing docstr', tb=True)
#ut.printex(ex)
error_str += (
'\n\nReplicateCommand:\n '
'python -m utool --tf auto_docstr '
'--modname={modname} --funcname={funcname} --moddir={moddir}').format(
modname=modname, funcname=funcname, moddir=moddir)
error_str += '\n kwargs=' + ut.repr4(kwargs)
return error_str
else:
docstr = error_str
return docstr | [
"def",
"auto_docstr",
"(",
"modname",
",",
"funcname",
",",
"verbose",
"=",
"True",
",",
"moddir",
"=",
"None",
",",
"modpath",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"#import utool as ut",
"func",
",",
"module",
",",
"error_str",
"=",
"load_func_from_module",
"(",
"modname",
",",
"funcname",
",",
"verbose",
"=",
"verbose",
",",
"moddir",
"=",
"moddir",
",",
"modpath",
"=",
"modpath",
")",
"if",
"error_str",
"is",
"None",
":",
"try",
":",
"docstr",
"=",
"make_default_docstr",
"(",
"func",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
"as",
"ex",
":",
"import",
"utool",
"as",
"ut",
"error_str",
"=",
"ut",
".",
"formatex",
"(",
"ex",
",",
"'Caught Error in parsing docstr'",
",",
"tb",
"=",
"True",
")",
"#ut.printex(ex)",
"error_str",
"+=",
"(",
"'\\n\\nReplicateCommand:\\n '",
"'python -m utool --tf auto_docstr '",
"'--modname={modname} --funcname={funcname} --moddir={moddir}'",
")",
".",
"format",
"(",
"modname",
"=",
"modname",
",",
"funcname",
"=",
"funcname",
",",
"moddir",
"=",
"moddir",
")",
"error_str",
"+=",
"'\\n kwargs='",
"+",
"ut",
".",
"repr4",
"(",
"kwargs",
")",
"return",
"error_str",
"else",
":",
"docstr",
"=",
"error_str",
"return",
"docstr"
] | r"""
called from vim. Uses strings of filename and modnames to build docstr
Args:
modname (str): name of a python module
funcname (str): name of a function in the module
Returns:
str: docstr
CommandLine:
python -m utool.util_autogen auto_docstr
python -m utool --tf auto_docstr
Example:
>>> # DISABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_autogen import * # NOQA
>>> ut.util_autogen.rrr(verbose=False)
>>> #docstr = ut.auto_docstr('ibeis.algo.hots.smk.smk_index', 'compute_negentropy_names')
>>> modname = ut.get_argval('--modname', default='utool.util_autogen')
>>> funcname = ut.get_argval('--funcname', default='auto_docstr')
>>> moddir = ut.get_argval('--moddir', type_=str, default=None)
>>> docstr = ut.util_autogen.auto_docstr(modname, funcname)
>>> print(docstr) | [
"r",
"called",
"from",
"vim",
".",
"Uses",
"strings",
"of",
"filename",
"and",
"modnames",
"to",
"build",
"docstr"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L396-L442 | train |
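Minimal programmatic call, using the same module/function pair the doctest defaults to (a sketch; assumes utool is installed):

import utool as ut

docstr = ut.auto_docstr('utool.util_autogen', 'auto_docstr')
print(docstr)  # a generated google-style docstring, or the error string on failure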
Erotemic/utool | utool/util_autogen.py | make_args_docstr | def make_args_docstr(argname_list, argtype_list, argdesc_list, ismethod,
va_name=None, kw_name=None, kw_keys=[]):
r"""
Builds the argument docstring
Args:
argname_list (list): names
argtype_list (list): types
argdesc_list (list): descriptions
ismethod (bool): if generating docs for a method
va_name (Optional[str]): varargs name
kw_name (Optional[str]): kwargs name
kw_keys (Optional[list]): accepted kwarg keys
Returns:
str: arg_docstr
CommandLine:
python -m utool.util_autogen make_args_docstr
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_autogen import * # NOQA
>>> argname_list = ['argname_list', 'argtype_list', 'argdesc_list']
>>> argtype_list = ['list', 'list', 'list']
>>> argdesc_list = ['names', 'types', 'descriptions']
>>> va_name = 'args'
>>> kw_name = 'kwargs'
>>> kw_keys = ['']
>>> ismethod = False
>>> arg_docstr = make_args_docstr(argname_list, argtype_list,
>>> argdesc_list, ismethod, va_name,
>>> kw_name, kw_keys)
>>> result = str(arg_docstr)
>>> print(result)
argname_list (list): names
argtype_list (list): types
argdesc_list (list): descriptions
*args:
**kwargs:
"""
import utool as ut
if ismethod:
# Remove self from the list
argname_list = argname_list[1:]
argtype_list = argtype_list[1:]
argdesc_list = argdesc_list[1:]
argdoc_list = [arg + ' (%s): %s' % (_type, desc)
for arg, _type, desc in zip(argname_list, argtype_list, argdesc_list)]
# Add in varargs and kwargs
# References:
# http://www.sphinx-doc.org/en/stable/ext/example_google.html#example-google
if va_name is not None:
argdoc_list.append('*' + va_name + ':')
if kw_name is not None:
import textwrap
prefix = '**' + kw_name + ': '
wrapped_lines = textwrap.wrap(', '.join(kw_keys), width=70 - len(prefix))
sep = '\n' + (' ' * len(prefix))
kw_keystr = sep.join(wrapped_lines)
argdoc_list.append((prefix + kw_keystr).strip())
# align?
align_args = False
if align_args:
argdoc_aligned_list = ut.align_lines(argdoc_list, character='(')
arg_docstr = '\n'.join(argdoc_aligned_list)
else:
arg_docstr = '\n'.join(argdoc_list)
return arg_docstr | python | def make_args_docstr(argname_list, argtype_list, argdesc_list, ismethod,
va_name=None, kw_name=None, kw_keys=[]):
r"""
Builds the argument docstring
Args:
argname_list (list): names
argtype_list (list): types
argdesc_list (list): descriptions
ismethod (bool): if generating docs for a method
va_name (Optional[str]): varargs name
kw_name (Optional[str]): kwargs name
kw_keys (Optional[list]): accepted kwarg keys
Returns:
str: arg_docstr
CommandLine:
python -m utool.util_autogen make_args_docstr
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_autogen import * # NOQA
>>> argname_list = ['argname_list', 'argtype_list', 'argdesc_list']
>>> argtype_list = ['list', 'list', 'list']
>>> argdesc_list = ['names', 'types', 'descriptions']
>>> va_name = 'args'
>>> kw_name = 'kwargs'
>>> kw_keys = ['']
>>> ismethod = False
>>> arg_docstr = make_args_docstr(argname_list, argtype_list,
>>> argdesc_list, ismethod, va_name,
>>> kw_name, kw_keys)
>>> result = str(arg_docstr)
>>> print(result)
argname_list (list): names
argtype_list (list): types
argdesc_list (list): descriptions
*args:
**kwargs:
"""
import utool as ut
if ismethod:
# Remove self from the list
argname_list = argname_list[1:]
argtype_list = argtype_list[1:]
argdesc_list = argdesc_list[1:]
argdoc_list = [arg + ' (%s): %s' % (_type, desc)
for arg, _type, desc in zip(argname_list, argtype_list, argdesc_list)]
# Add in varargs and kwargs
# References:
# http://www.sphinx-doc.org/en/stable/ext/example_google.html#example-google
if va_name is not None:
argdoc_list.append('*' + va_name + ':')
if kw_name is not None:
import textwrap
prefix = '**' + kw_name + ': '
wrapped_lines = textwrap.wrap(', '.join(kw_keys), width=70 - len(prefix))
sep = '\n' + (' ' * len(prefix))
kw_keystr = sep.join(wrapped_lines)
argdoc_list.append((prefix + kw_keystr).strip())
# align?
align_args = False
if align_args:
argdoc_aligned_list = ut.align_lines(argdoc_list, character='(')
arg_docstr = '\n'.join(argdoc_aligned_list)
else:
arg_docstr = '\n'.join(argdoc_list)
return arg_docstr | [
"def",
"make_args_docstr",
"(",
"argname_list",
",",
"argtype_list",
",",
"argdesc_list",
",",
"ismethod",
",",
"va_name",
"=",
"None",
",",
"kw_name",
"=",
"None",
",",
"kw_keys",
"=",
"[",
"]",
")",
":",
"import",
"utool",
"as",
"ut",
"if",
"ismethod",
":",
"# Remove self from the list",
"argname_list",
"=",
"argname_list",
"[",
"1",
":",
"]",
"argtype_list",
"=",
"argtype_list",
"[",
"1",
":",
"]",
"argdesc_list",
"=",
"argdesc_list",
"[",
"1",
":",
"]",
"argdoc_list",
"=",
"[",
"arg",
"+",
"' (%s): %s'",
"%",
"(",
"_type",
",",
"desc",
")",
"for",
"arg",
",",
"_type",
",",
"desc",
"in",
"zip",
"(",
"argname_list",
",",
"argtype_list",
",",
"argdesc_list",
")",
"]",
"# Add in varargs and kwargs",
"# References:",
"# http://www.sphinx-doc.org/en/stable/ext/example_google.html#example-google",
"if",
"va_name",
"is",
"not",
"None",
":",
"argdoc_list",
".",
"append",
"(",
"'*'",
"+",
"va_name",
"+",
"':'",
")",
"if",
"kw_name",
"is",
"not",
"None",
":",
"import",
"textwrap",
"prefix",
"=",
"'**'",
"+",
"kw_name",
"+",
"': '",
"wrapped_lines",
"=",
"textwrap",
".",
"wrap",
"(",
"', '",
".",
"join",
"(",
"kw_keys",
")",
",",
"width",
"=",
"70",
"-",
"len",
"(",
"prefix",
")",
")",
"sep",
"=",
"'\\n'",
"+",
"(",
"' '",
"*",
"len",
"(",
"prefix",
")",
")",
"kw_keystr",
"=",
"sep",
".",
"join",
"(",
"wrapped_lines",
")",
"argdoc_list",
".",
"append",
"(",
"(",
"prefix",
"+",
"kw_keystr",
")",
".",
"strip",
"(",
")",
")",
"# align?",
"align_args",
"=",
"False",
"if",
"align_args",
":",
"argdoc_aligned_list",
"=",
"ut",
".",
"align_lines",
"(",
"argdoc_list",
",",
"character",
"=",
"'('",
")",
"arg_docstr",
"=",
"'\\n'",
".",
"join",
"(",
"argdoc_aligned_list",
")",
"else",
":",
"arg_docstr",
"=",
"'\\n'",
".",
"join",
"(",
"argdoc_list",
")",
"return",
"arg_docstr"
] | r"""
Builds the argument docstring
Args:
argname_list (list): names
argtype_list (list): types
argdesc_list (list): descriptions
ismethod (bool): if generating docs for a method
va_name (Optional[str]): varargs name
kw_name (Optional[str]): kwargs name
kw_keys (Optional[list]): accepted kwarg keys
Returns:
str: arg_docstr
CommandLine:
python -m utool.util_autogen make_args_docstr
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_autogen import * # NOQA
>>> argname_list = ['argname_list', 'argtype_list', 'argdesc_list']
>>> argtype_list = ['list', 'list', 'list']
>>> argdesc_list = ['names', 'types', 'descriptions']
>>> va_name = 'args'
>>> kw_name = 'kwargs'
>>> kw_keys = ['']
>>> ismethod = False
>>> arg_docstr = make_args_docstr(argname_list, argtype_list,
>>> argdesc_list, ismethod, va_name,
>>> kw_name, kw_keys)
>>> result = str(arg_docstr)
>>> print(result)
argname_list (list): names
argtype_list (list): types
argdesc_list (list): descriptions
*args:
**kwargs: | [
"r",
"Builds",
"the",
"argument",
"docstring"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L457-L529 | train |
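Key wrapping on the **kwargs line, for keys longer than the doctest's empty string (a sketch):

from utool.util_autogen import make_args_docstr

print(make_args_docstr([], [], [], False, kw_name='kwargs',
                       kw_keys=['alpha', 'beta', 'gamma']))
# **kwargs: alpha, beta, gamma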
Erotemic/utool | utool/util_autogen.py | make_default_docstr | def make_default_docstr(func, with_args=True, with_ret=True,
with_commandline=True, with_example=True,
with_header=False, with_debug=False):
r"""
Tries to make a sensible default docstr so the user
can fill things in without typing too much
# TODO: Interleave old documentation with new documentation
Args:
func (function): live python function
with_args (bool):
with_ret (bool): (Defaults to True)
with_commandline (bool): (Defaults to True)
with_example (bool): (Defaults to True)
with_header (bool): (Defaults to False)
with_debug (bool): (Defaults to False)
Returns:
tuple: (argname, val)
Ignore:
pass
CommandLine:
python -m utool.util_autogen --exec-make_default_docstr --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_autogen import * # NOQA
>>> import utool as ut
>>> func = ut.make_default_docstr
>>> #func = ut.make_args_docstr
>>> #func = PythonStatement
>>> func = auto_docstr
>>> default_docstr = make_default_docstr(func)
>>> result = str(default_docstr)
>>> print(result)
"""
import utool as ut
#from utool import util_inspect
funcinfo = ut.util_inspect.infer_function_info(func)
argname_list = funcinfo.argname_list
argtype_list = funcinfo.argtype_list
argdesc_list = funcinfo.argdesc_list
return_header = funcinfo.return_header
return_type = funcinfo.return_type
return_name = funcinfo.return_name
return_desc = funcinfo.return_desc
funcname = funcinfo.funcname
modname = funcinfo.modname
defaults = funcinfo.defaults
num_indent = funcinfo.num_indent
needs_surround = funcinfo.needs_surround
funcname = funcinfo.funcname
ismethod = funcinfo.ismethod
va_name = funcinfo.va_name
kw_name = funcinfo.kw_name
kw_keys = funcinfo.kw_keys
docstr_parts = []
# Header part
if with_header:
header_block = funcname
docstr_parts.append(header_block)
# Args part
if with_args and len(argname_list) > 0:
argheader = 'Args'
arg_docstr = make_args_docstr(argname_list, argtype_list, argdesc_list,
ismethod, va_name, kw_name, kw_keys)
argsblock = make_docstr_block(argheader, arg_docstr)
docstr_parts.append(argsblock)
# if False:
# with_kw = with_args
# if with_kw and len(kwarg_keys) > 0:
# #ut.embed()
# import textwrap
# kwargs_docstr = ', '.join(kwarg_keys)
# kwargs_docstr = '\n'.join(textwrap.wrap(kwargs_docstr))
# kwargsblock = make_docstr_block('Kwargs', kwargs_docstr)
# docstr_parts.append(kwargsblock)
    # Return / Yield part
if with_ret and return_header is not None:
if return_header is not None:
return_doctr = make_returns_or_yeilds_docstr(return_type, return_name, return_desc)
returnblock = make_docstr_block(return_header, return_doctr)
docstr_parts.append(returnblock)
# Example part
# try to generate a simple and unit testable example
if with_commandline:
cmdlineheader = 'CommandLine'
cmdlinecode = make_cmdline_docstr(funcname, modname)
cmdlineblock = make_docstr_block(cmdlineheader, cmdlinecode)
docstr_parts.append(cmdlineblock)
if with_example:
exampleheader = 'Example'
examplecode = make_example_docstr(funcname, modname, argname_list,
defaults, return_type, return_name,
ismethod)
examplecode_ = ut.indent(examplecode, '>>> ')
exampleblock = make_docstr_block(exampleheader, examplecode_)
docstr_parts.append(exampleblock)
# DEBUG part (in case something goes wrong)
if with_debug:
debugheader = 'Debug'
debugblock = ut.codeblock(
'''
num_indent = {num_indent}
'''
).format(num_indent=num_indent)
debugblock = make_docstr_block(debugheader, debugblock)
docstr_parts.append(debugblock)
# Enclosure / Indentation Parts
if needs_surround:
docstr_parts = ['r"""'] + ['\n\n'.join(docstr_parts)] + ['"""']
default_docstr = '\n'.join(docstr_parts)
else:
default_docstr = '\n\n'.join(docstr_parts)
docstr_indent = ' ' * (num_indent + 4)
default_docstr = ut.indent(default_docstr, docstr_indent)
return default_docstr | python | def make_default_docstr(func, with_args=True, with_ret=True,
with_commandline=True, with_example=True,
with_header=False, with_debug=False):
r"""
Tries to make a sensible default docstr so the user
can fill things in without typing too much
# TODO: Interleave old documentation with new documentation
Args:
func (function): live python function
with_args (bool):
with_ret (bool): (Defaults to True)
with_commandline (bool): (Defaults to True)
with_example (bool): (Defaults to True)
with_header (bool): (Defaults to False)
with_debug (bool): (Defaults to False)
Returns:
tuple: (argname, val)
Ignore:
pass
CommandLine:
python -m utool.util_autogen --exec-make_default_docstr --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_autogen import * # NOQA
>>> import utool as ut
>>> func = ut.make_default_docstr
>>> #func = ut.make_args_docstr
>>> #func = PythonStatement
>>> func = auto_docstr
>>> default_docstr = make_default_docstr(func)
>>> result = str(default_docstr)
>>> print(result)
"""
import utool as ut
#from utool import util_inspect
funcinfo = ut.util_inspect.infer_function_info(func)
argname_list = funcinfo.argname_list
argtype_list = funcinfo.argtype_list
argdesc_list = funcinfo.argdesc_list
return_header = funcinfo.return_header
return_type = funcinfo.return_type
return_name = funcinfo.return_name
return_desc = funcinfo.return_desc
funcname = funcinfo.funcname
modname = funcinfo.modname
defaults = funcinfo.defaults
num_indent = funcinfo.num_indent
needs_surround = funcinfo.needs_surround
funcname = funcinfo.funcname
ismethod = funcinfo.ismethod
va_name = funcinfo.va_name
kw_name = funcinfo.kw_name
kw_keys = funcinfo.kw_keys
docstr_parts = []
# Header part
if with_header:
header_block = funcname
docstr_parts.append(header_block)
# Args part
if with_args and len(argname_list) > 0:
argheader = 'Args'
arg_docstr = make_args_docstr(argname_list, argtype_list, argdesc_list,
ismethod, va_name, kw_name, kw_keys)
argsblock = make_docstr_block(argheader, arg_docstr)
docstr_parts.append(argsblock)
# if False:
# with_kw = with_args
# if with_kw and len(kwarg_keys) > 0:
# #ut.embed()
# import textwrap
# kwargs_docstr = ', '.join(kwarg_keys)
# kwargs_docstr = '\n'.join(textwrap.wrap(kwargs_docstr))
# kwargsblock = make_docstr_block('Kwargs', kwargs_docstr)
# docstr_parts.append(kwargsblock)
    # Return / Yield part
if with_ret and return_header is not None:
if return_header is not None:
return_doctr = make_returns_or_yeilds_docstr(return_type, return_name, return_desc)
returnblock = make_docstr_block(return_header, return_doctr)
docstr_parts.append(returnblock)
# Example part
# try to generate a simple and unit testable example
if with_commandline:
cmdlineheader = 'CommandLine'
cmdlinecode = make_cmdline_docstr(funcname, modname)
cmdlineblock = make_docstr_block(cmdlineheader, cmdlinecode)
docstr_parts.append(cmdlineblock)
if with_example:
exampleheader = 'Example'
examplecode = make_example_docstr(funcname, modname, argname_list,
defaults, return_type, return_name,
ismethod)
examplecode_ = ut.indent(examplecode, '>>> ')
exampleblock = make_docstr_block(exampleheader, examplecode_)
docstr_parts.append(exampleblock)
# DEBUG part (in case something goes wrong)
if with_debug:
debugheader = 'Debug'
debugblock = ut.codeblock(
'''
num_indent = {num_indent}
'''
).format(num_indent=num_indent)
debugblock = make_docstr_block(debugheader, debugblock)
docstr_parts.append(debugblock)
# Enclosure / Indentation Parts
if needs_surround:
docstr_parts = ['r"""'] + ['\n\n'.join(docstr_parts)] + ['"""']
default_docstr = '\n'.join(docstr_parts)
else:
default_docstr = '\n\n'.join(docstr_parts)
docstr_indent = ' ' * (num_indent + 4)
default_docstr = ut.indent(default_docstr, docstr_indent)
return default_docstr | [
"def",
"make_default_docstr",
"(",
"func",
",",
"with_args",
"=",
"True",
",",
"with_ret",
"=",
"True",
",",
"with_commandline",
"=",
"True",
",",
"with_example",
"=",
"True",
",",
"with_header",
"=",
"False",
",",
"with_debug",
"=",
"False",
")",
":",
"import",
"utool",
"as",
"ut",
"#from utool import util_inspect",
"funcinfo",
"=",
"ut",
".",
"util_inspect",
".",
"infer_function_info",
"(",
"func",
")",
"argname_list",
"=",
"funcinfo",
".",
"argname_list",
"argtype_list",
"=",
"funcinfo",
".",
"argtype_list",
"argdesc_list",
"=",
"funcinfo",
".",
"argdesc_list",
"return_header",
"=",
"funcinfo",
".",
"return_header",
"return_type",
"=",
"funcinfo",
".",
"return_type",
"return_name",
"=",
"funcinfo",
".",
"return_name",
"return_desc",
"=",
"funcinfo",
".",
"return_desc",
"funcname",
"=",
"funcinfo",
".",
"funcname",
"modname",
"=",
"funcinfo",
".",
"modname",
"defaults",
"=",
"funcinfo",
".",
"defaults",
"num_indent",
"=",
"funcinfo",
".",
"num_indent",
"needs_surround",
"=",
"funcinfo",
".",
"needs_surround",
"funcname",
"=",
"funcinfo",
".",
"funcname",
"ismethod",
"=",
"funcinfo",
".",
"ismethod",
"va_name",
"=",
"funcinfo",
".",
"va_name",
"kw_name",
"=",
"funcinfo",
".",
"kw_name",
"kw_keys",
"=",
"funcinfo",
".",
"kw_keys",
"docstr_parts",
"=",
"[",
"]",
"# Header part",
"if",
"with_header",
":",
"header_block",
"=",
"funcname",
"docstr_parts",
".",
"append",
"(",
"header_block",
")",
"# Args part",
"if",
"with_args",
"and",
"len",
"(",
"argname_list",
")",
">",
"0",
":",
"argheader",
"=",
"'Args'",
"arg_docstr",
"=",
"make_args_docstr",
"(",
"argname_list",
",",
"argtype_list",
",",
"argdesc_list",
",",
"ismethod",
",",
"va_name",
",",
"kw_name",
",",
"kw_keys",
")",
"argsblock",
"=",
"make_docstr_block",
"(",
"argheader",
",",
"arg_docstr",
")",
"docstr_parts",
".",
"append",
"(",
"argsblock",
")",
"# if False:",
"# with_kw = with_args",
"# if with_kw and len(kwarg_keys) > 0:",
"# #ut.embed()",
"# import textwrap",
"# kwargs_docstr = ', '.join(kwarg_keys)",
"# kwargs_docstr = '\\n'.join(textwrap.wrap(kwargs_docstr))",
"# kwargsblock = make_docstr_block('Kwargs', kwargs_docstr)",
"# docstr_parts.append(kwargsblock)",
"# Return / Yeild part",
"if",
"with_ret",
"and",
"return_header",
"is",
"not",
"None",
":",
"if",
"return_header",
"is",
"not",
"None",
":",
"return_doctr",
"=",
"make_returns_or_yeilds_docstr",
"(",
"return_type",
",",
"return_name",
",",
"return_desc",
")",
"returnblock",
"=",
"make_docstr_block",
"(",
"return_header",
",",
"return_doctr",
")",
"docstr_parts",
".",
"append",
"(",
"returnblock",
")",
"# Example part",
"# try to generate a simple and unit testable example",
"if",
"with_commandline",
":",
"cmdlineheader",
"=",
"'CommandLine'",
"cmdlinecode",
"=",
"make_cmdline_docstr",
"(",
"funcname",
",",
"modname",
")",
"cmdlineblock",
"=",
"make_docstr_block",
"(",
"cmdlineheader",
",",
"cmdlinecode",
")",
"docstr_parts",
".",
"append",
"(",
"cmdlineblock",
")",
"if",
"with_example",
":",
"exampleheader",
"=",
"'Example'",
"examplecode",
"=",
"make_example_docstr",
"(",
"funcname",
",",
"modname",
",",
"argname_list",
",",
"defaults",
",",
"return_type",
",",
"return_name",
",",
"ismethod",
")",
"examplecode_",
"=",
"ut",
".",
"indent",
"(",
"examplecode",
",",
"'>>> '",
")",
"exampleblock",
"=",
"make_docstr_block",
"(",
"exampleheader",
",",
"examplecode_",
")",
"docstr_parts",
".",
"append",
"(",
"exampleblock",
")",
"# DEBUG part (in case something goes wrong)",
"if",
"with_debug",
":",
"debugheader",
"=",
"'Debug'",
"debugblock",
"=",
"ut",
".",
"codeblock",
"(",
"'''\n num_indent = {num_indent}\n '''",
")",
".",
"format",
"(",
"num_indent",
"=",
"num_indent",
")",
"debugblock",
"=",
"make_docstr_block",
"(",
"debugheader",
",",
"debugblock",
")",
"docstr_parts",
".",
"append",
"(",
"debugblock",
")",
"# Enclosure / Indentation Parts",
"if",
"needs_surround",
":",
"docstr_parts",
"=",
"[",
"'r\"\"\"'",
"]",
"+",
"[",
"'\\n\\n'",
".",
"join",
"(",
"docstr_parts",
")",
"]",
"+",
"[",
"'\"\"\"'",
"]",
"default_docstr",
"=",
"'\\n'",
".",
"join",
"(",
"docstr_parts",
")",
"else",
":",
"default_docstr",
"=",
"'\\n\\n'",
".",
"join",
"(",
"docstr_parts",
")",
"docstr_indent",
"=",
"' '",
"*",
"(",
"num_indent",
"+",
"4",
")",
"default_docstr",
"=",
"ut",
".",
"indent",
"(",
"default_docstr",
",",
"docstr_indent",
")",
"return",
"default_docstr"
] | r"""
Tries to make a sensible default docstr so the user
can fill things in without typing too much
# TODO: Interleave old documentation with new documentation
Args:
func (function): live python function
with_args (bool):
with_ret (bool): (Defaults to True)
with_commandline (bool): (Defaults to True)
with_example (bool): (Defaults to True)
with_header (bool): (Defaults to False)
with_debug (bool): (Defaults to False)
Returns:
tuple: (argname, val)
Ignore:
pass
CommandLine:
python -m utool.util_autogen --exec-make_default_docstr --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_autogen import * # NOQA
>>> import utool as ut
>>> func = ut.make_default_docstr
>>> #func = ut.make_args_docstr
>>> #func = PythonStatement
>>> func = auto_docstr
>>> default_docstr = make_default_docstr(func)
>>> result = str(default_docstr)
>>> print(result) | [
"r",
"Tries",
"to",
"make",
"a",
"sensible",
"default",
"docstr",
"so",
"the",
"user",
"can",
"fill",
"things",
"in",
"without",
"typing",
"too",
"much"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L763-L894 | train |
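Each block can be toggled via the flags above; a sketch generating only the Args section (assumes utool is installed):

import utool as ut

docstr = ut.make_default_docstr(ut.auto_docstr, with_ret=False,
                                with_commandline=False, with_example=False)
print(docstr)  # only the Args block remains, indented for the target function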
Erotemic/utool | utool/util_autogen.py | remove_codeblock_syntax_sentinals | def remove_codeblock_syntax_sentinals(code_text):
r"""
    Removes template comments and vim sentinels
Args:
code_text (str):
Returns:
str: code_text_
"""
flags = re.MULTILINE | re.DOTALL
code_text_ = code_text
code_text_ = re.sub(r'^ *# *REM [^\n]*$\n?', '', code_text_, flags=flags)
code_text_ = re.sub(r'^ *# STARTBLOCK *$\n', '', code_text_, flags=flags)
code_text_ = re.sub(r'^ *# ENDBLOCK *$\n?', '', code_text_, flags=flags)
code_text_ = code_text_.rstrip()
return code_text_ | python | def remove_codeblock_syntax_sentinals(code_text):
r"""
    Removes template comments and vim sentinels
Args:
code_text (str):
Returns:
str: code_text_
"""
flags = re.MULTILINE | re.DOTALL
code_text_ = code_text
code_text_ = re.sub(r'^ *# *REM [^\n]*$\n?', '', code_text_, flags=flags)
code_text_ = re.sub(r'^ *# STARTBLOCK *$\n', '', code_text_, flags=flags)
code_text_ = re.sub(r'^ *# ENDBLOCK *$\n?', '', code_text_, flags=flags)
code_text_ = code_text_.rstrip()
return code_text_ | [
"def",
"remove_codeblock_syntax_sentinals",
"(",
"code_text",
")",
":",
"flags",
"=",
"re",
".",
"MULTILINE",
"|",
"re",
".",
"DOTALL",
"code_text_",
"=",
"code_text",
"code_text_",
"=",
"re",
".",
"sub",
"(",
"r'^ *# *REM [^\\n]*$\\n?'",
",",
"''",
",",
"code_text_",
",",
"flags",
"=",
"flags",
")",
"code_text_",
"=",
"re",
".",
"sub",
"(",
"r'^ *# STARTBLOCK *$\\n'",
",",
"''",
",",
"code_text_",
",",
"flags",
"=",
"flags",
")",
"code_text_",
"=",
"re",
".",
"sub",
"(",
"r'^ *# ENDBLOCK *$\\n?'",
",",
"''",
",",
"code_text_",
",",
"flags",
"=",
"flags",
")",
"code_text_",
"=",
"code_text_",
".",
"rstrip",
"(",
")",
"return",
"code_text_"
] | r"""
Removes template comments and vim sentinels
Args:
code_text (str):
Returns:
str: code_text_ | [
"r",
"Removes",
"template",
"comments",
"and",
"vim",
"sentinals"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_autogen.py#L897-L913 | train |
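Round-trip of the three sentinel patterns handled above (a sketch):

from utool.util_autogen import remove_codeblock_syntax_sentinals

text = '# STARTBLOCK\n# REM template-only note\nx = 1\n# ENDBLOCK\n'
assert remove_codeblock_syntax_sentinals(text) == 'x = 1'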
glormph/msstitch | src/app/actions/mzidtsv/proteingroup_sorters.py | sort_protein_group | def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
"""Recursive function that sorts protein group by a number of sorting
functions."""
pgroup_out = []
subgroups = sortfunctions[sortfunc_index](pgroup)
sortfunc_index += 1
for subgroup in subgroups:
if len(subgroup) > 1 and sortfunc_index < len(sortfunctions):
pgroup_out.extend(sort_protein_group(subgroup,
sortfunctions,
sortfunc_index))
else:
pgroup_out.extend(subgroup)
return pgroup_out | python | def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
"""Recursive function that sorts protein group by a number of sorting
functions."""
pgroup_out = []
subgroups = sortfunctions[sortfunc_index](pgroup)
sortfunc_index += 1
for subgroup in subgroups:
if len(subgroup) > 1 and sortfunc_index < len(sortfunctions):
pgroup_out.extend(sort_protein_group(subgroup,
sortfunctions,
sortfunc_index))
else:
pgroup_out.extend(subgroup)
return pgroup_out | [
"def",
"sort_protein_group",
"(",
"pgroup",
",",
"sortfunctions",
",",
"sortfunc_index",
")",
":",
"pgroup_out",
"=",
"[",
"]",
"subgroups",
"=",
"sortfunctions",
"[",
"sortfunc_index",
"]",
"(",
"pgroup",
")",
"sortfunc_index",
"+=",
"1",
"for",
"subgroup",
"in",
"subgroups",
":",
"if",
"len",
"(",
"subgroup",
")",
">",
"1",
"and",
"sortfunc_index",
"<",
"len",
"(",
"sortfunctions",
")",
":",
"pgroup_out",
".",
"extend",
"(",
"sort_protein_group",
"(",
"subgroup",
",",
"sortfunctions",
",",
"sortfunc_index",
")",
")",
"else",
":",
"pgroup_out",
".",
"extend",
"(",
"subgroup",
")",
"return",
"pgroup_out"
] | Recursive function that sorts protein group by a number of sorting
functions. | [
"Recursive",
"function",
"that",
"sorts",
"protein",
"group",
"by",
"a",
"number",
"of",
"sorting",
"functions",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/proteingroup_sorters.py#L35-L48 | train |
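The recursion above only needs each sort function to map a group onto ranked subgroups. A self-contained sketch with two hypothetical tie-breaking functions (not part of msstitch) and a trimmed copy of the sorter for a runnable demo:

```python
# Hypothetical sort functions: each maps a group to subgroups, best first.
def by_score(group):
    buckets = {}
    for p in group:
        buckets.setdefault(p['score'], []).append(p)
    return [buckets[k] for k in sorted(buckets, reverse=True)]

def by_name(group):
    return [[p] for p in sorted(group, key=lambda p: p['name'])]

def sort_protein_group(pgroup, sortfunctions, sortfunc_index):
    # trimmed copy of the recursive sorter above
    out = []
    for subgroup in sortfunctions[sortfunc_index](pgroup):
        if len(subgroup) > 1 and sortfunc_index + 1 < len(sortfunctions):
            out.extend(sort_protein_group(subgroup, sortfunctions, sortfunc_index + 1))
        else:
            out.extend(subgroup)
    return out

proteins = [{'name': 'B', 'score': 2}, {'name': 'A', 'score': 2}, {'name': 'C', 'score': 5}]
print(sort_protein_group(proteins, [by_score, by_name], 0))
# C first (highest score), then A before B (name tie-break)
```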
glormph/msstitch | src/app/actions/mzidtsv/proteingroup_sorters.py | sort_amounts | def sort_amounts(proteins, sort_index):
"""Generic function for sorting peptides and psms. Assumes a higher
number is better for what is passed at sort_index position in protein."""
amounts = {}
for protein in proteins:
amount_x_for_protein = protein[sort_index]
try:
amounts[amount_x_for_protein].append(protein)
except KeyError:
amounts[amount_x_for_protein] = [protein]
return [v for k, v in sorted(amounts.items(), reverse=True)] | python | def sort_amounts(proteins, sort_index):
"""Generic function for sorting peptides and psms. Assumes a higher
number is better for what is passed at sort_index position in protein."""
amounts = {}
for protein in proteins:
amount_x_for_protein = protein[sort_index]
try:
amounts[amount_x_for_protein].append(protein)
except KeyError:
amounts[amount_x_for_protein] = [protein]
return [v for k, v in sorted(amounts.items(), reverse=True)] | [
"def",
"sort_amounts",
"(",
"proteins",
",",
"sort_index",
")",
":",
"amounts",
"=",
"{",
"}",
"for",
"protein",
"in",
"proteins",
":",
"amount_x_for_protein",
"=",
"protein",
"[",
"sort_index",
"]",
"try",
":",
"amounts",
"[",
"amount_x_for_protein",
"]",
".",
"append",
"(",
"protein",
")",
"except",
"KeyError",
":",
"amounts",
"[",
"amount_x_for_protein",
"]",
"=",
"[",
"protein",
"]",
"return",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"amounts",
".",
"items",
"(",
")",
",",
"reverse",
"=",
"True",
")",
"]"
] | Generic function for sorting peptides and psms. Assumes a higher
number is better for what is passed at sort_index position in protein. | [
"Generic",
"function",
"for",
"sorting",
"peptides",
"and",
"psms",
".",
"Assumes",
"a",
"higher",
"number",
"is",
"better",
"for",
"what",
"is",
"passed",
"at",
"sort_index",
"position",
"in",
"protein",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/proteingroup_sorters.py#L51-L61 | train |
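For reference, the bucketing behaviour above in miniature (standalone, with toy rows):

```python
# Group rows by the value at sort_index, emit buckets highest key first.
def sort_amounts(rows, sort_index):
    amounts = {}
    for row in rows:
        amounts.setdefault(row[sort_index], []).append(row)
    return [v for k, v in sorted(amounts.items(), reverse=True)]

rows = [('p1', 3), ('p2', 7), ('p3', 3)]
print(sort_amounts(rows, 1))  # [[('p2', 7)], [('p1', 3), ('p3', 3)]]
```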
chriso/gauged | gauged/structures/sparse_map.py | SparseMap.free | def free(self):
"""Free the map"""
if self._ptr is None:
return
Gauged.map_free(self.ptr)
SparseMap.ALLOCATIONS -= 1
self._ptr = None | python | def free(self):
"""Free the map"""
if self._ptr is None:
return
Gauged.map_free(self.ptr)
SparseMap.ALLOCATIONS -= 1
self._ptr = None | [
"def",
"free",
"(",
"self",
")",
":",
"if",
"self",
".",
"_ptr",
"is",
"None",
":",
"return",
"Gauged",
".",
"map_free",
"(",
"self",
".",
"ptr",
")",
"SparseMap",
".",
"ALLOCATIONS",
"-=",
"1",
"self",
".",
"_ptr",
"=",
"None"
] | Free the map | [
"Free",
"the",
"map"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L65-L71 | train |
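The `if self._ptr is None: return` guard makes `free` idempotent. The same pattern for any C-backed handle, sketched with a stub destructor standing in for `Gauged.map_free`:

```python
def release(ptr):
    print('released', hex(ptr))  # stand-in for the C destructor (Gauged.map_free)

class Handle:
    def __init__(self, ptr):
        self._ptr = ptr

    def free(self):
        # mirrors SparseMap.free above: a second call is a harmless no-op
        if self._ptr is None:
            return
        release(self._ptr)
        self._ptr = None

h = Handle(0xdead)
h.free()  # released 0xdead
h.free()  # no-op
```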
chriso/gauged | gauged/structures/sparse_map.py | SparseMap.append | def append(self, position, array):
"""Append an array to the end of the map. The position
must be greater than any position in the map"""
if not Gauged.map_append(self.ptr, position, array.ptr):
raise MemoryError | python | def append(self, position, array):
"""Append an array to the end of the map. The position
must be greater than any position in the map"""
if not Gauged.map_append(self.ptr, position, array.ptr):
raise MemoryError | [
"def",
"append",
"(",
"self",
",",
"position",
",",
"array",
")",
":",
"if",
"not",
"Gauged",
".",
"map_append",
"(",
"self",
".",
"ptr",
",",
"position",
",",
"array",
".",
"ptr",
")",
":",
"raise",
"MemoryError"
] | Append an array to the end of the map. The position
must be greater than any position in the map | [
"Append",
"an",
"array",
"to",
"the",
"end",
"of",
"the",
"map",
".",
"The",
"position",
"must",
"be",
"greater",
"than",
"any",
"positions",
"in",
"the",
"map"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L73-L77 | train |
chriso/gauged | gauged/structures/sparse_map.py | SparseMap.slice | def slice(self, start=0, end=0):
"""Slice the map from [start, end)"""
tmp = Gauged.map_new()
if tmp is None:
raise MemoryError
if not Gauged.map_concat(tmp, self.ptr, start, end, 0):
Gauged.map_free(tmp) # pragma: no cover
raise MemoryError
return SparseMap(tmp) | python | def slice(self, start=0, end=0):
"""Slice the map from [start, end)"""
tmp = Gauged.map_new()
if tmp is None:
raise MemoryError
if not Gauged.map_concat(tmp, self.ptr, start, end, 0):
Gauged.map_free(tmp) # pragma: no cover
raise MemoryError
return SparseMap(tmp) | [
"def",
"slice",
"(",
"self",
",",
"start",
"=",
"0",
",",
"end",
"=",
"0",
")",
":",
"tmp",
"=",
"Gauged",
".",
"map_new",
"(",
")",
"if",
"tmp",
"is",
"None",
":",
"raise",
"MemoryError",
"if",
"not",
"Gauged",
".",
"map_concat",
"(",
"tmp",
",",
"self",
".",
"ptr",
",",
"start",
",",
"end",
",",
"0",
")",
":",
"Gauged",
".",
"map_free",
"(",
"tmp",
")",
"# pragma: no cover",
"raise",
"MemoryError",
"return",
"SparseMap",
"(",
"tmp",
")"
] | Slice the map from [start, end) | [
"Slice",
"the",
"map",
"from",
"[",
"start",
"end",
")"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L79-L87 | train |
chriso/gauged | gauged/structures/sparse_map.py | SparseMap.concat | def concat(self, operand, start=0, end=0, offset=0):
"""Concat a map. You can also optionally slice the operand map
and apply an offset to each position before concatting"""
if not Gauged.map_concat(self.ptr, operand.ptr, start, end, offset):
raise MemoryError | python | def concat(self, operand, start=0, end=0, offset=0):
"""Concat a map. You can also optionally slice the operand map
and apply an offset to each position before concatting"""
if not Gauged.map_concat(self.ptr, operand.ptr, start, end, offset):
raise MemoryError | [
"def",
"concat",
"(",
"self",
",",
"operand",
",",
"start",
"=",
"0",
",",
"end",
"=",
"0",
",",
"offset",
"=",
"0",
")",
":",
"if",
"not",
"Gauged",
".",
"map_concat",
"(",
"self",
".",
"ptr",
",",
"operand",
".",
"ptr",
",",
"start",
",",
"end",
",",
"offset",
")",
":",
"raise",
"MemoryError"
] | Concat a map. You can also optionally slice the operand map
and apply an offset to each position before concatting | [
"Concat",
"a",
"map",
".",
"You",
"can",
"also",
"optionally",
"slice",
"the",
"operand",
"map",
"and",
"apply",
"an",
"offset",
"to",
"each",
"position",
"before",
"concatting"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L89-L93 | train |
chriso/gauged | gauged/structures/sparse_map.py | SparseMap.buffer | def buffer(self, byte_offset=0):
"""Get a copy of the map buffer"""
contents = self.ptr.contents
ptr = addressof(contents.buffer.contents) + byte_offset
length = contents.length * 4 - byte_offset
return buffer((c_char * length).from_address(ptr).raw) \
if length else None | python | def buffer(self, byte_offset=0):
"""Get a copy of the map buffer"""
contents = self.ptr.contents
ptr = addressof(contents.buffer.contents) + byte_offset
length = contents.length * 4 - byte_offset
return buffer((c_char * length).from_address(ptr).raw) \
if length else None | [
"def",
"buffer",
"(",
"self",
",",
"byte_offset",
"=",
"0",
")",
":",
"contents",
"=",
"self",
".",
"ptr",
".",
"contents",
"ptr",
"=",
"addressof",
"(",
"contents",
".",
"buffer",
".",
"contents",
")",
"+",
"byte_offset",
"length",
"=",
"contents",
".",
"length",
"*",
"4",
"-",
"byte_offset",
"return",
"buffer",
"(",
"(",
"c_char",
"*",
"length",
")",
".",
"from_address",
"(",
"ptr",
")",
".",
"raw",
")",
"if",
"length",
"else",
"None"
] | Get a copy of the map buffer | [
"Get",
"a",
"copy",
"of",
"the",
"map",
"buffer"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/sparse_map.py#L99-L105 | train |
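Putting the SparseMap methods together, a hypothetical usage sketch (the `gauged` package must be installed, and the `FloatArray` import path is an assumption, not confirmed by the rows above):

```python
# Hypothetical end-to-end use of the wrapper methods above.
from gauged.structures import SparseMap, FloatArray  # import path assumed

m = SparseMap()
m.append(1, FloatArray([1.0, 2.0]))  # positions must arrive in increasing order
m.append(5, FloatArray([3.0]))
head = m.slice(start=0, end=5)       # new map holding entries in [0, 5)
m.concat(head, offset=10)            # splice the slice back in at shifted positions
raw = m.buffer()                     # copy of the backing buffer, or None if empty
head.free()                          # release the C allocations explicitly
m.free()
```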
timedata-org/loady | loady/whitelist.py | matches | def matches(target, entry):
"""Does the target match the whitelist entry?"""
# It must match all the non-empty entries.
for t, e in itertools.zip_longest(target, entry):
if e and t != e:
return False
# ...and the provider and user can't be empty.
return entry[0] and entry[1] | python | def matches(target, entry):
"""Does the target match the whitelist entry?"""
# It must match all the non-empty entries.
for t, e in itertools.zip_longest(target, entry):
if e and t != e:
return False
# ...and the provider and user can't be empty.
return entry[0] and entry[1] | [
"def",
"matches",
"(",
"target",
",",
"entry",
")",
":",
"# It must match all the non-empty entries.",
"for",
"t",
",",
"e",
"in",
"itertools",
".",
"zip_longest",
"(",
"target",
",",
"entry",
")",
":",
"if",
"e",
"and",
"t",
"!=",
"e",
":",
"return",
"False",
"# ...and the provider and user can't be empty.",
"return",
"entry",
"[",
"0",
"]",
"and",
"entry",
"[",
"1",
"]"
] | Does the target match the whitelist entry? | [
"Does",
"the",
"target",
"match",
"the",
"whitelist",
"entry?"
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/whitelist.py#L29-L38 | train |
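A standalone restatement of the matching rule above (empty entry fields act as wildcards, but provider and user must be non-empty):

```python
import itertools

def matches(target, entry):
    # same rule as above; bool() only tidies the return value for printing
    for t, e in itertools.zip_longest(target, entry):
        if e and t != e:
            return False
    return bool(entry[0] and entry[1])

print(matches(('github.com', 'alice', 'repo'), ('github.com', 'alice', '')))  # True
print(matches(('github.com', 'alice', 'repo'), ('github.com', 'bob', '')))    # False
print(matches(('github.com', 'alice', 'repo'), ('github.com', '', '')))       # False (empty user)
```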
timedata-org/loady | loady/whitelist.py | check_entry | def check_entry(*entry):
"""Throws an exception if the entry isn't on the whitelist."""
whitelist = read_whitelist()
if not check_allow_prompt(entry, whitelist):
whitelist.append(entry)
write_whitelist(whitelist) | python | def check_entry(*entry):
"""Throws an exception if the entry isn't on the whitelist."""
whitelist = read_whitelist()
if not check_allow_prompt(entry, whitelist):
whitelist.append(entry)
write_whitelist(whitelist) | [
"def",
"check_entry",
"(",
"*",
"entry",
")",
":",
"whitelist",
"=",
"read_whitelist",
"(",
")",
"if",
"not",
"check_allow_prompt",
"(",
"entry",
",",
"whitelist",
")",
":",
"whitelist",
".",
"append",
"(",
"entry",
")",
"write_whitelist",
"(",
"whitelist",
")"
] | Throws an exception if the entry isn't on the whitelist. | [
"Throws",
"an",
"exception",
"if",
"the",
"entry",
"isn",
"t",
"on",
"the",
"whitelist",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/whitelist.py#L64-L69 | train |
timedata-org/loady | loady/data.py | load_uncached | def load_uncached(location, use_json=None):
"""
Return data at either a file location or at the raw version of a
URL, or raise an exception.
A file location either contains no colons like /usr/tom/test.txt,
or a single character and a colon like C:/WINDOWS/STUFF.
A URL location is anything that's not a file location.
If the URL ends in .json, .yml or .yaml and `use_json != False`,
or `use_json == True`, convert the data from YAML or JSON.
"""
if not whitelist.is_file(location):
r = requests.get(raw.raw(location))
if not r.ok:
raise ValueError('Couldn\'t read %s with code %s:\n%s' %
(location, r.status_code, r.text))
data = r.text
else:
try:
f = os.path.realpath(os.path.abspath(os.path.expanduser(location)))
data = open(f).read()
except Exception as e:
e.args = (
'There was an error reading the file', location, f) + e.args
raise
if use_json is None:
use_json = any(location.endswith(s) for s in SUFFIXES)
if not use_json:
return data
try:
return yaml.load(data)
except Exception as e:
e.args = ('There was a JSON error in the file', location) + e.args
raise | python | def load_uncached(location, use_json=None):
"""
Return data at either a file location or at the raw version of a
URL, or raise an exception.
A file location either contains no colons like /usr/tom/test.txt,
or a single character and a colon like C:/WINDOWS/STUFF.
A URL location is anything that's not a file location.
If the URL ends in .json, .yml or .yaml and `use_json != False`,
or `use_json == True`, convert the data from YAML or JSON.
"""
if not whitelist.is_file(location):
r = requests.get(raw.raw(location))
if not r.ok:
raise ValueError('Couldn\'t read %s with code %s:\n%s' %
(location, r.status_code, r.text))
data = r.text
else:
try:
f = os.path.realpath(os.path.abspath(os.path.expanduser(location)))
data = open(f).read()
except Exception as e:
e.args = (
'There was an error reading the file', location, f) + e.args
raise
if use_json is None:
use_json = any(location.endswith(s) for s in SUFFIXES)
if not use_json:
return data
try:
return yaml.load(data)
except Exception as e:
e.args = ('There was a JSON error in the file', location) + e.args
raise | [
"def",
"load_uncached",
"(",
"location",
",",
"use_json",
"=",
"None",
")",
":",
"if",
"not",
"whitelist",
".",
"is_file",
"(",
"location",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"raw",
".",
"raw",
"(",
"location",
")",
")",
"if",
"not",
"r",
".",
"ok",
":",
"raise",
"ValueError",
"(",
"'Couldn\\'t read %s with code %s:\\n%s'",
"%",
"(",
"location",
",",
"r",
".",
"status_code",
",",
"r",
".",
"text",
")",
")",
"data",
"=",
"r",
".",
"text",
"else",
":",
"try",
":",
"f",
"=",
"os",
".",
"path",
".",
"realpath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"location",
")",
")",
")",
"data",
"=",
"open",
"(",
"f",
")",
".",
"read",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"e",
".",
"args",
"=",
"(",
"'There was an error reading the file'",
",",
"location",
",",
"f",
")",
"+",
"e",
".",
"args",
"raise",
"if",
"use_json",
"is",
"None",
":",
"use_json",
"=",
"any",
"(",
"location",
".",
"endswith",
"(",
"s",
")",
"for",
"s",
"in",
"SUFFIXES",
")",
"if",
"not",
"use_json",
":",
"return",
"data",
"try",
":",
"return",
"yaml",
".",
"load",
"(",
"data",
")",
"except",
"Exception",
"as",
"e",
":",
"e",
".",
"args",
"=",
"(",
"'There was a JSON error in the file'",
",",
"location",
")",
"+",
"e",
".",
"args",
"raise"
] | Return data at either a file location or at the raw version of a
URL, or raise an exception.
A file location either contains no colons like /usr/tom/test.txt,
or a single character and a colon like C:/WINDOWS/STUFF.
A URL location is anything that's not a file location.
If the URL ends in .json, .yml or .yaml and `use_json != False`,
or `use_json == True`, convert the data from YAML or JSON. | [
"Return",
"data",
"at",
"either",
"a",
"file",
"location",
"or",
"at",
"the",
"raw",
"version",
"of",
"a",
"URL",
"or",
"raise",
"an",
"exception",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/data.py#L12-L50 | train |
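Hypothetical calls illustrating the three branches above, assuming `load_uncached` from this module is importable (paths and URL are placeholders; real filesystem/network access is needed for these to succeed):

```python
# Suffix match -> parsed as YAML/JSON automatically
config = load_uncached('~/myproject/config.yaml')
# No recognised suffix -> returned as a plain string
text = load_uncached('/etc/hostname')
# use_json=True forces YAML/JSON parsing regardless of suffix
data = load_uncached('https://example.com/settings', use_json=True)
```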
Erotemic/utool | utool/util_alg.py | find_group_differences | def find_group_differences(groups1, groups2):
r"""
Returns a measure of how dissimilar two groupings are
Args:
groups1 (list): true grouping of items
groups2 (list): predicted grouping of items
CommandLine:
python -m utool.util_alg find_group_differences
SeeAlso:
vtool.group_indicies
vtool.apply_grouping
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]]
>>> groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 20
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2, 3], [4], [5, 6]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 0
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2], [4], [5, 6]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 4
Ignore:
# Can this be done via sklearn label analysis?
# maybe no... the labels assigned to each component are arbitrary
# maybe if we label edges? likely too many labels.
groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]]
groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]]
"""
import utool as ut
# For each group, build a mapping from each item to the other members of its group
item_to_others1 = {item: set(_group) - {item}
for _group in groups1 for item in _group}
item_to_others2 = {item: set(_group) - {item}
for _group in groups2 for item in _group}
flat_items1 = ut.flatten(groups1)
flat_items2 = ut.flatten(groups2)
flat_items = list(set(flat_items1 + flat_items2))
errors = []
item_to_error = {}
for item in flat_items:
# Determine the number of unshared members in each group
others1 = item_to_others1.get(item, set([]))
others2 = item_to_others2.get(item, set([]))
missing1 = others1 - others2
missing2 = others2 - others1
error = len(missing1) + len(missing2)
if error > 0:
item_to_error[item] = error
errors.append(error)
total_error = sum(errors)
return total_error | python | def find_group_differences(groups1, groups2):
r"""
Returns a measure of how dissimilar two groupings are
Args:
groups1 (list): true grouping of items
groups2 (list): predicted grouping of items
CommandLine:
python -m utool.util_alg find_group_differences
SeeAlso:
vtool.group_indicies
vtool.apply_grouping
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]]
>>> groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 20
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2, 3], [4], [5, 6]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 0
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2], [4], [5, 6]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 4
Ignore:
# Can this be done via sklearn label analysis?
# maybe no... the labels assigned to each component are arbitrary
# maybe if we label edges? likely too many labels.
groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]]
groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]]
"""
import utool as ut
# For each group, build a mapping from each item to the other members of its group
item_to_others1 = {item: set(_group) - {item}
for _group in groups1 for item in _group}
item_to_others2 = {item: set(_group) - {item}
for _group in groups2 for item in _group}
flat_items1 = ut.flatten(groups1)
flat_items2 = ut.flatten(groups2)
flat_items = list(set(flat_items1 + flat_items2))
errors = []
item_to_error = {}
for item in flat_items:
# Determine the number of unshared members in each group
others1 = item_to_others1.get(item, set([]))
others2 = item_to_others2.get(item, set([]))
missing1 = others1 - others2
missing2 = others2 - others1
error = len(missing1) + len(missing2)
if error > 0:
item_to_error[item] = error
errors.append(error)
total_error = sum(errors)
return total_error | [
"def",
"find_group_differences",
"(",
"groups1",
",",
"groups2",
")",
":",
"import",
"utool",
"as",
"ut",
"# For each group, build mapping from each item to the members the group",
"item_to_others1",
"=",
"{",
"item",
":",
"set",
"(",
"_group",
")",
"-",
"{",
"item",
"}",
"for",
"_group",
"in",
"groups1",
"for",
"item",
"in",
"_group",
"}",
"item_to_others2",
"=",
"{",
"item",
":",
"set",
"(",
"_group",
")",
"-",
"{",
"item",
"}",
"for",
"_group",
"in",
"groups2",
"for",
"item",
"in",
"_group",
"}",
"flat_items1",
"=",
"ut",
".",
"flatten",
"(",
"groups1",
")",
"flat_items2",
"=",
"ut",
".",
"flatten",
"(",
"groups2",
")",
"flat_items",
"=",
"list",
"(",
"set",
"(",
"flat_items1",
"+",
"flat_items2",
")",
")",
"errors",
"=",
"[",
"]",
"item_to_error",
"=",
"{",
"}",
"for",
"item",
"in",
"flat_items",
":",
"# Determine the number of unshared members in each group",
"others1",
"=",
"item_to_others1",
".",
"get",
"(",
"item",
",",
"set",
"(",
"[",
"]",
")",
")",
"others2",
"=",
"item_to_others2",
".",
"get",
"(",
"item",
",",
"set",
"(",
"[",
"]",
")",
")",
"missing1",
"=",
"others1",
"-",
"others2",
"missing2",
"=",
"others2",
"-",
"others1",
"error",
"=",
"len",
"(",
"missing1",
")",
"+",
"len",
"(",
"missing2",
")",
"if",
"error",
">",
"0",
":",
"item_to_error",
"[",
"item",
"]",
"=",
"error",
"errors",
".",
"append",
"(",
"error",
")",
"total_error",
"=",
"sum",
"(",
"errors",
")",
"return",
"total_error"
] | r"""
Returns a measure of how dissimilar two groupings are
Args:
groups1 (list): true grouping of items
groups2 (list): predicted grouping of items
CommandLine:
python -m utool.util_alg find_group_differences
SeeAlso:
vtool.group_indicies
vtool.apply_grouping
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]]
>>> groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 20
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2, 3], [4], [5, 6]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 0
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2], [4], [5, 6]]
>>> total_error = find_group_differences(groups1, groups2)
>>> result = ('total_error = %r' % (total_error,))
>>> print(result)
total_error = 4
Ignore:
# Can this be done via sklearn label analysis?
# maybe no... the labels assigned to each component are arbitrary
# maybe if we label edges? likely too many labels.
groups1 = [[1, 2, 3], [4], [5, 6], [7, 8], [9, 10, 11]]
groups2 = [[1, 2, 11], [3, 4], [5, 6], [7], [8, 9], [10]] | [
"r",
"Returns",
"a",
"measure",
"of",
"how",
"disimilar",
"two",
"groupings",
"are"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L43-L120 | train |
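The per-item error above is just the size of the symmetric difference of each item's co-members under the two groupings; a standalone recomputation of Example2:

```python
# Each item pays |others1 ^ others2|, where "others" are its co-members.
groups1 = [[1, 2, 3], [4], [5, 6]]
groups2 = [[1, 2], [4], [5, 6]]
conn1 = {i: set(g) - {i} for g in groups1 for i in g}
conn2 = {i: set(g) - {i} for g in groups2 for i in g}
items = set(conn1) | set(conn2)
total = sum(len(conn1.get(i, set()) ^ conn2.get(i, set())) for i in items)
print(total)  # 4, matching Example2
```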
Erotemic/utool | utool/util_alg.py | find_group_consistencies | def find_group_consistencies(groups1, groups2):
r"""
Returns a measure of group consistency
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2], [4], [5, 6]]
>>> common_groups = find_group_consistencies(groups1, groups2)
>>> result = ('common_groups = %r' % (common_groups,))
>>> print(result)
common_groups = [(5, 6), (4,)]
"""
group1_list = {tuple(sorted(_group)) for _group in groups1}
group2_list = {tuple(sorted(_group)) for _group in groups2}
common_groups = list(group1_list.intersection(group2_list))
return common_groups | python | def find_group_consistencies(groups1, groups2):
r"""
Returns a measure of group consistency
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2], [4], [5, 6]]
>>> common_groups = find_group_consistencies(groups1, groups2)
>>> result = ('common_groups = %r' % (common_groups,))
>>> print(result)
common_groups = [(5, 6), (4,)]
"""
group1_list = {tuple(sorted(_group)) for _group in groups1}
group2_list = {tuple(sorted(_group)) for _group in groups2}
common_groups = list(group1_list.intersection(group2_list))
return common_groups | [
"def",
"find_group_consistencies",
"(",
"groups1",
",",
"groups2",
")",
":",
"group1_list",
"=",
"{",
"tuple",
"(",
"sorted",
"(",
"_group",
")",
")",
"for",
"_group",
"in",
"groups1",
"}",
"group2_list",
"=",
"{",
"tuple",
"(",
"sorted",
"(",
"_group",
")",
")",
"for",
"_group",
"in",
"groups2",
"}",
"common_groups",
"=",
"list",
"(",
"group1_list",
".",
"intersection",
"(",
"group2_list",
")",
")",
"return",
"common_groups"
] | r"""
Returns a measure of group consistency
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> groups1 = [[1, 2, 3], [4], [5, 6]]
>>> groups2 = [[1, 2], [4], [5, 6]]
>>> common_groups = find_group_consistencies(groups1, groups2)
>>> result = ('common_groups = %r' % (common_groups,))
>>> print(result)
common_groups = [(5, 6), (4,)] | [
"r",
"Returns",
"a",
"measure",
"of",
"group",
"consistency"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L123-L140 | train |
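The same result via plain set intersection over canonicalized (sorted) tuples, reproducing the doctest:

```python
groups1 = [[1, 2, 3], [4], [5, 6]]
groups2 = [[1, 2], [4], [5, 6]]
common = ({tuple(sorted(g)) for g in groups1}
          & {tuple(sorted(g)) for g in groups2})
print(sorted(common))  # [(4,), (5, 6)]
```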
Erotemic/utool | utool/util_alg.py | compare_groups | def compare_groups(true_groups, pred_groups):
r"""
Finds how predictions need to be modified to match the true grouping.
Notes:
pred_merges - the merges that would need to be done for the
pred_groups to match true_groups.
pred_hybrid - the hybrid split/merges that would need to be done
for the pred_groups to match true_groups.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> true_groups = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45], [50]
>>> ]
>>> pred_groups = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> comparisons = ut.compare_groups(true_groups, pred_groups)
>>> print(comparisons)
>>> result = ut.repr4(comparisons)
>>> print(result)
{
'common': {{1, 2}},
'pred_hybrid': {{10}, {3, 4}, {5, 6, 11}, {7}, {8, 9}},
'pred_merges': [{{12}, {13, 14}}, {{31, 32}, {33, 34, 35}}],
'pred_splits': [{20, 21, 22, 23}, {41, 42, 43, 44, 45}],
'true_hybrid': {{3, 5, 6}, {4}, {50}, {7, 8}, {9, 10, 11}},
'true_merges': [{12, 13, 14}, {31, 32, 33, 34, 35}],
'true_splits': [{{20, 21}, {22, 23}}, {{41, 42, 43, 44}, {45}}],
}
"""
import utool as ut
true = {frozenset(_group) for _group in true_groups}
pred = {frozenset(_group) for _group in pred_groups}
# Find the groups that are exactly the same
common = true.intersection(pred)
true_sets = true.difference(common)
pred_sets = pred.difference(common)
# connected component lookups
pred_conn = {p: frozenset(ps) for ps in pred for p in ps}
true_conn = {t: frozenset(ts) for ts in true for t in ts}
# How many predictions can be merged into perfect pieces?
# For each true sets, find if it can be made via merging pred sets
pred_merges = []
true_merges = []
for ts in true_sets:
ccs = set([pred_conn.get(t, frozenset()) for t in ts])
if frozenset.union(*ccs) == ts:
# This is a pure merge
pred_merges.append(ccs)
true_merges.append(ts)
# How many predictions can be split into perfect pieces?
true_splits = []
pred_splits = []
for ps in pred_sets:
ccs = set([true_conn.get(p, frozenset()) for p in ps])
if frozenset.union(*ccs) == ps:
# This is a pure split
true_splits.append(ccs)
pred_splits.append(ps)
pred_merges_flat = ut.flatten(pred_merges)
true_splits_flat = ut.flatten(true_splits)
pred_hybrid = frozenset(map(frozenset, pred_sets)).difference(
set(pred_splits + pred_merges_flat))
true_hybrid = frozenset(map(frozenset, true_sets)).difference(
set(true_merges + true_splits_flat))
comparisons = {
'common': common,
# 'true_splits_flat': true_splits_flat,
'true_splits': true_splits,
'true_merges': true_merges,
'true_hybrid': true_hybrid,
'pred_splits': pred_splits,
'pred_merges': pred_merges,
# 'pred_merges_flat': pred_merges_flat,
'pred_hybrid': pred_hybrid,
}
return comparisons | python | def compare_groups(true_groups, pred_groups):
r"""
Finds how predictions need to be modified to match the true grouping.
Notes:
pred_merges - the merges that would need to be done for the
pred_groups to match true_groups.
pred_hybrid - the hybrid split/merges that would need to be done
for the pred_groups to match true_groups.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> true_groups = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45], [50]
>>> ]
>>> pred_groups = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> comparisons = ut.compare_groups(true_groups, pred_groups)
>>> print(comparisons)
>>> result = ut.repr4(comparisons)
>>> print(result)
{
'common': {{1, 2}},
'pred_hybrid': {{10}, {3, 4}, {5, 6, 11}, {7}, {8, 9}},
'pred_merges': [{{12}, {13, 14}}, {{31, 32}, {33, 34, 35}}],
'pred_splits': [{20, 21, 22, 23}, {41, 42, 43, 44, 45}],
'true_hybrid': {{3, 5, 6}, {4}, {50}, {7, 8}, {9, 10, 11}},
'true_merges': [{12, 13, 14}, {31, 32, 33, 34, 35}],
'true_splits': [{{20, 21}, {22, 23}}, {{41, 42, 43, 44}, {45}}],
}
"""
import utool as ut
true = {frozenset(_group) for _group in true_groups}
pred = {frozenset(_group) for _group in pred_groups}
# Find the groups that are exactly the same
common = true.intersection(pred)
true_sets = true.difference(common)
pred_sets = pred.difference(common)
# connected component lookups
pred_conn = {p: frozenset(ps) for ps in pred for p in ps}
true_conn = {t: frozenset(ts) for ts in true for t in ts}
# How many predictions can be merged into perfect pieces?
# For each true sets, find if it can be made via merging pred sets
pred_merges = []
true_merges = []
for ts in true_sets:
ccs = set([pred_conn.get(t, frozenset()) for t in ts])
if frozenset.union(*ccs) == ts:
# This is a pure merge
pred_merges.append(ccs)
true_merges.append(ts)
# How many predictions can be split into perfect pieces?
true_splits = []
pred_splits = []
for ps in pred_sets:
ccs = set([true_conn.get(p, frozenset()) for p in ps])
if frozenset.union(*ccs) == ps:
# This is a pure split
true_splits.append(ccs)
pred_splits.append(ps)
pred_merges_flat = ut.flatten(pred_merges)
true_splits_flat = ut.flatten(true_splits)
pred_hybrid = frozenset(map(frozenset, pred_sets)).difference(
set(pred_splits + pred_merges_flat))
true_hybrid = frozenset(map(frozenset, true_sets)).difference(
set(true_merges + true_splits_flat))
comparisons = {
'common': common,
# 'true_splits_flat': true_splits_flat,
'true_splits': true_splits,
'true_merges': true_merges,
'true_hybrid': true_hybrid,
'pred_splits': pred_splits,
'pred_merges': pred_merges,
# 'pred_merges_flat': pred_merges_flat,
'pred_hybrid': pred_hybrid,
}
return comparisons | [
"def",
"compare_groups",
"(",
"true_groups",
",",
"pred_groups",
")",
":",
"import",
"utool",
"as",
"ut",
"true",
"=",
"{",
"frozenset",
"(",
"_group",
")",
"for",
"_group",
"in",
"true_groups",
"}",
"pred",
"=",
"{",
"frozenset",
"(",
"_group",
")",
"for",
"_group",
"in",
"pred_groups",
"}",
"# Find the groups that are exactly the same",
"common",
"=",
"true",
".",
"intersection",
"(",
"pred",
")",
"true_sets",
"=",
"true",
".",
"difference",
"(",
"common",
")",
"pred_sets",
"=",
"pred",
".",
"difference",
"(",
"common",
")",
"# connected compoment lookups",
"pred_conn",
"=",
"{",
"p",
":",
"frozenset",
"(",
"ps",
")",
"for",
"ps",
"in",
"pred",
"for",
"p",
"in",
"ps",
"}",
"true_conn",
"=",
"{",
"t",
":",
"frozenset",
"(",
"ts",
")",
"for",
"ts",
"in",
"true",
"for",
"t",
"in",
"ts",
"}",
"# How many predictions can be merged into perfect pieces?",
"# For each true sets, find if it can be made via merging pred sets",
"pred_merges",
"=",
"[",
"]",
"true_merges",
"=",
"[",
"]",
"for",
"ts",
"in",
"true_sets",
":",
"ccs",
"=",
"set",
"(",
"[",
"pred_conn",
".",
"get",
"(",
"t",
",",
"frozenset",
"(",
")",
")",
"for",
"t",
"in",
"ts",
"]",
")",
"if",
"frozenset",
".",
"union",
"(",
"*",
"ccs",
")",
"==",
"ts",
":",
"# This is a pure merge",
"pred_merges",
".",
"append",
"(",
"ccs",
")",
"true_merges",
".",
"append",
"(",
"ts",
")",
"# How many predictions can be split into perfect pieces?",
"true_splits",
"=",
"[",
"]",
"pred_splits",
"=",
"[",
"]",
"for",
"ps",
"in",
"pred_sets",
":",
"ccs",
"=",
"set",
"(",
"[",
"true_conn",
".",
"get",
"(",
"p",
",",
"frozenset",
"(",
")",
")",
"for",
"p",
"in",
"ps",
"]",
")",
"if",
"frozenset",
".",
"union",
"(",
"*",
"ccs",
")",
"==",
"ps",
":",
"# This is a pure merge",
"true_splits",
".",
"append",
"(",
"ccs",
")",
"pred_splits",
".",
"append",
"(",
"ps",
")",
"pred_merges_flat",
"=",
"ut",
".",
"flatten",
"(",
"pred_merges",
")",
"true_splits_flat",
"=",
"ut",
".",
"flatten",
"(",
"true_splits",
")",
"pred_hybrid",
"=",
"frozenset",
"(",
"map",
"(",
"frozenset",
",",
"pred_sets",
")",
")",
".",
"difference",
"(",
"set",
"(",
"pred_splits",
"+",
"pred_merges_flat",
")",
")",
"true_hybrid",
"=",
"frozenset",
"(",
"map",
"(",
"frozenset",
",",
"true_sets",
")",
")",
".",
"difference",
"(",
"set",
"(",
"true_merges",
"+",
"true_splits_flat",
")",
")",
"comparisons",
"=",
"{",
"'common'",
":",
"common",
",",
"# 'true_splits_flat': true_splits_flat,",
"'true_splits'",
":",
"true_splits",
",",
"'true_merges'",
":",
"true_merges",
",",
"'true_hybrid'",
":",
"true_hybrid",
",",
"'pred_splits'",
":",
"pred_splits",
",",
"'pred_merges'",
":",
"pred_merges",
",",
"# 'pred_merges_flat': pred_merges_flat,",
"'pred_hybrid'",
":",
"pred_hybrid",
",",
"}",
"return",
"comparisons"
] | r"""
Finds how predictions need to be modified to match the true grouping.
Notes:
pred_merges - the merges that would need to be done for the
pred_groups to match true_groups.
pred_hybrid - the hybrid split/merges that would need to be done
for the pred_groups to match true_groups.
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> true_groups = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45], [50]
>>> ]
>>> pred_groups = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> comparisons = ut.compare_groups(true_groups, pred_groups)
>>> print(comparisons)
>>> result = ut.repr4(comparisons)
>>> print(result)
{
'common': {{1, 2}},
'pred_hybrid': {{10}, {3, 4}, {5, 6, 11}, {7}, {8, 9}},
'pred_merges': [{{12}, {13, 14}}, {{31, 32}, {33, 34, 35}}],
'pred_splits': [{20, 21, 22, 23}, {41, 42, 43, 44, 45}],
'true_hybrid': {{3, 5, 6}, {4}, {50}, {7, 8}, {9, 10, 11}},
'true_merges': [{12, 13, 14}, {31, 32, 33, 34, 35}],
'true_splits': [{{20, 21}, {22, 23}}, {{41, 42, 43, 44}, {45}}],
} | [
"r",
"Finds",
"how",
"predictions",
"need",
"to",
"be",
"modified",
"to",
"match",
"the",
"true",
"grouping",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L143-L234 | train |
Erotemic/utool | utool/util_alg.py | grouping_delta_stats | def grouping_delta_stats(old, new):
"""
Returns statistics about grouping changes
Args:
old (set of frozenset): old grouping
new (set of frozenset): new grouping
Returns:
pd.DataFrame: df: data frame of size statistics
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> old = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> new = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45],
>>> ]
>>> df = ut.grouping_delta_stats(old, new)
>>> print(df)
"""
import pandas as pd
import utool as ut
group_delta = ut.grouping_delta(old, new)
stats = ut.odict()
unchanged = group_delta['unchanged']
splits = group_delta['splits']
merges = group_delta['merges']
hybrid = group_delta['hybrid']
statsmap = ut.partial(lambda x: ut.stats_dict(map(len, x), size=True))
stats['unchanged'] = statsmap(unchanged)
stats['old_split'] = statsmap(splits['old'])
stats['new_split'] = statsmap(ut.flatten(splits['new']))
stats['old_merge'] = statsmap(ut.flatten(merges['old']))
stats['new_merge'] = statsmap(merges['new'])
stats['old_hybrid'] = statsmap(hybrid['old'])
stats['new_hybrid'] = statsmap(hybrid['new'])
df = pd.DataFrame.from_dict(stats, orient='index')
df = df.loc[list(stats.keys())]
return df | python | def grouping_delta_stats(old, new):
"""
Returns statistics about grouping changes
Args:
old (set of frozenset): old grouping
new (set of frozenset): new grouping
Returns:
pd.DataFrame: df: data frame of size statistics
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> old = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> new = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45],
>>> ]
>>> df = ut.grouping_delta_stats(old, new)
>>> print(df)
"""
import pandas as pd
import utool as ut
group_delta = ut.grouping_delta(old, new)
stats = ut.odict()
unchanged = group_delta['unchanged']
splits = group_delta['splits']
merges = group_delta['merges']
hybrid = group_delta['hybrid']
statsmap = ut.partial(lambda x: ut.stats_dict(map(len, x), size=True))
stats['unchanged'] = statsmap(unchanged)
stats['old_split'] = statsmap(splits['old'])
stats['new_split'] = statsmap(ut.flatten(splits['new']))
stats['old_merge'] = statsmap(ut.flatten(merges['old']))
stats['new_merge'] = statsmap(merges['new'])
stats['old_hybrid'] = statsmap(hybrid['old'])
stats['new_hybrid'] = statsmap(hybrid['new'])
df = pd.DataFrame.from_dict(stats, orient='index')
df = df.loc[list(stats.keys())]
return df | [
"def",
"grouping_delta_stats",
"(",
"old",
",",
"new",
")",
":",
"import",
"pandas",
"as",
"pd",
"import",
"utool",
"as",
"ut",
"group_delta",
"=",
"ut",
".",
"grouping_delta",
"(",
"old",
",",
"new",
")",
"stats",
"=",
"ut",
".",
"odict",
"(",
")",
"unchanged",
"=",
"group_delta",
"[",
"'unchanged'",
"]",
"splits",
"=",
"group_delta",
"[",
"'splits'",
"]",
"merges",
"=",
"group_delta",
"[",
"'merges'",
"]",
"hybrid",
"=",
"group_delta",
"[",
"'hybrid'",
"]",
"statsmap",
"=",
"ut",
".",
"partial",
"(",
"lambda",
"x",
":",
"ut",
".",
"stats_dict",
"(",
"map",
"(",
"len",
",",
"x",
")",
",",
"size",
"=",
"True",
")",
")",
"stats",
"[",
"'unchanged'",
"]",
"=",
"statsmap",
"(",
"unchanged",
")",
"stats",
"[",
"'old_split'",
"]",
"=",
"statsmap",
"(",
"splits",
"[",
"'old'",
"]",
")",
"stats",
"[",
"'new_split'",
"]",
"=",
"statsmap",
"(",
"ut",
".",
"flatten",
"(",
"splits",
"[",
"'new'",
"]",
")",
")",
"stats",
"[",
"'old_merge'",
"]",
"=",
"statsmap",
"(",
"ut",
".",
"flatten",
"(",
"merges",
"[",
"'old'",
"]",
")",
")",
"stats",
"[",
"'new_merge'",
"]",
"=",
"statsmap",
"(",
"merges",
"[",
"'new'",
"]",
")",
"stats",
"[",
"'old_hybrid'",
"]",
"=",
"statsmap",
"(",
"hybrid",
"[",
"'old'",
"]",
")",
"stats",
"[",
"'new_hybrid'",
"]",
"=",
"statsmap",
"(",
"hybrid",
"[",
"'new'",
"]",
")",
"df",
"=",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"stats",
",",
"orient",
"=",
"'index'",
")",
"df",
"=",
"df",
".",
"loc",
"[",
"list",
"(",
"stats",
".",
"keys",
"(",
")",
")",
"]",
"return",
"df"
] | Returns statistics about grouping changes
Args:
old (set of frozenset): old grouping
new (set of frozenset): new grouping
Returns:
pd.DataFrame: df: data frame of size statistics
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> old = [
>>> [20, 21, 22, 23], [1, 2], [12], [13, 14], [3, 4], [5, 6,11],
>>> [7], [8, 9], [10], [31, 32], [33, 34, 35], [41, 42, 43, 44, 45]
>>> ]
>>> new = [
>>> [20, 21], [22, 23], [1, 2], [12, 13, 14], [4], [5, 6, 3], [7, 8],
>>> [9, 10, 11], [31, 32, 33, 34, 35], [41, 42, 43, 44], [45],
>>> ]
>>> df = ut.grouping_delta_stats(old, new)
>>> print(df) | [
"Returns",
"statistics",
"about",
"grouping",
"changes"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L440-L484 | train |
Erotemic/utool | utool/util_alg.py | upper_diag_self_prodx | def upper_diag_self_prodx(list_):
"""
upper diagonal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
"""
return [(item1, item2)
for n1, item1 in enumerate(list_)
for n2, item2 in enumerate(list_) if n1 < n2] | python | def upper_diag_self_prodx(list_):
"""
upper diagonal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)]
"""
return [(item1, item2)
for n1, item1 in enumerate(list_)
for n2, item2 in enumerate(list_) if n1 < n2] | [
"def",
"upper_diag_self_prodx",
"(",
"list_",
")",
":",
"return",
"[",
"(",
"item1",
",",
"item2",
")",
"for",
"n1",
",",
"item1",
"in",
"enumerate",
"(",
"list_",
")",
"for",
"n2",
",",
"item2",
"in",
"enumerate",
"(",
"list_",
")",
"if",
"n1",
"<",
"n2",
"]"
] | upper diagonal of cartesian product of self and self.
Weird name. fixme
Args:
list_ (list):
Returns:
list:
CommandLine:
python -m utool.util_alg --exec-upper_diag_self_prodx
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> list_ = [1, 2, 3]
>>> result = upper_diag_self_prodx(list_)
>>> print(result)
[(1, 2), (1, 3), (2, 3)] | [
"upper",
"diagnoal",
"of",
"cartesian",
"product",
"of",
"self",
"and",
"self",
".",
"Weird",
"name",
".",
"fixme"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L487-L511 | train |
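Since both loops pair positions i < j, the comprehension above is equivalent to itertools.combinations over the list; a quick standalone check:

```python
import itertools

def upper_diag_self_prodx(list_):
    # same pairing rule as above: keep (item_i, item_j) only when i < j
    return [(a, b) for i, a in enumerate(list_)
            for j, b in enumerate(list_) if i < j]

assert upper_diag_self_prodx([1, 2, 3]) == list(itertools.combinations([1, 2, 3], 2))
print(upper_diag_self_prodx([1, 2, 3]))  # [(1, 2), (1, 3), (2, 3)]
```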
Erotemic/utool | utool/util_alg.py | colwise_diag_idxs | def colwise_diag_idxs(size, num=2):
r"""
don't trust this implementation or this function name
Args:
size (int):
Returns:
?: upper_diag_idxs
CommandLine:
python -m utool.util_alg --exec-colwise_diag_idxs --size=5 --num=2
python -m utool.util_alg --exec-colwise_diag_idxs --size=3 --num=3
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> size = ut.get_argval('--size', default=5)
>>> num = ut.get_argval('--num', default=2)
>>> mat = np.zeros([size] * num, dtype=np.int)
>>> upper_diag_idxs = colwise_diag_idxs(size, num)
>>> poses = np.array(upper_diag_idxs)
>>> idxs = np.ravel_multi_index(poses.T, mat.shape)
>>> print('poses.T =\n%s' % (ut.repr2(poses.T),))
>>> mat[tuple(poses.T)] = np.arange(1, len(poses) + 1)
>>> print(mat)
poses.T =
np.array([[0, 0, 1, 0, 1, 2, 0, 1, 2, 3],
[1, 2, 2, 3, 3, 3, 4, 4, 4, 4]])
"""
# diag_idxs = list(diagonalized_iter(size))
# upper_diag_idxs = [(r, c) for r, c in diag_idxs if r < c]
# # diag_idxs = list(diagonalized_iter(size))
import utool as ut
diag_idxs = ut.iprod(*[range(size) for _ in range(num)])
#diag_idxs = list(ut.iprod(range(size), range(size)))
# this is pretty much a simple c ordering
upper_diag_idxs = [
tup[::-1] for tup in diag_idxs
if all([a > b for a, b in ut.itertwo(tup)])
#if all([a > b for a, b in ut.itertwo(tup[:2])])
]
#upper_diag_idxs = [(c, r) for r, c in diag_idxs if r > c]
# # upper_diag_idxs = [(r, c) for r, c in diag_idxs if r > c]
return upper_diag_idxs | python | def colwise_diag_idxs(size, num=2):
r"""
don't trust this implementation or this function name
Args:
size (int):
Returns:
?: upper_diag_idxs
CommandLine:
python -m utool.util_alg --exec-colwise_diag_idxs --size=5 --num=2
python -m utool.util_alg --exec-colwise_diag_idxs --size=3 --num=3
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> size = ut.get_argval('--size', default=5)
>>> num = ut.get_argval('--num', default=2)
>>> mat = np.zeros([size] * num, dtype=np.int)
>>> upper_diag_idxs = colwise_diag_idxs(size, num)
>>> poses = np.array(upper_diag_idxs)
>>> idxs = np.ravel_multi_index(poses.T, mat.shape)
>>> print('poses.T =\n%s' % (ut.repr2(poses.T),))
>>> mat[tuple(poses.T)] = np.arange(1, len(poses) + 1)
>>> print(mat)
poses.T =
np.array([[0, 0, 1, 0, 1, 2, 0, 1, 2, 3],
[1, 2, 2, 3, 3, 3, 4, 4, 4, 4]])
"""
# diag_idxs = list(diagonalized_iter(size))
# upper_diag_idxs = [(r, c) for r, c in diag_idxs if r < c]
# # diag_idxs = list(diagonalized_iter(size))
import utool as ut
diag_idxs = ut.iprod(*[range(size) for _ in range(num)])
#diag_idxs = list(ut.iprod(range(size), range(size)))
# this is pretty much a simple c ordering
upper_diag_idxs = [
tup[::-1] for tup in diag_idxs
if all([a > b for a, b in ut.itertwo(tup)])
#if all([a > b for a, b in ut.itertwo(tup[:2])])
]
#upper_diag_idxs = [(c, r) for r, c in diag_idxs if r > c]
# # upper_diag_idxs = [(r, c) for r, c in diag_idxs if r > c]
return upper_diag_idxs | [
"def",
"colwise_diag_idxs",
"(",
"size",
",",
"num",
"=",
"2",
")",
":",
"# diag_idxs = list(diagonalized_iter(size))",
"# upper_diag_idxs = [(r, c) for r, c in diag_idxs if r < c]",
"# # diag_idxs = list(diagonalized_iter(size))",
"import",
"utool",
"as",
"ut",
"diag_idxs",
"=",
"ut",
".",
"iprod",
"(",
"*",
"[",
"range",
"(",
"size",
")",
"for",
"_",
"in",
"range",
"(",
"num",
")",
"]",
")",
"#diag_idxs = list(ut.iprod(range(size), range(size)))",
"# this is pretty much a simple c ordering",
"upper_diag_idxs",
"=",
"[",
"tup",
"[",
":",
":",
"-",
"1",
"]",
"for",
"tup",
"in",
"diag_idxs",
"if",
"all",
"(",
"[",
"a",
">",
"b",
"for",
"a",
",",
"b",
"in",
"ut",
".",
"itertwo",
"(",
"tup",
")",
"]",
")",
"#if all([a > b for a, b in ut.itertwo(tup[:2])])",
"]",
"#upper_diag_idxs = [(c, r) for r, c in diag_idxs if r > c]",
"# # upper_diag_idxs = [(r, c) for r, c in diag_idxs if r > c]",
"return",
"upper_diag_idxs"
] | r"""
don't trust this implementation or this function name
Args:
size (int):
Returns:
?: upper_diag_idxs
CommandLine:
python -m utool.util_alg --exec-colwise_diag_idxs --size=5 --num=2
python -m utool.util_alg --exec-colwise_diag_idxs --size=3 --num=3
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> size = ut.get_argval('--size', default=5)
>>> num = ut.get_argval('--num', default=2)
>>> mat = np.zeros([size] * num, dtype=np.int)
>>> upper_diag_idxs = colwise_diag_idxs(size, num)
>>> poses = np.array(upper_diag_idxs)
>>> idxs = np.ravel_multi_index(poses.T, mat.shape)
>>> print('poses.T =\n%s' % (ut.repr2(poses.T),))
>>> mat[tuple(poses.T)] = np.arange(1, len(poses) + 1)
>>> print(mat)
poses.T =
np.array([[0, 0, 1, 0, 1, 2, 0, 1, 2, 3],
[1, 2, 2, 3, 3, 3, 4, 4, 4, 4]]) | [
"r",
"dont",
"trust",
"this",
"implementation",
"or",
"this",
"function",
"name"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L547-L591 | train |
Erotemic/utool | utool/util_alg.py | product_nonsame | def product_nonsame(list1, list2):
""" product of list1 and list2 where items are non equal """
for item1, item2 in itertools.product(list1, list2):
if item1 != item2:
yield (item1, item2) | python | def product_nonsame(list1, list2):
""" product of list1 and list2 where items are non equal """
for item1, item2 in itertools.product(list1, list2):
if item1 != item2:
yield (item1, item2) | [
"def",
"product_nonsame",
"(",
"list1",
",",
"list2",
")",
":",
"for",
"item1",
",",
"item2",
"in",
"itertools",
".",
"product",
"(",
"list1",
",",
"list2",
")",
":",
"if",
"item1",
"!=",
"item2",
":",
"yield",
"(",
"item1",
",",
"item2",
")"
] | product of list1 and list2 where items are not equal | [
"product",
"of",
"list1",
"and",
"list2",
"where",
"items",
"are",
"non",
"equal"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L600-L604 | train |
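The generator above is the full cross product minus the diagonal; for example:

```python
import itertools

def product_nonsame(list1, list2):
    # cross product of the two lists, skipping equal pairs
    for a, b in itertools.product(list1, list2):
        if a != b:
            yield (a, b)

print(list(product_nonsame([1, 2], [2, 3])))  # [(1, 2), (1, 3), (2, 3)]
```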
Erotemic/utool | utool/util_alg.py | greedy_max_inden_setcover | def greedy_max_inden_setcover(candidate_sets_dict, items, max_covers=None):
"""
greedy algorithm for maximum independent set cover
Covers items with sets from candidate sets. Could be made faster.
CommandLine:
python -m utool.util_alg --test-greedy_max_inden_setcover
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {'a': [5, 3], 'b': [2, 3, 5],
... 'c': [4, 8], 'd': [7, 6, 2, 1]}
>>> items = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> max_covers = None
>>> tup = greedy_max_inden_setcover(candidate_sets_dict, items, max_covers)
>>> (uncovered_items, covered_items_list, accepted_keys) = tup
>>> result = ut.repr4((uncovered_items, sorted(list(accepted_keys))), nl=False)
>>> print(result)
([0, 9], ['a', 'c', 'd'])
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {'a': [5, 3], 'b': [2, 3, 5],
... 'c': [4, 8], 'd': [7, 6, 2, 1]}
>>> items = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> max_covers = 1
>>> tup = greedy_max_inden_setcover(candidate_sets_dict, items, max_covers)
>>> (uncovered_items, covered_items_list, accepted_keys) = tup
>>> result = ut.repr4((uncovered_items, sorted(list(accepted_keys))), nl=False)
>>> print(result)
([0, 3, 4, 5, 8, 9], ['d'])
"""
uncovered_set = set(items)
rejected_keys = set()
accepted_keys = set()
covered_items_list = []
while True:
# Break if we have enough covers
if max_covers is not None and len(covered_items_list) >= max_covers:
break
maxkey = None
maxlen = -1
# Loop over candidates to find the biggest not-yet-added cover set
for key, candidate_items in six.iteritems(candidate_sets_dict):
if key in rejected_keys or key in accepted_keys:
continue
#print('Checking %r' % (key,))
lenval = len(candidate_items)
# len(uncovered_set.intersection(candidate_items)) == lenval:
if uncovered_set.issuperset(candidate_items):
if lenval > maxlen:
maxkey = key
maxlen = lenval
else:
rejected_keys.add(key)
# Add the set to the cover
if maxkey is None:
break
maxval = candidate_sets_dict[maxkey]
accepted_keys.add(maxkey)
covered_items_list.append(list(maxval))
# Add values in this key to the cover
uncovered_set.difference_update(maxval)
uncovered_items = list(uncovered_set)
covertup = uncovered_items, covered_items_list, accepted_keys
return covertup | python | def greedy_max_inden_setcover(candidate_sets_dict, items, max_covers=None):
"""
greedy algorithm for maximum independent set cover
Covers items with sets from candidate sets. Could be made faster.
CommandLine:
python -m utool.util_alg --test-greedy_max_inden_setcover
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {'a': [5, 3], 'b': [2, 3, 5],
... 'c': [4, 8], 'd': [7, 6, 2, 1]}
>>> items = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> max_covers = None
>>> tup = greedy_max_inden_setcover(candidate_sets_dict, items, max_covers)
>>> (uncovered_items, covered_items_list, accepted_keys) = tup
>>> result = ut.repr4((uncovered_items, sorted(list(accepted_keys))), nl=False)
>>> print(result)
([0, 9], ['a', 'c', 'd'])
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {'a': [5, 3], 'b': [2, 3, 5],
... 'c': [4, 8], 'd': [7, 6, 2, 1]}
>>> items = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> max_covers = 1
>>> tup = greedy_max_inden_setcover(candidate_sets_dict, items, max_covers)
>>> (uncovered_items, covered_items_list, accepted_keys) = tup
>>> result = ut.repr4((uncovered_items, sorted(list(accepted_keys))), nl=False)
>>> print(result)
([0, 3, 4, 5, 8, 9], ['d'])
"""
uncovered_set = set(items)
rejected_keys = set()
accepted_keys = set()
covered_items_list = []
while True:
# Break if we have enough covers
if max_covers is not None and len(covered_items_list) >= max_covers:
break
maxkey = None
maxlen = -1
# Loop over candidates to find the biggest not-yet-added cover set
for key, candidate_items in six.iteritems(candidate_sets_dict):
if key in rejected_keys or key in accepted_keys:
continue
#print('Checking %r' % (key,))
lenval = len(candidate_items)
# len(uncovered_set.intersection(candidate_items)) == lenval:
if uncovered_set.issuperset(candidate_items):
if lenval > maxlen:
maxkey = key
maxlen = lenval
else:
rejected_keys.add(key)
# Add the set to the cover
if maxkey is None:
break
maxval = candidate_sets_dict[maxkey]
accepted_keys.add(maxkey)
covered_items_list.append(list(maxval))
# Add values in this key to the cover
uncovered_set.difference_update(maxval)
uncovered_items = list(uncovered_set)
covertup = uncovered_items, covered_items_list, accepted_keys
return covertup | [
"def",
"greedy_max_inden_setcover",
"(",
"candidate_sets_dict",
",",
"items",
",",
"max_covers",
"=",
"None",
")",
":",
"uncovered_set",
"=",
"set",
"(",
"items",
")",
"rejected_keys",
"=",
"set",
"(",
")",
"accepted_keys",
"=",
"set",
"(",
")",
"covered_items_list",
"=",
"[",
"]",
"while",
"True",
":",
"# Break if we have enough covers",
"if",
"max_covers",
"is",
"not",
"None",
"and",
"len",
"(",
"covered_items_list",
")",
">=",
"max_covers",
":",
"break",
"maxkey",
"=",
"None",
"maxlen",
"=",
"-",
"1",
"# Loop over candidates to find the biggested unadded cover set",
"for",
"key",
",",
"candidate_items",
"in",
"six",
".",
"iteritems",
"(",
"candidate_sets_dict",
")",
":",
"if",
"key",
"in",
"rejected_keys",
"or",
"key",
"in",
"accepted_keys",
":",
"continue",
"#print('Checking %r' % (key,))",
"lenval",
"=",
"len",
"(",
"candidate_items",
")",
"# len(uncovered_set.intersection(candidate_items)) == lenval:",
"if",
"uncovered_set",
".",
"issuperset",
"(",
"candidate_items",
")",
":",
"if",
"lenval",
">",
"maxlen",
":",
"maxkey",
"=",
"key",
"maxlen",
"=",
"lenval",
"else",
":",
"rejected_keys",
".",
"add",
"(",
"key",
")",
"# Add the set to the cover",
"if",
"maxkey",
"is",
"None",
":",
"break",
"maxval",
"=",
"candidate_sets_dict",
"[",
"maxkey",
"]",
"accepted_keys",
".",
"add",
"(",
"maxkey",
")",
"covered_items_list",
".",
"append",
"(",
"list",
"(",
"maxval",
")",
")",
"# Add values in this key to the cover",
"uncovered_set",
".",
"difference_update",
"(",
"maxval",
")",
"uncovered_items",
"=",
"list",
"(",
"uncovered_set",
")",
"covertup",
"=",
"uncovered_items",
",",
"covered_items_list",
",",
"accepted_keys",
"return",
"covertup"
] | greedy algorithm for maximum independent set cover
Covers items with sets from candidate sets. Could be made faster.
CommandLine:
python -m utool.util_alg --test-greedy_max_inden_setcover
Example0:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {'a': [5, 3], 'b': [2, 3, 5],
... 'c': [4, 8], 'd': [7, 6, 2, 1]}
>>> items = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> max_covers = None
>>> tup = greedy_max_inden_setcover(candidate_sets_dict, items, max_covers)
>>> (uncovered_items, covered_items_list, accepted_keys) = tup
>>> result = ut.repr4((uncovered_items, sorted(list(accepted_keys))), nl=False)
>>> print(result)
([0, 9], ['a', 'c', 'd'])
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {'a': [5, 3], 'b': [2, 3, 5],
... 'c': [4, 8], 'd': [7, 6, 2, 1]}
>>> items = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> max_covers = 1
>>> tup = greedy_max_inden_setcover(candidate_sets_dict, items, max_covers)
>>> (uncovered_items, covered_items_list, accepted_keys) = tup
>>> result = ut.repr4((uncovered_items, sorted(list(accepted_keys))), nl=False)
>>> print(result)
([0, 3, 4, 5, 8, 9], ['d']) | [
"greedy",
"algorithm",
"for",
"maximum",
"independent",
"set",
"cover"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L611-L681 | train |
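Note: for readers without utool on the path, the greedy rule above reduces to a few lines — repeatedly accept the largest candidate set that still fits entirely inside the uncovered items. The sketch below is illustrative (its names are not part of utool) and mirrors the first doctest.

def greedy_independent_cover(candidates, items, max_covers=None):
    # candidates: dict mapping a key to an iterable of items
    uncovered = set(items)
    accepted = {}
    while max_covers is None or len(accepted) < max_covers:
        # keep only candidate sets that fit entirely inside the uncovered items
        fitting = {k: set(v) for k, v in candidates.items()
                   if k not in accepted and uncovered.issuperset(v)}
        if not fitting:
            break
        best = max(fitting, key=lambda k: len(fitting[k]))
        accepted[best] = fitting[best]
        uncovered -= fitting[best]
    return sorted(uncovered), sorted(accepted)

cands = {'a': [5, 3], 'b': [2, 3, 5], 'c': [4, 8], 'd': [7, 6, 2, 1]}
print(greedy_independent_cover(cands, range(10)))  # ([0, 9], ['a', 'c', 'd'])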
Erotemic/utool | utool/util_alg.py | setcover_greedy | def setcover_greedy(candidate_sets_dict, items=None, set_weights=None, item_values=None, max_weight=None):
r"""
Greedy algorithm for various covering problems.
approximation guarantees depending on specifications like set_weights and item values
Set Cover: log(len(items) + 1) approximation algorithm
Weighted Maximum Cover: 1 - 1/e == .632 approximation algorithm
Generalized maximum coverage is not implemented
References:
https://en.wikipedia.org/wiki/Maximum_coverage_problem
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {
>>> 'a': [1, 2, 3, 8, 9, 0],
>>> 'b': [1, 2, 3, 4, 5],
>>> 'c': [4, 5, 7],
>>> 'd': [5, 6, 7],
>>> 'e': [6, 7, 8, 9, 0],
>>> }
>>> max_weight = None
>>> items = None
>>> set_weights = None
>>> item_values = None
>>> greedy_soln = ut.sort_dict(ut.setcover_greedy(candidate_sets_dict))
>>> exact_soln = ut.sort_dict(ut.setcover_ilp(candidate_sets_dict))
>>> print('greedy_soln = %r' % (greedy_soln,))
>>> print('exact_soln = %r' % (exact_soln,))
"""
import utool as ut
solution_cover = {}
# If candset_weights or item_values not given use the length as defaults
if items is None:
items = ut.flatten(candidate_sets_dict.values())
if set_weights is None:
get_weight = len
else:
def get_weight(solution_cover):
return sum([set_weights[key] for key in solution_cover.keys()])
if item_values is None:
get_value = len
else:
def get_value(vals):
return sum([item_values[v] for v in vals])
if max_weight is None:
max_weight = get_weight(candidate_sets_dict)
avail_covers = {key: set(val) for key, val in candidate_sets_dict.items()}
# While we still need covers
while get_weight(solution_cover) < max_weight and len(avail_covers) > 0:
# Find the candidate set with the most uncovered items
uncovered_values = list(map(get_value, avail_covers.values()))
chosen_idx = ut.argmax(uncovered_values)
if uncovered_values[chosen_idx] <= 0:
# needlessly adding value-less items
break
chosen_key = list(avail_covers.keys())[chosen_idx]
# Add values in this key to the cover
chosen_set = avail_covers[chosen_key]
solution_cover[chosen_key] = candidate_sets_dict[chosen_key]
# Remove chosen set from available options and covered items
# from remaining available sets
del avail_covers[chosen_key]
for vals in avail_covers.values():
vals.difference_update(chosen_set)
return solution_cover | python | def setcover_greedy(candidate_sets_dict, items=None, set_weights=None, item_values=None, max_weight=None):
r"""
Greedy algorithm for various covering problems.
approximation guarantees depending on specifications like set_weights and item values
Set Cover: log(len(items) + 1) approximation algorithm
Weighted Maximum Cover: 1 - 1/e == .632 approximation algorithm
Generalized maximum coverage is not implemented
References:
https://en.wikipedia.org/wiki/Maximum_coverage_problem
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {
>>> 'a': [1, 2, 3, 8, 9, 0],
>>> 'b': [1, 2, 3, 4, 5],
>>> 'c': [4, 5, 7],
>>> 'd': [5, 6, 7],
>>> 'e': [6, 7, 8, 9, 0],
>>> }
>>> max_weight = None
>>> items = None
>>> set_weights = None
>>> item_values = None
>>> greedy_soln = ut.sort_dict(ut.setcover_greedy(candidate_sets_dict))
>>> exact_soln = ut.sort_dict(ut.setcover_ilp(candidate_sets_dict))
>>> print('greedy_soln = %r' % (greedy_soln,))
>>> print('exact_soln = %r' % (exact_soln,))
"""
import utool as ut
solution_cover = {}
# If candset_weights or item_values not given use the length as defaults
if items is None:
items = ut.flatten(candidate_sets_dict.values())
if set_weights is None:
get_weight = len
else:
def get_weight(solution_cover):
return sum([set_weights[key] for key in solution_cover.keys()])
if item_values is None:
get_value = len
else:
def get_value(vals):
return sum([item_values[v] for v in vals])
if max_weight is None:
max_weight = get_weight(candidate_sets_dict)
avail_covers = {key: set(val) for key, val in candidate_sets_dict.items()}
# While we still need covers
while get_weight(solution_cover) < max_weight and len(avail_covers) > 0:
# Find the candidate set with the most uncovered items
uncovered_values = list(map(get_value, avail_covers.values()))
chosen_idx = ut.argmax(uncovered_values)
if uncovered_values[chosen_idx] <= 0:
# needlessly adding value-less items
break
chosen_key = list(avail_covers.keys())[chosen_idx]
# Add values in this key to the cover
chosen_set = avail_covers[chosen_key]
solution_cover[chosen_key] = candidate_sets_dict[chosen_key]
# Remove chosen set from available options and covered items
# from remaining available sets
del avail_covers[chosen_key]
for vals in avail_covers.values():
vals.difference_update(chosen_set)
return solution_cover | [
"def",
"setcover_greedy",
"(",
"candidate_sets_dict",
",",
"items",
"=",
"None",
",",
"set_weights",
"=",
"None",
",",
"item_values",
"=",
"None",
",",
"max_weight",
"=",
"None",
")",
":",
"import",
"utool",
"as",
"ut",
"solution_cover",
"=",
"{",
"}",
"# If candset_weights or item_values not given use the length as defaults",
"if",
"items",
"is",
"None",
":",
"items",
"=",
"ut",
".",
"flatten",
"(",
"candidate_sets_dict",
".",
"values",
"(",
")",
")",
"if",
"set_weights",
"is",
"None",
":",
"get_weight",
"=",
"len",
"else",
":",
"def",
"get_weight",
"(",
"solution_cover",
")",
":",
"sum",
"(",
"[",
"set_weights",
"[",
"key",
"]",
"for",
"key",
"in",
"solution_cover",
".",
"keys",
"(",
")",
"]",
")",
"if",
"item_values",
"is",
"None",
":",
"get_value",
"=",
"len",
"else",
":",
"def",
"get_value",
"(",
"vals",
")",
":",
"sum",
"(",
"[",
"item_values",
"[",
"v",
"]",
"for",
"v",
"in",
"vals",
"]",
")",
"if",
"max_weight",
"is",
"None",
":",
"max_weight",
"=",
"get_weight",
"(",
"candidate_sets_dict",
")",
"avail_covers",
"=",
"{",
"key",
":",
"set",
"(",
"val",
")",
"for",
"key",
",",
"val",
"in",
"candidate_sets_dict",
".",
"items",
"(",
")",
"}",
"# While we still need covers",
"while",
"get_weight",
"(",
"solution_cover",
")",
"<",
"max_weight",
"and",
"len",
"(",
"avail_covers",
")",
">",
"0",
":",
"# Find candiate set with the most uncovered items",
"avail_covers",
".",
"values",
"(",
")",
"uncovered_values",
"=",
"list",
"(",
"map",
"(",
"get_value",
",",
"avail_covers",
".",
"values",
"(",
")",
")",
")",
"chosen_idx",
"=",
"ut",
".",
"argmax",
"(",
"uncovered_values",
")",
"if",
"uncovered_values",
"[",
"chosen_idx",
"]",
"<=",
"0",
":",
"# needlessly adding value-less items",
"break",
"chosen_key",
"=",
"list",
"(",
"avail_covers",
".",
"keys",
"(",
")",
")",
"[",
"chosen_idx",
"]",
"# Add values in this key to the cover",
"chosen_set",
"=",
"avail_covers",
"[",
"chosen_key",
"]",
"solution_cover",
"[",
"chosen_key",
"]",
"=",
"candidate_sets_dict",
"[",
"chosen_key",
"]",
"# Remove chosen set from available options and covered items",
"# from remaining available sets",
"del",
"avail_covers",
"[",
"chosen_key",
"]",
"for",
"vals",
"in",
"avail_covers",
".",
"values",
"(",
")",
":",
"vals",
".",
"difference_update",
"(",
"chosen_set",
")",
"return",
"solution_cover"
] | r"""
Greedy algorithm for various covering problems.
approximation guarantees depending on specifications like set_weights and item values
Set Cover: log(len(items) + 1) approximation algorithm
Weighted Maximum Cover: 1 - 1/e == .632 approximation algorithm
Generalized maximum coverage is not implemented
References:
https://en.wikipedia.org/wiki/Maximum_coverage_problem
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> candidate_sets_dict = {
>>> 'a': [1, 2, 3, 8, 9, 0],
>>> 'b': [1, 2, 3, 4, 5],
>>> 'c': [4, 5, 7],
>>> 'd': [5, 6, 7],
>>> 'e': [6, 7, 8, 9, 0],
>>> }
>>> max_weight = None
>>> items = None
>>> set_weights = None
>>> item_values = None
>>> greedy_soln = ut.sort_dict(ut.setcover_greedy(candidate_sets_dict))
>>> exact_soln = ut.sort_dict(ut.setcover_ilp(candidate_sets_dict))
>>> print('greedy_soln = %r' % (greedy_soln,))
>>> print('exact_soln = %r' % (exact_soln,)) | [
"r",
"Greedy",
"algorithm",
"for",
"various",
"covering",
"problems",
".",
"approximation",
"gaurentees",
"depending",
"on",
"specifications",
"like",
"set_weights",
"and",
"item",
"values"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L684-L752 | train |
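Note: the unweighted default of setcover_greedy (pick the set covering the most still-uncovered items, the log(n + 1)-approximate rule) can be sketched standalone; names below are illustrative only.

def greedy_set_cover(candidates):
    avail = {k: set(v) for k, v in candidates.items()}
    cover = {}
    while any(avail.values()):
        # choose the candidate covering the most still-uncovered items
        key = max(avail, key=lambda k: len(avail[k]))
        chosen = avail.pop(key)
        cover[key] = candidates[key]
        for rest in avail.values():
            rest -= chosen
    return cover

cands = {'a': [1, 2, 3, 8, 9, 0], 'b': [1, 2, 3, 4, 5],
         'c': [4, 5, 7], 'd': [5, 6, 7], 'e': [6, 7, 8, 9, 0]}
print(sorted(greedy_set_cover(cands)))  # ['a', 'c', 'd'] for this input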
Erotemic/utool | utool/util_alg.py | item_hist | def item_hist(list_):
""" counts the number of times each item appears in the dictionary """
dict_hist = {}
# Insert each item into the correct group
for item in list_:
if item not in dict_hist:
dict_hist[item] = 0
dict_hist[item] += 1
return dict_hist | python | def item_hist(list_):
""" counts the number of times each item appears in the dictionary """
dict_hist = {}
# Insert each item into the correct group
for item in list_:
if item not in dict_hist:
dict_hist[item] = 0
dict_hist[item] += 1
return dict_hist | [
"def",
"item_hist",
"(",
"list_",
")",
":",
"dict_hist",
"=",
"{",
"}",
"# Insert each item into the correct group",
"for",
"item",
"in",
"list_",
":",
"if",
"item",
"not",
"in",
"dict_hist",
":",
"dict_hist",
"[",
"item",
"]",
"=",
"0",
"dict_hist",
"[",
"item",
"]",
"+=",
"1",
"return",
"dict_hist"
] | counts the number of times each item appears in the list | [
"counts",
"the",
"number",
"of",
"times",
"each",
"item",
"appears",
"in",
"the",
"dictionary"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L901-L909 | train |
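Note: item_hist is equivalent to the standard library's collections.Counter, which is the usual way to build such a histogram:

from collections import Counter
data = ['a', 'b', 'a', 'c', 'b', 'a']
print(dict(Counter(data)))  # {'a': 3, 'b': 2, 'c': 1}, same result as item_hist(data)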
Erotemic/utool | utool/util_alg.py | get_nth_prime | def get_nth_prime(n, max_prime=4100, safe=True):
""" hacky but still brute force algorithm for finding nth prime for small tests """
if n <= 100:
first_100_primes = (
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,
281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521,
523, 541, )
#print(len(first_100_primes))
nth_prime = first_100_primes[n - 1]
else:
if safe:
primes = [num for num in range(2, max_prime) if is_prime(num)]
nth_prime = primes[n - 1]
else:
# This can run for a while... get it? while?
nth_prime = get_nth_prime_bruteforce(n)
return nth_prime | python | def get_nth_prime(n, max_prime=4100, safe=True):
""" hacky but still brute force algorithm for finding nth prime for small tests """
if n <= 100:
first_100_primes = (
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,
211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277,
281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359,
367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521,
523, 541, )
#print(len(first_100_primes))
nth_prime = first_100_primes[n - 1]
else:
if safe:
primes = [num for num in range(2, max_prime) if is_prime(num)]
nth_prime = primes[n - 1]
else:
# This can run for a while... get it? while?
nth_prime = get_nth_prime_bruteforce(n)
return nth_prime | [
"def",
"get_nth_prime",
"(",
"n",
",",
"max_prime",
"=",
"4100",
",",
"safe",
"=",
"True",
")",
":",
"if",
"n",
"<=",
"100",
":",
"first_100_primes",
"=",
"(",
"2",
",",
"3",
",",
"5",
",",
"7",
",",
"11",
",",
"13",
",",
"17",
",",
"19",
",",
"23",
",",
"29",
",",
"31",
",",
"37",
",",
"41",
",",
"43",
",",
"47",
",",
"53",
",",
"59",
",",
"61",
",",
"67",
",",
"71",
",",
"73",
",",
"79",
",",
"83",
",",
"89",
",",
"97",
",",
"101",
",",
"103",
",",
"107",
",",
"109",
",",
"113",
",",
"127",
",",
"131",
",",
"137",
",",
"139",
",",
"149",
",",
"151",
",",
"157",
",",
"163",
",",
"167",
",",
"173",
",",
"179",
",",
"181",
",",
"191",
",",
"193",
",",
"197",
",",
"199",
",",
"211",
",",
"223",
",",
"227",
",",
"229",
",",
"233",
",",
"239",
",",
"241",
",",
"251",
",",
"257",
",",
"263",
",",
"269",
",",
"271",
",",
"277",
",",
"281",
",",
"283",
",",
"293",
",",
"307",
",",
"311",
",",
"313",
",",
"317",
",",
"331",
",",
"337",
",",
"347",
",",
"349",
",",
"353",
",",
"359",
",",
"367",
",",
"373",
",",
"379",
",",
"383",
",",
"389",
",",
"397",
",",
"401",
",",
"409",
",",
"419",
",",
"421",
",",
"431",
",",
"433",
",",
"439",
",",
"443",
",",
"449",
",",
"457",
",",
"461",
",",
"463",
",",
"467",
",",
"479",
",",
"487",
",",
"491",
",",
"499",
",",
"503",
",",
"509",
",",
"521",
",",
"523",
",",
"541",
",",
")",
"#print(len(first_100_primes))",
"nth_prime",
"=",
"first_100_primes",
"[",
"n",
"-",
"1",
"]",
"else",
":",
"if",
"safe",
":",
"primes",
"=",
"[",
"num",
"for",
"num",
"in",
"range",
"(",
"2",
",",
"max_prime",
")",
"if",
"is_prime",
"(",
"num",
")",
"]",
"nth_prime",
"=",
"primes",
"[",
"n",
"]",
"else",
":",
"# This can run for a while... get it? while?",
"nth_prime",
"=",
"get_nth_prime_bruteforce",
"(",
"n",
")",
"return",
"nth_prime"
] | hacky but still brute force algorithm for finding nth prime for small tests | [
"hacky",
"but",
"still",
"brute",
"force",
"algorithm",
"for",
"finding",
"nth",
"prime",
"for",
"small",
"tests"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1049-L1070 | train |
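Note: the safe branch above now uses primes[n - 1] so both branches are 1-indexed. A self-contained, 1-indexed nth-prime by trial division is handy for sanity-checking the table (541 is the 100th prime); sketch only, not part of utool:

def nth_prime(n):
    count, num = 0, 1
    while count < n:
        num += 1
        if all(num % d for d in range(2, int(num ** 0.5) + 1)):
            count += 1
    return num

assert nth_prime(1) == 2 and nth_prime(100) == 541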
Erotemic/utool | utool/util_alg.py | knapsack | def knapsack(items, maxweight, method='recursive'):
r"""
Solve the knapsack problem by finding the most valuable subsequence of
`items` subject to the constraint that it weighs no more than `maxweight`.
Args:
items (tuple): is a sequence of tuples `(value, weight, id_)`, where
`value` is a number and `weight` is a non-negative integer, and
`id_` is an item identifier.
maxweight (scalar): is a non-negative integer.
Returns:
tuple: (total_value, items_subset) - a pair whose first element is the
sum of values in the most valuable subsequence, and whose second
element is the subsequence. Subset may be different depending on
implementation (i.e. top-down recursive vs bottom-up iterative)
References:
http://codereview.stackexchange.com/questions/20569/dynamic-programming-solution-to-knapsack-problem
http://stackoverflow.com/questions/141779/solving-the-np-complete-problem-in-xkcd
http://www.es.ele.tue.nl/education/5MC10/Solutions/knapsack.pdf
CommandLine:
python -m utool.util_alg --test-knapsack
python -m utool.util_alg --test-knapsack:0
python -m utool.util_alg --exec-knapsack:1
Ignore:
annots_per_view = 2
maxweight = 2
items = [
(0.7005208343554686, 0.7005208343554686, 0),
(0.669270834329427, 0.669270834329427, 1),
(0.669270834329427, 0.669270834329427, 2),
(0.7005208343554686, 0.7005208343554686, 3),
(0.7005208343554686, 0.7005208343554686, 4),
(0.669270834329427, 0.669270834329427, 5),
(0.669270834329427, 0.669270834329427, 6),
(0.669270834329427, 0.669270834329427, 7),
(0.669270834329427, 0.669270834329427, 8),
(0.669270834329427, 0.669270834329427, 9),
(0.669270834329427, 0.669270834329427, 10),
(0.669270834329427, 0.669270834329427, 11),
(0.669270834329427, 0.669270834329427, 12),
(0.669270834329427, 0.669270834329427, 13),
(0.669270834329427, 0.669270834329427, 14),
(0.669270834329427, 0.669270834329427, 15),
(0.669270834329427, 0.669270834329427, 16),
(0.669270834329427, 0.669270834329427, 17),
(0.7005208343554686, 0.7005208343554686, 18),
(0.7005208343554686, 0.7005208343554686, 19),
(0.669270834329427, 0.669270834329427, 20),
(0.7005208343554686, 0.7005208343554686, 21),
(0.669270834329427, 0.669270834329427, 22),
(0.669270834329427, 0.669270834329427, 23),
(0.669270834329427, 0.669270834329427, 24),
(0.669270834329427, 0.669270834329427, 25),
(0.669270834329427, 0.669270834329427, 26),
(0.669270834329427, 0.669270834329427, 27),
(0.669270834329427, 0.669270834329427, 28),
(0.7005208343554686, 0.7005208343554686, 29),
(0.669270834329427, 0.669270834329427, 30),
(0.669270834329427, 0.669270834329427, 31),
(0.669270834329427, 0.669270834329427, 32),
(0.669270834329427, 0.669270834329427, 33),
(0.7005208343554686, 0.7005208343554686, 34),
(0.669270834329427, 0.669270834329427, 35),
(0.669270834329427, 0.669270834329427, 36),
(0.669270834329427, 0.669270834329427, 37),
(0.7005208343554686, 0.7005208343554686, 38),
(0.669270834329427, 0.669270834329427, 39),
(0.669270834329427, 0.669270834329427, 40),
(0.7005208343554686, 0.7005208343554686, 41),
(0.669270834329427, 0.669270834329427, 42),
(0.669270834329427, 0.669270834329427, 43),
(0.669270834329427, 0.669270834329427, 44),
]
values = ut.take_column(items, 0)
weights = ut.take_column(items, 1)
indices = ut.take_column(items, 2)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
>>> maxweight = 15
>>> total_value, items_subset = knapsack(items, maxweight, method='recursive')
>>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative')
>>> result = 'total_value = %.2f\n' % (total_value,)
>>> result += 'items_subset = %r' % (items_subset,)
>>> ut.assert_eq(total_value1, total_value)
>>> ut.assert_eq(items_subset1, items_subset)
>>> print(result)
total_value = 11.00
items_subset = [(2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> # Solve https://xkcd.com/287/
>>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8] * 2
>>> items = [(w, w, i) for i, w in enumerate(weights)]
>>> maxweight = 15.05
>>> total_value, items_subset = knapsack(items, maxweight, method='recursive')
>>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative')
>>> total_weight = sum([t[1] for t in items_subset])
>>> print('total_weight = %r' % (total_weight,))
>>> result = 'total_value = %.2f' % (total_value,)
>>> print('items_subset = %r' % (items_subset,))
>>> print('items_subset1 = %r' % (items_subset1,))
>>> #assert items_subset1 == items_subset, 'NOT EQ\n%r !=\n%r' % (items_subset1, items_subset)
>>> print(result)
total_value = 15.05
Timeit:
>>> import utool as ut
>>> setup = ut.codeblock(
>>> '''
import utool as ut
weights = [215, 275, 335, 355, 42, 58] * 40
items = [(w, w, i) for i, w in enumerate(weights)]
maxweight = 2505
#import numba
#knapsack_numba = numba.autojit(ut.knapsack_iterative)
#knapsack_numba = numba.autojit(ut.knapsack_iterative_numpy)
''')
>>> # Test load time
>>> stmt_list1 = ut.codeblock(
>>> '''
#ut.knapsack_recursive(items, maxweight)
ut.knapsack_iterative(items, maxweight)
ut.knapsack_ilp(items, maxweight)
#knapsack_numba(items, maxweight)
#ut.knapsack_iterative_numpy(items, maxweight)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list1, setup, int(5))
"""
if method == 'recursive':
return knapsack_recursive(items, maxweight)
elif method == 'iterative':
return knapsack_iterative(items, maxweight)
elif method == 'ilp':
return knapsack_ilp(items, maxweight)
else:
raise NotImplementedError('[util_alg] knapsack method=%r' % (method,)) | python | def knapsack(items, maxweight, method='recursive'):
r"""
Solve the knapsack problem by finding the most valuable subsequence of
`items` subject to the constraint that it weighs no more than `maxweight`.
Args:
items (tuple): is a sequence of tuples `(value, weight, id_)`, where
`value` is a number and `weight` is a non-negative integer, and
`id_` is an item identifier.
maxweight (scalar): is a non-negative integer.
Returns:
tuple: (total_value, items_subset) - a pair whose first element is the
sum of values in the most valuable subsequence, and whose second
element is the subsequence. Subset may be different depending on
implementation (i.e. top-down recursive vs bottom-up iterative)
References:
http://codereview.stackexchange.com/questions/20569/dynamic-programming-solution-to-knapsack-problem
http://stackoverflow.com/questions/141779/solving-the-np-complete-problem-in-xkcd
http://www.es.ele.tue.nl/education/5MC10/Solutions/knapsack.pdf
CommandLine:
python -m utool.util_alg --test-knapsack
python -m utool.util_alg --test-knapsack:0
python -m utool.util_alg --exec-knapsack:1
Ignore:
annots_per_view = 2
maxweight = 2
items = [
(0.7005208343554686, 0.7005208343554686, 0),
(0.669270834329427, 0.669270834329427, 1),
(0.669270834329427, 0.669270834329427, 2),
(0.7005208343554686, 0.7005208343554686, 3),
(0.7005208343554686, 0.7005208343554686, 4),
(0.669270834329427, 0.669270834329427, 5),
(0.669270834329427, 0.669270834329427, 6),
(0.669270834329427, 0.669270834329427, 7),
(0.669270834329427, 0.669270834329427, 8),
(0.669270834329427, 0.669270834329427, 9),
(0.669270834329427, 0.669270834329427, 10),
(0.669270834329427, 0.669270834329427, 11),
(0.669270834329427, 0.669270834329427, 12),
(0.669270834329427, 0.669270834329427, 13),
(0.669270834329427, 0.669270834329427, 14),
(0.669270834329427, 0.669270834329427, 15),
(0.669270834329427, 0.669270834329427, 16),
(0.669270834329427, 0.669270834329427, 17),
(0.7005208343554686, 0.7005208343554686, 18),
(0.7005208343554686, 0.7005208343554686, 19),
(0.669270834329427, 0.669270834329427, 20),
(0.7005208343554686, 0.7005208343554686, 21),
(0.669270834329427, 0.669270834329427, 22),
(0.669270834329427, 0.669270834329427, 23),
(0.669270834329427, 0.669270834329427, 24),
(0.669270834329427, 0.669270834329427, 25),
(0.669270834329427, 0.669270834329427, 26),
(0.669270834329427, 0.669270834329427, 27),
(0.669270834329427, 0.669270834329427, 28),
(0.7005208343554686, 0.7005208343554686, 29),
(0.669270834329427, 0.669270834329427, 30),
(0.669270834329427, 0.669270834329427, 31),
(0.669270834329427, 0.669270834329427, 32),
(0.669270834329427, 0.669270834329427, 33),
(0.7005208343554686, 0.7005208343554686, 34),
(0.669270834329427, 0.669270834329427, 35),
(0.669270834329427, 0.669270834329427, 36),
(0.669270834329427, 0.669270834329427, 37),
(0.7005208343554686, 0.7005208343554686, 38),
(0.669270834329427, 0.669270834329427, 39),
(0.669270834329427, 0.669270834329427, 40),
(0.7005208343554686, 0.7005208343554686, 41),
(0.669270834329427, 0.669270834329427, 42),
(0.669270834329427, 0.669270834329427, 43),
(0.669270834329427, 0.669270834329427, 44),
]
values = ut.take_column(items, 0)
weights = ut.take_column(items, 1)
indices = ut.take_column(items, 2)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
>>> maxweight = 15
>>> total_value, items_subset = knapsack(items, maxweight, method='recursive')
>>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative')
>>> result = 'total_value = %.2f\n' % (total_value,)
>>> result += 'items_subset = %r' % (items_subset,)
>>> ut.assert_eq(total_value1, total_value)
>>> ut.assert_eq(items_subset1, items_subset)
>>> print(result)
total_value = 11.00
items_subset = [(2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> # Solve https://xkcd.com/287/
>>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8] * 2
>>> items = [(w, w, i) for i, w in enumerate(weights)]
>>> maxweight = 15.05
>>> total_value, items_subset = knapsack(items, maxweight, method='recursive')
>>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative')
>>> total_weight = sum([t[1] for t in items_subset])
>>> print('total_weight = %r' % (total_weight,))
>>> result = 'total_value = %.2f' % (total_value,)
>>> print('items_subset = %r' % (items_subset,))
>>> print('items_subset1 = %r' % (items_subset1,))
>>> #assert items_subset1 == items_subset, 'NOT EQ\n%r !=\n%r' % (items_subset1, items_subset)
>>> print(result)
total_value = 15.05
Timeit:
>>> import utool as ut
>>> setup = ut.codeblock(
>>> '''
import utool as ut
weights = [215, 275, 335, 355, 42, 58] * 40
items = [(w, w, i) for i, w in enumerate(weights)]
maxweight = 2505
#import numba
#knapsack_numba = numba.autojit(ut.knapsack_iterative)
#knapsack_numba = numba.autojit(ut.knapsack_iterative_numpy)
''')
>>> # Test load time
>>> stmt_list1 = ut.codeblock(
>>> '''
#ut.knapsack_recursive(items, maxweight)
ut.knapsack_iterative(items, maxweight)
ut.knapsack_ilp(items, maxweight)
#knapsack_numba(items, maxweight)
#ut.knapsack_iterative_numpy(items, maxweight)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list1, setup, int(5))
"""
if method == 'recursive':
return knapsack_recursive(items, maxweight)
elif method == 'iterative':
return knapsack_iterative(items, maxweight)
elif method == 'ilp':
return knapsack_ilp(items, maxweight)
else:
raise NotImplementedError('[util_alg] knapsack method=%r' % (method,)) | [
"def",
"knapsack",
"(",
"items",
",",
"maxweight",
",",
"method",
"=",
"'recursive'",
")",
":",
"if",
"method",
"==",
"'recursive'",
":",
"return",
"knapsack_recursive",
"(",
"items",
",",
"maxweight",
")",
"elif",
"method",
"==",
"'iterative'",
":",
"return",
"knapsack_iterative",
"(",
"items",
",",
"maxweight",
")",
"elif",
"method",
"==",
"'ilp'",
":",
"return",
"knapsack_ilp",
"(",
"items",
",",
"maxweight",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'[util_alg] knapsack method=%r'",
"%",
"(",
"method",
",",
")",
")"
] | r"""
Solve the knapsack problem by finding the most valuable subsequence of
`items` subject to the constraint that it weighs no more than `maxweight`.
Args:
items (tuple): is a sequence of tuples `(value, weight, id_)`, where
`value` is a number and `weight` is a non-negative integer, and
`id_` is an item identifier.
maxweight (scalar): is a non-negative integer.
Returns:
tuple: (total_value, items_subset) - a pair whose first element is the
sum of values in the most valuable subsequence, and whose second
element is the subsequence. Subset may be different depending on
implementation (i.e. top-down recursive vs bottom-up iterative)
References:
http://codereview.stackexchange.com/questions/20569/dynamic-programming-solution-to-knapsack-problem
http://stackoverflow.com/questions/141779/solving-the-np-complete-problem-in-xkcd
http://www.es.ele.tue.nl/education/5MC10/Solutions/knapsack.pdf
CommandLine:
python -m utool.util_alg --test-knapsack
python -m utool.util_alg --test-knapsack:0
python -m utool.util_alg --exec-knapsack:1
Ignore:
annots_per_view = 2
maxweight = 2
items = [
(0.7005208343554686, 0.7005208343554686, 0),
(0.669270834329427, 0.669270834329427, 1),
(0.669270834329427, 0.669270834329427, 2),
(0.7005208343554686, 0.7005208343554686, 3),
(0.7005208343554686, 0.7005208343554686, 4),
(0.669270834329427, 0.669270834329427, 5),
(0.669270834329427, 0.669270834329427, 6),
(0.669270834329427, 0.669270834329427, 7),
(0.669270834329427, 0.669270834329427, 8),
(0.669270834329427, 0.669270834329427, 9),
(0.669270834329427, 0.669270834329427, 10),
(0.669270834329427, 0.669270834329427, 11),
(0.669270834329427, 0.669270834329427, 12),
(0.669270834329427, 0.669270834329427, 13),
(0.669270834329427, 0.669270834329427, 14),
(0.669270834329427, 0.669270834329427, 15),
(0.669270834329427, 0.669270834329427, 16),
(0.669270834329427, 0.669270834329427, 17),
(0.7005208343554686, 0.7005208343554686, 18),
(0.7005208343554686, 0.7005208343554686, 19),
(0.669270834329427, 0.669270834329427, 20),
(0.7005208343554686, 0.7005208343554686, 21),
(0.669270834329427, 0.669270834329427, 22),
(0.669270834329427, 0.669270834329427, 23),
(0.669270834329427, 0.669270834329427, 24),
(0.669270834329427, 0.669270834329427, 25),
(0.669270834329427, 0.669270834329427, 26),
(0.669270834329427, 0.669270834329427, 27),
(0.669270834329427, 0.669270834329427, 28),
(0.7005208343554686, 0.7005208343554686, 29),
(0.669270834329427, 0.669270834329427, 30),
(0.669270834329427, 0.669270834329427, 31),
(0.669270834329427, 0.669270834329427, 32),
(0.669270834329427, 0.669270834329427, 33),
(0.7005208343554686, 0.7005208343554686, 34),
(0.669270834329427, 0.669270834329427, 35),
(0.669270834329427, 0.669270834329427, 36),
(0.669270834329427, 0.669270834329427, 37),
(0.7005208343554686, 0.7005208343554686, 38),
(0.669270834329427, 0.669270834329427, 39),
(0.669270834329427, 0.669270834329427, 40),
(0.7005208343554686, 0.7005208343554686, 41),
(0.669270834329427, 0.669270834329427, 42),
(0.669270834329427, 0.669270834329427, 43),
(0.669270834329427, 0.669270834329427, 44),
]
values = ut.take_column(items, 0)
weights = ut.take_column(items, 1)
indices = ut.take_column(items, 2)
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
>>> maxweight = 15
>>> total_value, items_subset = knapsack(items, maxweight, method='recursive')
>>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative')
>>> result = 'total_value = %.2f\n' % (total_value,)
>>> result += 'items_subset = %r' % (items_subset,)
>>> ut.assert_eq(total_value1, total_value)
>>> ut.assert_eq(items_subset1, items_subset)
>>> print(result)
total_value = 11.00
items_subset = [(2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> # Solve https://xkcd.com/287/
>>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8] * 2
>>> items = [(w, w, i) for i, w in enumerate(weights)]
>>> maxweight = 15.05
>>> total_value, items_subset = knapsack(items, maxweight, method='recursive')
>>> total_value1, items_subset1 = knapsack(items, maxweight, method='iterative')
>>> total_weight = sum([t[1] for t in items_subset])
>>> print('total_weight = %r' % (total_weight,))
>>> result = 'total_value = %.2f' % (total_value,)
>>> print('items_subset = %r' % (items_subset,))
>>> print('items_subset1 = %r' % (items_subset1,))
>>> #assert items_subset1 == items_subset, 'NOT EQ\n%r !=\n%r' % (items_subset1, items_subset)
>>> print(result)
total_value = 15.05
Timeit:
>>> import utool as ut
>>> setup = ut.codeblock(
>>> '''
import utool as ut
weights = [215, 275, 335, 355, 42, 58] * 40
items = [(w, w, i) for i, w in enumerate(weights)]
maxweight = 2505
#import numba
#knapsack_numba = numba.autojit(ut.knapsack_iterative)
#knapsack_numba = numba.autojit(ut.knapsack_iterative_numpy)
''')
>>> # Test load time
>>> stmt_list1 = ut.codeblock(
>>> '''
#ut.knapsack_recursive(items, maxweight)
ut.knapsack_iterative(items, maxweight)
ut.knapsack_ilp(items, maxweight)
#knapsack_numba(items, maxweight)
#ut.knapsack_iterative_numpy(items, maxweight)
''').split('\n')
>>> ut.util_dev.timeit_compare(stmt_list1, setup, int(5)) | [
"r",
"Solve",
"the",
"knapsack",
"problem",
"by",
"finding",
"the",
"most",
"valuable",
"subsequence",
"of",
"items",
"subject",
"that",
"weighs",
"no",
"more",
"than",
"maxweight",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1134-L1282 | train |
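Note: knapsack_recursive is not reproduced in this record; a minimal memoized top-down sketch of the same recurrence (take the better of skipping or taking each item) looks like this, with illustrative names only:

from functools import lru_cache

def knapsack_topdown(items, maxweight):
    @lru_cache(maxsize=None)
    def best(i, w):
        # best (value, chosen items) using items[i:] with remaining capacity w
        if i == len(items):
            return 0, ()
        value, weight, _ = items[i]
        skip = best(i + 1, w)
        if weight > w:
            return skip
        take_val, take_set = best(i + 1, w - weight)
        take = (take_val + value, (items[i],) + take_set)
        return max(take, skip)
    total, subset = best(0, maxweight)
    return total, list(subset)

items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
print(knapsack_topdown(items, 15))
# (11, [(2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)])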
Erotemic/utool | utool/util_alg.py | knapsack_ilp | def knapsack_ilp(items, maxweight, verbose=False):
"""
solves knapsack using an integer linear program
CommandLine:
python -m utool.util_alg knapsack_ilp
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> # Solve https://xkcd.com/287/
>>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]
>>> values = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]
>>> indices = ['mixed fruit', 'french fries', 'side salad',
>>> 'hot wings', 'mozzarella sticks', 'sampler plate',
>>> 'barbecue']
>>> items = [(v, w, i) for v, w, i in zip(values, weights, indices)]
>>> #items += [(3.95, 3.95, 'mystery plate')]
>>> maxweight = 15.05
>>> verbose = True
>>> total_value, items_subset = knapsack_ilp(items, maxweight, verbose)
>>> print('items_subset = %s' % (ut.repr3(items_subset, nl=1),))
"""
import pulp
# Given Input
values = [t[0] for t in items]
weights = [t[1] for t in items]
indices = [t[2] for t in items]
# Formulate integer program
prob = pulp.LpProblem("Knapsack", pulp.LpMaximize)
# Solution variables
x = pulp.LpVariable.dicts(name='x', indexs=indices,
lowBound=0, upBound=1, cat=pulp.LpInteger)
# maximize objective function
prob.objective = sum(v * x[i] for v, i in zip(values, indices))
# subject to
prob.add(sum(w * x[i] for w, i in zip(weights, indices)) <= maxweight)
# Solve with a solver like CPLEX, GLPK, or SCIP.
#pulp.CPLEX().solve(prob)
pulp.PULP_CBC_CMD().solve(prob)
# Read solution
flags = [x[i].varValue for i in indices]
total_value = sum([val for val, flag in zip(values, flags) if flag])
items_subset = [item for item, flag in zip(items, flags) if flag]
# Print summary
if verbose:
print(prob)
print('OPT:')
print('\n'.join([' %s = %s' % (x[i].name, x[i].varValue) for i in indices]))
print('total_value = %r' % (total_value,))
return total_value, items_subset | python | def knapsack_ilp(items, maxweight, verbose=False):
"""
solves knapsack using an integer linear program
CommandLine:
python -m utool.util_alg knapsack_ilp
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> # Solve https://xkcd.com/287/
>>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]
>>> values = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]
>>> indices = ['mixed fruit', 'french fries', 'side salad',
>>> 'hot wings', 'mozzarella sticks', 'sampler plate',
>>> 'barbecue']
>>> items = [(v, w, i) for v, w, i in zip(values, weights, indices)]
>>> #items += [(3.95, 3.95, 'mystery plate')]
>>> maxweight = 15.05
>>> verbose = True
>>> total_value, items_subset = knapsack_ilp(items, maxweight, verbose)
>>> print('items_subset = %s' % (ut.repr3(items_subset, nl=1),))
"""
import pulp
# Given Input
values = [t[0] for t in items]
weights = [t[1] for t in items]
indices = [t[2] for t in items]
# Formulate integer program
prob = pulp.LpProblem("Knapsack", pulp.LpMaximize)
# Solution variables
x = pulp.LpVariable.dicts(name='x', indexs=indices,
lowBound=0, upBound=1, cat=pulp.LpInteger)
# maximize objective function
prob.objective = sum(v * x[i] for v, i in zip(values, indices))
# subject to
prob.add(sum(w * x[i] for w, i in zip(weights, indices)) <= maxweight)
# Solve with a solver like CPLEX, GLPK, or SCIP.
#pulp.CPLEX().solve(prob)
pulp.PULP_CBC_CMD().solve(prob)
# Read solution
flags = [x[i].varValue for i in indices]
total_value = sum([val for val, flag in zip(values, flags) if flag])
items_subset = [item for item, flag in zip(items, flags) if flag]
# Print summary
if verbose:
print(prob)
print('OPT:')
print('\n'.join([' %s = %s' % (x[i].name, x[i].varValue) for i in indices]))
print('total_value = %r' % (total_value,))
return total_value, items_subset | [
"def",
"knapsack_ilp",
"(",
"items",
",",
"maxweight",
",",
"verbose",
"=",
"False",
")",
":",
"import",
"pulp",
"# Given Input",
"values",
"=",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"items",
"]",
"weights",
"=",
"[",
"t",
"[",
"1",
"]",
"for",
"t",
"in",
"items",
"]",
"indices",
"=",
"[",
"t",
"[",
"2",
"]",
"for",
"t",
"in",
"items",
"]",
"# Formulate integer program",
"prob",
"=",
"pulp",
".",
"LpProblem",
"(",
"\"Knapsack\"",
",",
"pulp",
".",
"LpMaximize",
")",
"# Solution variables",
"x",
"=",
"pulp",
".",
"LpVariable",
".",
"dicts",
"(",
"name",
"=",
"'x'",
",",
"indexs",
"=",
"indices",
",",
"lowBound",
"=",
"0",
",",
"upBound",
"=",
"1",
",",
"cat",
"=",
"pulp",
".",
"LpInteger",
")",
"# maximize objective function",
"prob",
".",
"objective",
"=",
"sum",
"(",
"v",
"*",
"x",
"[",
"i",
"]",
"for",
"v",
",",
"i",
"in",
"zip",
"(",
"values",
",",
"indices",
")",
")",
"# subject to",
"prob",
".",
"add",
"(",
"sum",
"(",
"w",
"*",
"x",
"[",
"i",
"]",
"for",
"w",
",",
"i",
"in",
"zip",
"(",
"weights",
",",
"indices",
")",
")",
"<=",
"maxweight",
")",
"# Solve using with solver like CPLEX, GLPK, or SCIP.",
"#pulp.CPLEX().solve(prob)",
"pulp",
".",
"PULP_CBC_CMD",
"(",
")",
".",
"solve",
"(",
"prob",
")",
"# Read solution",
"flags",
"=",
"[",
"x",
"[",
"i",
"]",
".",
"varValue",
"for",
"i",
"in",
"indices",
"]",
"total_value",
"=",
"sum",
"(",
"[",
"val",
"for",
"val",
",",
"flag",
"in",
"zip",
"(",
"values",
",",
"flags",
")",
"if",
"flag",
"]",
")",
"items_subset",
"=",
"[",
"item",
"for",
"item",
",",
"flag",
"in",
"zip",
"(",
"items",
",",
"flags",
")",
"if",
"flag",
"]",
"# Print summary",
"if",
"verbose",
":",
"print",
"(",
"prob",
")",
"print",
"(",
"'OPT:'",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"[",
"' %s = %s'",
"%",
"(",
"x",
"[",
"i",
"]",
".",
"name",
",",
"x",
"[",
"i",
"]",
".",
"varValue",
")",
"for",
"i",
"in",
"indices",
"]",
")",
")",
"print",
"(",
"'total_value = %r'",
"%",
"(",
"total_value",
",",
")",
")",
"return",
"total_value",
",",
"items_subset"
] | solves knapsack using an integer linear program
CommandLine:
python -m utool.util_alg knapsack_ilp
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> # Solve https://xkcd.com/287/
>>> weights = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]
>>> values = [2.15, 2.75, 3.35, 3.55, 4.2, 5.8, 6.55]
>>> indices = ['mixed fruit', 'french fries', 'side salad',
>>> 'hot wings', 'mozzarella sticks', 'sampler plate',
>>> 'barbecue']
>>> items = [(v, w, i) for v, w, i in zip(values, weights, indices)]
>>> #items += [(3.95, 3.95, 'mystery plate')]
>>> maxweight = 15.05
>>> verbose = True
>>> total_value, items_subset = knapsack_ilp(items, maxweight, verbose)
>>> print('items_subset = %s' % (ut.repr3(items_subset, nl=1),)) | [
"solves",
"knapsack",
"using",
"an",
"integer",
"linear",
"program"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1286-L1337 | train |
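Note: `indexs` is the keyword older pulp releases used for LpVariable.dicts (newer releases spell it `indices`). A stripped-down version of the same 0/1 model, assuming the pulp package is installed and using illustrative names:

import pulp

values, weights, cap = [4, 2, 6], [12, 1, 4], 15
prob = pulp.LpProblem('tiny_knapsack', pulp.LpMaximize)
x = [pulp.LpVariable('x%d' % i, cat=pulp.LpBinary) for i in range(3)]
prob += pulp.lpSum(v * xi for v, xi in zip(values, x))          # objective
prob += pulp.lpSum(w * xi for w, xi in zip(weights, x)) <= cap  # capacity
prob.solve()
print([i for i, xi in enumerate(x) if xi.varValue == 1])  # [1, 2] -> value 8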
Erotemic/utool | utool/util_alg.py | knapsack_iterative | def knapsack_iterative(items, maxweight):
# Knapsack requires integral weights
weights = [t[1] for t in items]
max_exp = max([number_of_decimals(w_) for w_ in weights])
coeff = 10 ** max_exp
# Adjust weights to be integral
int_maxweight = int(round(maxweight * coeff))
int_items = [(v, int(round(w * coeff)), idx) for v, w, idx in items]
"""
items = int_items
maxweight = int_maxweight
"""
return knapsack_iterative_int(int_items, int_maxweight) | python | def knapsack_iterative(items, maxweight):
# Knapsack requires integral weights
weights = [t[1] for t in items]
max_exp = max([number_of_decimals(w_) for w_ in weights])
coeff = 10 ** max_exp
# Adjust weights to be integral
int_maxweight = int(round(maxweight * coeff))
int_items = [(v, int(round(w * coeff)), idx) for v, w, idx in items]
"""
items = int_items
maxweight = int_maxweight
"""
return knapsack_iterative_int(int_items, int_maxweight) | [
"def",
"knapsack_iterative",
"(",
"items",
",",
"maxweight",
")",
":",
"# Knapsack requires integral weights",
"weights",
"=",
"[",
"t",
"[",
"1",
"]",
"for",
"t",
"in",
"items",
"]",
"max_exp",
"=",
"max",
"(",
"[",
"number_of_decimals",
"(",
"w_",
")",
"for",
"w_",
"in",
"weights",
"]",
")",
"coeff",
"=",
"10",
"**",
"max_exp",
"# Adjust weights to be integral",
"int_maxweight",
"=",
"int",
"(",
"maxweight",
"*",
"coeff",
")",
"int_items",
"=",
"[",
"(",
"v",
",",
"int",
"(",
"w",
"*",
"coeff",
")",
",",
"idx",
")",
"for",
"v",
",",
"w",
",",
"idx",
"in",
"items",
"]",
"return",
"knapsack_iterative_int",
"(",
"int_items",
",",
"int_maxweight",
")"
] | items = int_items
maxweight = int_maxweight | [
"items",
"=",
"int_items",
"maxweight",
"=",
"int_maxweight"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1385-L1397 | train |
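Note: the scaling step is where this function can silently go wrong: multiplying a float weight by a power of ten and truncating loses a unit on IEEE doubles (int(2.15 * 100) == 214), hence the round() above. An illustration, with a hypothetical stand-in for utool's number_of_decimals helper:

def num_decimals(x):  # hypothetical stand-in for number_of_decimals
    frac = repr(float(x)).split('.')[1]
    return 0 if frac == '0' else len(frac)

weights = [2.15, 2.75, 3.5]
coeff = 10 ** max(num_decimals(w) for w in weights)
print(int(2.15 * coeff), int(round(2.15 * coeff)))  # 214 215
print([int(round(w * coeff)) for w in weights])     # [215, 275, 350]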
Erotemic/utool | utool/util_alg.py | knapsack_iterative_int | def knapsack_iterative_int(items, maxweight):
r"""
Iterative knapsack method
Math:
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
dpmat is the dynamic programming memoization matrix.
dpmat[i, w] is the best total value using the first i+1 items with total weight at most w
T is idx_subset, the set of indices in the optimal solution
CommandLine:
python -m utool.util_alg --exec-knapsack_iterative_int --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> weights = [1, 3, 3, 5, 2, 1] * 2
>>> items = [(w, w, i) for i, w in enumerate(weights)]
>>> maxweight = 10
>>> items = [(.8, 700, 0)]
>>> maxweight = 2000
>>> print('maxweight = %r' % (maxweight,))
>>> print('items = %r' % (items,))
>>> total_value, items_subset = knapsack_iterative_int(items, maxweight)
>>> total_weight = sum([t[1] for t in items_subset])
>>> print('total_weight = %r' % (total_weight,))
>>> print('items_subset = %r' % (items_subset,))
>>> result = 'total_value = %.2f' % (total_value,)
>>> print(result)
total_value = 0.80
Ignore:
DPMAT = [[dpmat[r][c] for c in range(maxweight)] for r in range(len(items))]
KMAT = [[kmat[r][c] for c in range(maxweight)] for r in range(len(items))]
"""
values = [t[0] for t in items]
weights = [t[1] for t in items]
maxsize = maxweight + 1
# Sparse representation seems better
dpmat = defaultdict(lambda: defaultdict(lambda: np.inf))
kmat = defaultdict(lambda: defaultdict(lambda: False))
idx_subset = [] # NOQA
for w in range(maxsize):
dpmat[0][w] = 0
# For each item consider to include it or not
for idx in range(len(items)):
item_val = values[idx]
item_weight = weights[idx]
# consider at each possible bag size
for w in range(maxsize):
valid_item = item_weight <= w
if idx > 0:
prev_val = dpmat[idx - 1][w]
prev_noitem_val = dpmat[idx - 1][w - item_weight]
else:
prev_val = 0
prev_noitem_val = 0
withitem_val = item_val + prev_noitem_val
more_valuable = withitem_val > prev_val
if valid_item and more_valuable:
dpmat[idx][w] = withitem_val
kmat[idx][w] = True
else:
dpmat[idx][w] = prev_val
kmat[idx][w] = False
# Trace backwards to get the items used in the solution
K = maxweight
for idx in reversed(range(len(items))):
if kmat[idx][K]:
idx_subset.append(idx)
K = K - weights[idx]
idx_subset = sorted(idx_subset)
items_subset = [items[i] for i in idx_subset]
total_value = dpmat[len(items) - 1][maxweight]
return total_value, items_subset | python | def knapsack_iterative_int(items, maxweight):
r"""
Iterative knapsack method
Math:
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
dpmat is the dynamic programming memoization matrix.
dpmat[i, w] is the best total value using the first i+1 items with total weight at most w
T is idx_subset, the set of indices in the optimal solution
CommandLine:
python -m utool.util_alg --exec-knapsack_iterative_int --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> weights = [1, 3, 3, 5, 2, 1] * 2
>>> items = [(w, w, i) for i, w in enumerate(weights)]
>>> maxweight = 10
>>> items = [(.8, 700, 0)]
>>> maxweight = 2000
>>> print('maxweight = %r' % (maxweight,))
>>> print('items = %r' % (items,))
>>> total_value, items_subset = knapsack_iterative_int(items, maxweight)
>>> total_weight = sum([t[1] for t in items_subset])
>>> print('total_weight = %r' % (total_weight,))
>>> print('items_subset = %r' % (items_subset,))
>>> result = 'total_value = %.2f' % (total_value,)
>>> print(result)
total_value = 0.80
Ignore:
DPMAT = [[dpmat[r][c] for c in range(maxweight)] for r in range(len(items))]
KMAT = [[kmat[r][c] for c in range(maxweight)] for r in range(len(items))]
"""
values = [t[0] for t in items]
weights = [t[1] for t in items]
maxsize = maxweight + 1
# Sparse representation seems better
dpmat = defaultdict(lambda: defaultdict(lambda: np.inf))
kmat = defaultdict(lambda: defaultdict(lambda: False))
idx_subset = [] # NOQA
for w in range(maxsize):
dpmat[0][w] = 0
# For each item consider to include it or not
for idx in range(len(items)):
item_val = values[idx]
item_weight = weights[idx]
# consider at each possible bag size
for w in range(maxsize):
valid_item = item_weight <= w
if idx > 0:
prev_val = dpmat[idx - 1][w]
prev_noitem_val = dpmat[idx - 1][w - item_weight]
else:
prev_val = 0
prev_noitem_val = 0
withitem_val = item_val + prev_noitem_val
more_valuable = withitem_val > prev_val
if valid_item and more_valuable:
dpmat[idx][w] = withitem_val
kmat[idx][w] = True
else:
dpmat[idx][w] = prev_val
kmat[idx][w] = False
# Trace backwards to get the items used in the solution
K = maxweight
for idx in reversed(range(len(items))):
if kmat[idx][K]:
idx_subset.append(idx)
K = K - weights[idx]
idx_subset = sorted(idx_subset)
items_subset = [items[i] for i in idx_subset]
total_value = dpmat[len(items) - 1][maxweight]
return total_value, items_subset | [
"def",
"knapsack_iterative_int",
"(",
"items",
",",
"maxweight",
")",
":",
"values",
"=",
"[",
"t",
"[",
"0",
"]",
"for",
"t",
"in",
"items",
"]",
"weights",
"=",
"[",
"t",
"[",
"1",
"]",
"for",
"t",
"in",
"items",
"]",
"maxsize",
"=",
"maxweight",
"+",
"1",
"# Sparse representation seems better",
"dpmat",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"lambda",
":",
"np",
".",
"inf",
")",
")",
"kmat",
"=",
"defaultdict",
"(",
"lambda",
":",
"defaultdict",
"(",
"lambda",
":",
"False",
")",
")",
"idx_subset",
"=",
"[",
"]",
"# NOQA",
"for",
"w",
"in",
"range",
"(",
"maxsize",
")",
":",
"dpmat",
"[",
"0",
"]",
"[",
"w",
"]",
"=",
"0",
"# For each item consider to include it or not",
"for",
"idx",
"in",
"range",
"(",
"len",
"(",
"items",
")",
")",
":",
"item_val",
"=",
"values",
"[",
"idx",
"]",
"item_weight",
"=",
"weights",
"[",
"idx",
"]",
"# consider at each possible bag size",
"for",
"w",
"in",
"range",
"(",
"maxsize",
")",
":",
"valid_item",
"=",
"item_weight",
"<=",
"w",
"if",
"idx",
">",
"0",
":",
"prev_val",
"=",
"dpmat",
"[",
"idx",
"-",
"1",
"]",
"[",
"w",
"]",
"prev_noitem_val",
"=",
"dpmat",
"[",
"idx",
"-",
"1",
"]",
"[",
"w",
"-",
"item_weight",
"]",
"else",
":",
"prev_val",
"=",
"0",
"prev_noitem_val",
"=",
"0",
"withitem_val",
"=",
"item_val",
"+",
"prev_noitem_val",
"more_valuable",
"=",
"withitem_val",
">",
"prev_val",
"if",
"valid_item",
"and",
"more_valuable",
":",
"dpmat",
"[",
"idx",
"]",
"[",
"w",
"]",
"=",
"withitem_val",
"kmat",
"[",
"idx",
"]",
"[",
"w",
"]",
"=",
"True",
"else",
":",
"dpmat",
"[",
"idx",
"]",
"[",
"w",
"]",
"=",
"prev_val",
"kmat",
"[",
"idx",
"]",
"[",
"w",
"]",
"=",
"False",
"# Trace backwards to get the items used in the solution",
"K",
"=",
"maxweight",
"for",
"idx",
"in",
"reversed",
"(",
"range",
"(",
"len",
"(",
"items",
")",
")",
")",
":",
"if",
"kmat",
"[",
"idx",
"]",
"[",
"K",
"]",
":",
"idx_subset",
".",
"append",
"(",
"idx",
")",
"K",
"=",
"K",
"-",
"weights",
"[",
"idx",
"]",
"idx_subset",
"=",
"sorted",
"(",
"idx_subset",
")",
"items_subset",
"=",
"[",
"items",
"[",
"i",
"]",
"for",
"i",
"in",
"idx_subset",
"]",
"total_value",
"=",
"dpmat",
"[",
"len",
"(",
"items",
")",
"-",
"1",
"]",
"[",
"maxweight",
"]",
"return",
"total_value",
",",
"items_subset"
] | r"""
Iterative knapsack method
Math:
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
dpmat is the dynamic programming memoization matrix.
dpmat[i, w] is the best total value using the first i+1 items with total weight at most w
T is idx_subset, the set of indices in the optimal solution
CommandLine:
python -m utool.util_alg --exec-knapsack_iterative_int --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> weights = [1, 3, 3, 5, 2, 1] * 2
>>> items = [(w, w, i) for i, w in enumerate(weights)]
>>> maxweight = 10
>>> items = [(.8, 700, 0)]
>>> maxweight = 2000
>>> print('maxweight = %r' % (maxweight,))
>>> print('items = %r' % (items,))
>>> total_value, items_subset = knapsack_iterative_int(items, maxweight)
>>> total_weight = sum([t[1] for t in items_subset])
>>> print('total_weight = %r' % (total_weight,))
>>> print('items_subset = %r' % (items_subset,))
>>> result = 'total_value = %.2f' % (total_value,)
>>> print(result)
total_value = 0.80
Ignore:
DPMAT = [[dpmat[r][c] for c in range(maxweight)] for r in range(len(items))]
KMAT = [[kmat[r][c] for c in range(maxweight)] for r in range(len(items))] | [
"r",
"Iterative",
"knapsack",
"method"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1400-L1477 | train |
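Note: the sparse defaultdict table above favors simplicity; the classic dense, space-optimized variant keeps a single value row and scans capacities in reverse so each item is used at most once. A standalone sketch with illustrative names:

def knapsack_01(values, weights, W):
    dp = [0] * (W + 1)                        # dp[w] = best value at capacity w
    keep = [[False] * (W + 1) for _ in values]
    for i, (v, wt) in enumerate(zip(values, weights)):
        for w in range(W, wt - 1, -1):        # reverse scan: 0/1, not unbounded
            if dp[w - wt] + v > dp[w]:
                dp[w] = dp[w - wt] + v
                keep[i][w] = True
    w, chosen = W, []
    for i in reversed(range(len(values))):    # backtrace the recorded decisions
        if keep[i][w]:
            chosen.append(i)
            w -= weights[i]
    return dp[W], sorted(chosen)

print(knapsack_01([4, 2, 6, 1, 2], [12, 1, 4, 1, 2], 15))  # (11, [1, 2, 3, 4])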
Erotemic/utool | utool/util_alg.py | knapsack_iterative_numpy | def knapsack_iterative_numpy(items, maxweight):
"""
Iterative knapsack method
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
dpmat is the dynamic programming memoization matrix.
dpmat[i, w] is the total value of the items with weight at most W
T is the set of indicies in the optimal solution
"""
#import numpy as np
items = np.array(items)
weights = items.T[1]
# Find maximum decimal place (this problem is in NP)
max_exp = max([number_of_decimals(w_) for w_ in weights])
coeff = 10 ** max_exp
# Adjust weights to be integral
weights = np.round(weights * coeff).astype(int)  # round to avoid float truncation; np.int was removed from numpy
values = items.T[0]
MAXWEIGHT = int(round(maxweight * coeff))
W_SIZE = MAXWEIGHT + 1
dpmat = np.full((len(items), W_SIZE), np.inf)
kmat = np.full((len(items), W_SIZE), False, dtype=bool)  # np.bool was removed from numpy
idx_subset = []
# Seed row 0 with item 0 so the first item can actually be selected
for w in range(W_SIZE):
    fits = weights[0] <= w
    dpmat[0][w] = values[0] if fits else 0
    kmat[0][w] = fits
for idx in range(1, len(items)):
item_val = values[idx]
item_weight = weights[idx]
for w in range(W_SIZE):
valid_item = item_weight <= w
prev_val = dpmat[idx - 1][w]
if valid_item:
prev_noitem_val = dpmat[idx - 1][w - item_weight]
withitem_val = item_val + prev_noitem_val
more_valuable = withitem_val > prev_val
else:
more_valuable = False
dpmat[idx][w] = withitem_val if more_valuable else prev_val
kmat[idx][w] = more_valuable
K = MAXWEIGHT
for idx in reversed(range(len(items))):  # include item 0 in the backtrace
if kmat[idx, K]:
idx_subset.append(idx)
K = K - weights[idx]
idx_subset = sorted(idx_subset)
items_subset = [items[i] for i in idx_subset]
total_value = dpmat[len(items) - 1][MAXWEIGHT]
return total_value, items_subset | python | def knapsack_iterative_numpy(items, maxweight):
"""
Iterative knapsack method
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
dpmat is the dynamic programming memoization matrix.
dpmat[i, w] is the total value of the items with weight at most W
T is the set of indicies in the optimal solution
"""
#import numpy as np
items = np.array(items)
weights = items.T[1]
# Find maximum decimal place (this problem is in NP)
max_exp = max([number_of_decimals(w_) for w_ in weights])
coeff = 10 ** max_exp
# Adjust weights to be integral
weights = np.round(weights * coeff).astype(int)  # round to avoid float truncation; np.int was removed from numpy
values = items.T[0]
MAXWEIGHT = int(round(maxweight * coeff))
W_SIZE = MAXWEIGHT + 1
dpmat = np.full((len(items), W_SIZE), np.inf)
kmat = np.full((len(items), W_SIZE), False, dtype=bool)  # np.bool was removed from numpy
idx_subset = []
# Seed row 0 with item 0 so the first item can actually be selected
for w in range(W_SIZE):
    fits = weights[0] <= w
    dpmat[0][w] = values[0] if fits else 0
    kmat[0][w] = fits
for idx in range(1, len(items)):
item_val = values[idx]
item_weight = weights[idx]
for w in range(W_SIZE):
valid_item = item_weight <= w
prev_val = dpmat[idx - 1][w]
if valid_item:
prev_noitem_val = dpmat[idx - 1][w - item_weight]
withitem_val = item_val + prev_noitem_val
more_valuable = withitem_val > prev_val
else:
more_valuable = False
dpmat[idx][w] = withitem_val if more_valuable else prev_val
kmat[idx][w] = more_valuable
K = MAXWEIGHT
for idx in reversed(range(len(items))):  # include item 0 in the backtrace
if kmat[idx, K]:
idx_subset.append(idx)
K = K - weights[idx]
idx_subset = sorted(idx_subset)
items_subset = [items[i] for i in idx_subset]
total_value = dpmat[len(items) - 1][MAXWEIGHT]
return total_value, items_subset | [
"def",
"knapsack_iterative_numpy",
"(",
"items",
",",
"maxweight",
")",
":",
"#import numpy as np",
"items",
"=",
"np",
".",
"array",
"(",
"items",
")",
"weights",
"=",
"items",
".",
"T",
"[",
"1",
"]",
"# Find maximum decimal place (this problem is in NP)",
"max_exp",
"=",
"max",
"(",
"[",
"number_of_decimals",
"(",
"w_",
")",
"for",
"w_",
"in",
"weights",
"]",
")",
"coeff",
"=",
"10",
"**",
"max_exp",
"# Adjust weights to be integral",
"weights",
"=",
"(",
"weights",
"*",
"coeff",
")",
".",
"astype",
"(",
"np",
".",
"int",
")",
"values",
"=",
"items",
".",
"T",
"[",
"0",
"]",
"MAXWEIGHT",
"=",
"int",
"(",
"maxweight",
"*",
"coeff",
")",
"W_SIZE",
"=",
"MAXWEIGHT",
"+",
"1",
"dpmat",
"=",
"np",
".",
"full",
"(",
"(",
"len",
"(",
"items",
")",
",",
"W_SIZE",
")",
",",
"np",
".",
"inf",
")",
"kmat",
"=",
"np",
".",
"full",
"(",
"(",
"len",
"(",
"items",
")",
",",
"W_SIZE",
")",
",",
"0",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"idx_subset",
"=",
"[",
"]",
"for",
"w",
"in",
"range",
"(",
"W_SIZE",
")",
":",
"dpmat",
"[",
"0",
"]",
"[",
"w",
"]",
"=",
"0",
"for",
"idx",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"items",
")",
")",
":",
"item_val",
"=",
"values",
"[",
"idx",
"]",
"item_weight",
"=",
"weights",
"[",
"idx",
"]",
"for",
"w",
"in",
"range",
"(",
"W_SIZE",
")",
":",
"valid_item",
"=",
"item_weight",
"<=",
"w",
"prev_val",
"=",
"dpmat",
"[",
"idx",
"-",
"1",
"]",
"[",
"w",
"]",
"if",
"valid_item",
":",
"prev_noitem_val",
"=",
"dpmat",
"[",
"idx",
"-",
"1",
"]",
"[",
"w",
"-",
"item_weight",
"]",
"withitem_val",
"=",
"item_val",
"+",
"prev_noitem_val",
"more_valuable",
"=",
"withitem_val",
">",
"prev_val",
"else",
":",
"more_valuable",
"=",
"False",
"dpmat",
"[",
"idx",
"]",
"[",
"w",
"]",
"=",
"withitem_val",
"if",
"more_valuable",
"else",
"prev_val",
"kmat",
"[",
"idx",
"]",
"[",
"w",
"]",
"=",
"more_valuable",
"K",
"=",
"MAXWEIGHT",
"for",
"idx",
"in",
"reversed",
"(",
"range",
"(",
"1",
",",
"len",
"(",
"items",
")",
")",
")",
":",
"if",
"kmat",
"[",
"idx",
",",
"K",
"]",
":",
"idx_subset",
".",
"append",
"(",
"idx",
")",
"K",
"=",
"K",
"-",
"weights",
"[",
"idx",
"]",
"idx_subset",
"=",
"sorted",
"(",
"idx_subset",
")",
"items_subset",
"=",
"[",
"items",
"[",
"i",
"]",
"for",
"i",
"in",
"idx_subset",
"]",
"total_value",
"=",
"dpmat",
"[",
"len",
"(",
"items",
")",
"-",
"1",
"]",
"[",
"MAXWEIGHT",
"]",
"return",
"total_value",
",",
"items_subset"
] | Iterative knapsack method
maximize \sum_{i \in T} v_i
subject to \sum_{i \in T} w_i \leq W
Notes:
dpmat is the dynamic programming memoization matrix.
dpmat[i, w] is the best total value using the first i+1 items with total weight at most w
T is the set of indices in the optimal solution | [
"Iterative",
"knapsack",
"method"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1480-L1532 | train |
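Note: when only the optimal value is needed, the inner capacity loop vectorizes cleanly, which is where numpy actually pays off. A value-only sketch, assuming non-negative integral weights:

import numpy as np

def knapsack_value(values, weights, W):
    dp = np.zeros(W + 1)
    for v, wt in zip(values, weights):
        take = dp.copy()
        take[wt:] = dp[:W + 1 - wt] + v  # take the item at every capacity >= wt
        dp = np.maximum(dp, take)
    return dp[W]

print(knapsack_value([4, 2, 6, 1, 2], [12, 1, 4, 1, 2], 15))  # 11.0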
Erotemic/utool | utool/util_alg.py | knapsack_greedy | def knapsack_greedy(items, maxweight):
r"""
non-optimal greedy version of the knapsack algorithm;
does not sort input. Sort the input by largest value
first if desired.
Args:
`items` (tuple): is a sequence of tuples `(value, weight, id_)`, where `value`
is a scalar and `weight` is a non-negative integer, and `id_` is an
item identifier.
`maxweight` (scalar): is a non-negative integer.
CommandLine:
python -m utool.util_alg --exec-knapsack_greedy
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
>>> maxweight = 15
>>> total_value, items_subset = knapsack_greedy(items, maxweight)
>>> result = 'total_value = %r\n' % (total_value,)
>>> result += 'items_subset = %r' % (items_subset,)
>>> print(result)
total_value = 7
items_subset = [(4, 12, 0), (2, 1, 1), (1, 1, 3)]
"""
items_subset = []
total_weight = 0
total_value = 0
for item in items:
value, weight = item[0:2]
if total_weight + weight > maxweight:
continue
else:
items_subset.append(item)
total_weight += weight
total_value += value
return total_value, items_subset | python | def knapsack_greedy(items, maxweight):
r"""
non-optimal greedy version of the knapsack algorithm;
does not sort input. Sort the input by largest value
first if desired.
Args:
`items` (tuple): is a sequence of tuples `(value, weight, id_)`, where `value`
is a scalar and `weight` is a non-negative integer, and `id_` is an
item identifier.
`maxweight` (scalar): is a non-negative integer.
CommandLine:
python -m utool.util_alg --exec-knapsack_greedy
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
>>> maxweight = 15
>>> total_value, items_subset = knapsack_greedy(items, maxweight)
>>> result = 'total_value = %r\n' % (total_value,)
>>> result += 'items_subset = %r' % (items_subset,)
>>> print(result)
total_value = 7
items_subset = [(4, 12, 0), (2, 1, 1), (1, 1, 3)]
"""
items_subset = []
total_weight = 0
total_value = 0
for item in items:
value, weight = item[0:2]
if total_weight + weight > maxweight:
continue
else:
items_subset.append(item)
total_weight += weight
total_value += value
return total_value, items_subset | [
"def",
"knapsack_greedy",
"(",
"items",
",",
"maxweight",
")",
":",
"items_subset",
"=",
"[",
"]",
"total_weight",
"=",
"0",
"total_value",
"=",
"0",
"for",
"item",
"in",
"items",
":",
"value",
",",
"weight",
"=",
"item",
"[",
"0",
":",
"2",
"]",
"if",
"total_weight",
"+",
"weight",
">",
"maxweight",
":",
"continue",
"else",
":",
"items_subset",
".",
"append",
"(",
"item",
")",
"total_weight",
"+=",
"weight",
"total_value",
"+=",
"value",
"return",
"total_value",
",",
"items_subset"
] | r"""
non-optimal greedy version of knapsack algorithm
does not sort input. Sort the input by largest value
first if desired.
Args:
`items` (tuple): is a sequence of tuples `(value, weight, id_)`, where `value`
is a scalar and `weight` is a non-negative integer, and `id_` is an
item identifier.
`maxweight` (scalar): is a non-negative integer.
CommandLine:
python -m utool.util_alg --exec-knapsack_greedy
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
>>> maxweight = 15
>>> total_value, items_subset = knapsack_greedy(items, maxweight)
>>> result = 'total_value = %r\n' % (total_value,)
>>> result += 'items_subset = %r' % (items_subset,)
>>> print(result)
total_value = 7
items_subset = [(4, 12, 0), (2, 1, 1), (1, 1, 3)] | [
"r",
"non",
"-",
"optimal",
"greedy",
"version",
"of",
"knapsack",
"algorithm",
"does",
"not",
"sort",
"input",
".",
"Sort",
"the",
"input",
"by",
"largest",
"value",
"first",
"if",
"desired",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1548-L1587 | train |
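Because knapsack_greedy consumes items in the order given, callers wanting the usual greedy heuristic would sort by value density first (a caller-side step suggested by the docstring, not something the function does):

items = [(4, 12, 0), (2, 1, 1), (6, 4, 2), (1, 1, 3), (2, 2, 4)]
# Highest value per unit weight first; max() guards against zero weights.
items_by_density = sorted(items, key=lambda t: t[0] / max(t[1], 1), reverse=True)
# Tracing the greedy loop above on items_by_density with maxweight=15 selects
# items 1, 2, 3 and 4 for a total value of 11, versus 7 for the unsorted order.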
Erotemic/utool | utool/util_alg.py | choose | def choose(n, k):
"""
N choose k
binomial combination (without replacement)
scipy.special.binom
"""
import scipy.misc
return scipy.misc.comb(n, k, exact=True, repetition=False) | python | def choose(n, k):
"""
N choose k
binomial combination (without replacement)
scipy.special.binom
"""
import scipy.misc
return scipy.misc.comb(n, k, exact=True, repetition=False) | [
"def",
"choose",
"(",
"n",
",",
"k",
")",
":",
"import",
"scipy",
".",
"misc",
"return",
"scipy",
".",
"misc",
".",
"comb",
"(",
"n",
",",
"k",
",",
"exact",
"=",
"True",
",",
"repetition",
"=",
"False",
")"
] | N choose k
binomial combination (without replacement)
scipy.special.binom | [
"N",
"choose",
"k"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L1653-L1661 | train |
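Note that scipy.misc.comb was removed in later SciPy releases; the same quantity is available from APIs that still exist (a compatibility note, not part of the record):

from math import comb  # Python >= 3.8

assert comb(5, 2) == 10
# SciPy equivalent: from scipy.special import comb; comb(5, 2, exact=True)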
Erotemic/utool | utool/util_alg.py | almost_eq | def almost_eq(arr1, arr2, thresh=1E-11, ret_error=False):
""" checks if floating point number are equal to a threshold
"""
error = np.abs(arr1 - arr2)
passed = error < thresh
if ret_error:
return passed, error
return passed | python | def almost_eq(arr1, arr2, thresh=1E-11, ret_error=False):
""" checks if floating point number are equal to a threshold
"""
error = np.abs(arr1 - arr2)
passed = error < thresh
if ret_error:
return passed, error
return passed | [
"def",
"almost_eq",
"(",
"arr1",
",",
"arr2",
",",
"thresh",
"=",
"1E-11",
",",
"ret_error",
"=",
"False",
")",
":",
"error",
"=",
"np",
".",
"abs",
"(",
"arr1",
"-",
"arr2",
")",
"passed",
"=",
"error",
"<",
"thresh",
"if",
"ret_error",
":",
"return",
"passed",
",",
"error",
"return",
"passed"
] | checks if floating point numbers are equal within a threshold | [
"checks",
"if",
"floating",
"point",
"numbers",
"are",
"equal",
"within",
"a",
"threshold"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2002-L2009 | train |
Erotemic/utool | utool/util_alg.py | norm_zero_one | def norm_zero_one(array, dim=None):
"""
normalizes a numpy array from 0 to 1 based on its extent
Args:
array (ndarray):
dim (int):
Returns:
ndarray:
CommandLine:
python -m utool.util_alg --test-norm_zero_one
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> array = np.array([ 22, 1, 3, 2, 10, 42, ])
>>> dim = None
>>> array_norm = norm_zero_one(array, dim)
>>> result = ut.repr2(list(array_norm), precision=3)
>>> print(result)
[0.512, 0.000, 0.049, 0.024, 0.220, 1.000]
"""
if not util_type.is_float(array):
array = array.astype(np.float32)
array_max = array.max(dim)
array_min = array.min(dim)
array_exnt = np.subtract(array_max, array_min)
array_norm = np.divide(np.subtract(array, array_min), array_exnt)
return array_norm | python | def norm_zero_one(array, dim=None):
"""
normalizes a numpy array from 0 to 1 based on its extent
Args:
array (ndarray):
dim (int):
Returns:
ndarray:
CommandLine:
python -m utool.util_alg --test-norm_zero_one
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> array = np.array([ 22, 1, 3, 2, 10, 42, ])
>>> dim = None
>>> array_norm = norm_zero_one(array, dim)
>>> result = ut.repr2(list(array_norm), precision=3)
>>> print(result)
[0.512, 0.000, 0.049, 0.024, 0.220, 1.000]
"""
if not util_type.is_float(array):
array = array.astype(np.float32)
array_max = array.max(dim)
array_min = array.min(dim)
array_exnt = np.subtract(array_max, array_min)
array_norm = np.divide(np.subtract(array, array_min), array_exnt)
return array_norm | [
"def",
"norm_zero_one",
"(",
"array",
",",
"dim",
"=",
"None",
")",
":",
"if",
"not",
"util_type",
".",
"is_float",
"(",
"array",
")",
":",
"array",
"=",
"array",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"array_max",
"=",
"array",
".",
"max",
"(",
"dim",
")",
"array_min",
"=",
"array",
".",
"min",
"(",
"dim",
")",
"array_exnt",
"=",
"np",
".",
"subtract",
"(",
"array_max",
",",
"array_min",
")",
"array_norm",
"=",
"np",
".",
"divide",
"(",
"np",
".",
"subtract",
"(",
"array",
",",
"array_min",
")",
",",
"array_exnt",
")",
"return",
"array_norm"
] | normalizes a numpy array from 0 to 1 based on its extent
Args:
array (ndarray):
dim (int):
Returns:
ndarray:
CommandLine:
python -m utool.util_alg --test-norm_zero_one
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> array = np.array([ 22, 1, 3, 2, 10, 42, ])
>>> dim = None
>>> array_norm = norm_zero_one(array, dim)
>>> result = ut.repr2(list(array_norm), precision=3)
>>> print(result)
[0.512, 0.000, 0.049, 0.024, 0.220, 1.000] | [
"normalizes",
"a",
"numpy",
"array",
"from",
"0",
"to",
"1",
"based",
"in",
"its",
"extent"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2078-L2109 | train |
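The function above is the standard min-max rescaling (x - min) / (max - min); one caveat (an observation about the formula, not documented behavior) is that a constant array yields a zero extent and hence a divide-by-zero. A guarded sketch:

import numpy as np

def minmax_sketch(x):
    x = np.asarray(x, dtype=np.float64)
    extent = x.max() - x.min()
    if extent == 0:
        return np.zeros_like(x)  # constant input: avoid 0/0
    return (x - x.min()) / extent

assert list(minmax_sketch([1, 3, 5])) == [0.0, 0.5, 1.0]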
Erotemic/utool | utool/util_alg.py | group_indices | def group_indices(groupid_list):
"""
groups indices of each item in ``groupid_list``
Args:
groupid_list (list): list of group ids
SeeAlso:
vt.group_indices - optimized numpy version
ut.apply_grouping
CommandLine:
python -m utool.util_alg --test-group_indices
python3 -m utool.util_alg --test-group_indices
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> groupid_list = ['b', 1, 'b', 1, 'b', 1, 'b', 'c', 'c', 'c', 'c']
>>> (keys, groupxs) = ut.group_indices(groupid_list)
>>> result = ut.repr3((keys, groupxs), nobraces=1, nl=1)
>>> print(result)
[1, 'b', 'c'],
[[1, 3, 5], [0, 2, 4, 6], [7, 8, 9, 10]],
"""
item_list = range(len(groupid_list))
grouped_dict = util_dict.group_items(item_list, groupid_list)
# Sort by groupid for cache efficiency
keys_ = list(grouped_dict.keys())
try:
keys = sorted(keys_)
except TypeError:
# Python 3 does not allow sorting mixed types
keys = util_list.sortedby2(keys_, keys_)
groupxs = util_dict.dict_take(grouped_dict, keys)
return keys, groupxs | python | def group_indices(groupid_list):
"""
groups indices of each item in ``groupid_list``
Args:
groupid_list (list): list of group ids
SeeAlso:
vt.group_indices - optimized numpy version
ut.apply_grouping
CommandLine:
python -m utool.util_alg --test-group_indices
python3 -m utool.util_alg --test-group_indices
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> groupid_list = ['b', 1, 'b', 1, 'b', 1, 'b', 'c', 'c', 'c', 'c']
>>> (keys, groupxs) = ut.group_indices(groupid_list)
>>> result = ut.repr3((keys, groupxs), nobraces=1, nl=1)
>>> print(result)
[1, 'b', 'c'],
[[1, 3, 5], [0, 2, 4, 6], [7, 8, 9, 10]],
"""
item_list = range(len(groupid_list))
grouped_dict = util_dict.group_items(item_list, groupid_list)
# Sort by groupid for cache efficiency
keys_ = list(grouped_dict.keys())
try:
keys = sorted(keys_)
except TypeError:
# Python 3 does not allow sorting mixed types
keys = util_list.sortedby2(keys_, keys_)
groupxs = util_dict.dict_take(grouped_dict, keys)
return keys, groupxs | [
"def",
"group_indices",
"(",
"groupid_list",
")",
":",
"item_list",
"=",
"range",
"(",
"len",
"(",
"groupid_list",
")",
")",
"grouped_dict",
"=",
"util_dict",
".",
"group_items",
"(",
"item_list",
",",
"groupid_list",
")",
"# Sort by groupid for cache efficiency",
"keys_",
"=",
"list",
"(",
"grouped_dict",
".",
"keys",
"(",
")",
")",
"try",
":",
"keys",
"=",
"sorted",
"(",
"keys_",
")",
"except",
"TypeError",
":",
"# Python 3 does not allow sorting mixed types",
"keys",
"=",
"util_list",
".",
"sortedby2",
"(",
"keys_",
",",
"keys_",
")",
"groupxs",
"=",
"util_dict",
".",
"dict_take",
"(",
"grouped_dict",
",",
"keys",
")",
"return",
"keys",
",",
"groupxs"
] | groups indices of each item in ``groupid_list``
Args:
groupid_list (list): list of group ids
SeeAlso:
vt.group_indices - optimized numpy version
ut.apply_grouping
CommandLine:
python -m utool.util_alg --test-group_indices
python3 -m utool.util_alg --test-group_indices
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> groupid_list = ['b', 1, 'b', 1, 'b', 1, 'b', 'c', 'c', 'c', 'c']
>>> (keys, groupxs) = ut.group_indices(groupid_list)
>>> result = ut.repr3((keys, groupxs), nobraces=1, nl=1)
>>> print(result)
[1, 'b', 'c'],
[[1, 3, 5], [0, 2, 4, 6], [7, 8, 9, 10]], | [
"groups",
"indicies",
"of",
"each",
"item",
"in",
"groupid_list"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2162-L2198 | train |
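The group-then-take pattern above can be sketched with only the standard library; here a sort-by-str fallback stands in for the record's sortedby2 call on mixed key types (illustrative names, not the record's API):

from collections import defaultdict

def group_indices_sketch(groupids):
    groups = defaultdict(list)
    for idx, gid in enumerate(groupids):
        groups[gid].append(idx)
    keys = sorted(groups, key=str)  # mixed int/str keys cannot be sorted directly
    return keys, [groups[k] for k in keys]

keys, groupxs = group_indices_sketch(['b', 1, 'b', 1, 'b', 1, 'b', 'c', 'c', 'c', 'c'])
assert keys == [1, 'b', 'c']
assert groupxs == [[1, 3, 5], [0, 2, 4, 6], [7, 8, 9, 10]]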
Erotemic/utool | utool/util_alg.py | ungroup_gen | def ungroup_gen(grouped_items, groupxs, fill=None):
"""
Ungroups items returning a generator.
Note that this is much slower than the list version and is not guaranteed
to have better memory usage.
Args:
grouped_items (list):
groupxs (list):
fill: (default = None)
Returns:
generator: ungrouped_items
SeeAlso:
vt.invert_apply_grouping
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[1, 2], [5, 6], [9, 3]]
>>> ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
>>> ungrouped_items2 = ungroup(grouped_items, groupxs)
>>> assert ungrouped_items1 == ungrouped_items2
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[0, 2], [1, 5], [4, 3]]
>>> ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
>>> ungrouped_items2 = ungroup(grouped_items, groupxs)
>>> assert ungrouped_items1 == ungrouped_items2
Ignore:
labels = np.random.randint(0, 64, 10000)
unique_labels, groupxs = ut.group_indices(labels)
grouped_items = ut.apply_grouping(np.arange(len(labels)), groupxs)
ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
ungrouped_items2 = ungroup(grouped_items, groupxs)
assert ungrouped_items2 == ungrouped_items1
%timeit list(ungroup_gen(grouped_items, groupxs))
%timeit ungroup(grouped_items, groupxs)
"""
import utool as ut
# Determine the number of items if unknown
#maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs]
#maxval = max(maxpergroup) if len(maxpergroup) else 0
minpergroup = [min(xs) if len(xs) else 0 for xs in groupxs]
minval = min(minpergroup) if len(minpergroup) else 0
flat_groupx = ut.flatten(groupxs)
sortx = ut.argsort(flat_groupx)
# Indicates the index being yielded
groupx_sorted = ut.take(flat_groupx, sortx)
flat_items = ut.iflatten(grouped_items)
# Storage for data waiting to be yielded
toyeild = {}
items_yeilded = 0
# Indicates the index we are currently yielding
current_index = 0
# Determine where fills need to happen
num_fills_before = [minval] + (np.diff(groupx_sorted) - 1).tolist() + [0]
# Check if there are fills before the first item
fills = num_fills_before[items_yeilded]
if fills > 0:
for _ in range(fills):
yield None
current_index += 1
# Yield items as possible
for yeild_at, item in zip(flat_groupx, flat_items):
if yeild_at > current_index:
toyeild[yeild_at] = item
elif yeild_at == current_index:
# When we find the next element to yield
yield item
current_index += 1
items_yeilded += 1
# Check if there are fills before the next item
fills = num_fills_before[items_yeilded]
if fills > 0:
for _ in range(fills):
yield None
current_index += 1
# Now yield everything that came before this
while current_index in toyeild:
item = toyeild.pop(current_index)
yield item
current_index += 1
items_yeilded += 1
# Check if there are fills before the next item
fills = num_fills_before[items_yeilded]
if fills > 0:
for _ in range(fills):
yield None
current_index += 1 | python | def ungroup_gen(grouped_items, groupxs, fill=None):
"""
Ungroups items returning a generator.
Note that this is much slower than the list version and is not guaranteed
to have better memory usage.
Args:
grouped_items (list):
groupxs (list):
fill: (default = None)
Returns:
generator: ungrouped_items
SeeAlso:
vt.invert_apply_grouping
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[1, 2], [5, 6], [9, 3]]
>>> ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
>>> ungrouped_items2 = ungroup(grouped_items, groupxs)
>>> assert ungrouped_items1 == ungrouped_items2
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[0, 2], [1, 5], [4, 3]]
>>> ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
>>> ungrouped_items2 = ungroup(grouped_items, groupxs)
>>> assert ungrouped_items1 == ungrouped_items2
Ignore:
labels = np.random.randint(0, 64, 10000)
unique_labels, groupxs = ut.group_indices(labels)
grouped_items = ut.apply_grouping(np.arange(len(labels)), groupxs)
ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
ungrouped_items2 = ungroup(grouped_items, groupxs)
assert ungrouped_items2 == ungrouped_items1
%timeit list(ungroup_gen(grouped_items, groupxs))
%timeit ungroup(grouped_items, groupxs)
"""
import utool as ut
# Determine the number of items if unknown
#maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs]
#maxval = max(maxpergroup) if len(maxpergroup) else 0
minpergroup = [min(xs) if len(xs) else 0 for xs in groupxs]
minval = min(minpergroup) if len(minpergroup) else 0
flat_groupx = ut.flatten(groupxs)
sortx = ut.argsort(flat_groupx)
# Indicates the index being yielded
groupx_sorted = ut.take(flat_groupx, sortx)
flat_items = ut.iflatten(grouped_items)
# Storage for data waiting to be yielded
toyeild = {}
items_yeilded = 0
# Indicates the index we are currently yielding
current_index = 0
# Determine where fills need to happen
num_fills_before = [minval] + (np.diff(groupx_sorted) - 1).tolist() + [0]
# Check if there are fills before the first item
fills = num_fills_before[items_yeilded]
if fills > 0:
for _ in range(fills):
yield None
current_index += 1
# Yield items as possible
for yeild_at, item in zip(flat_groupx, flat_items):
if yeild_at > current_index:
toyeild[yeild_at] = item
elif yeild_at == current_index:
# When we find the next element to yield
yield item
current_index += 1
items_yeilded += 1
# Check if there are fills before the next item
fills = num_fills_before[items_yeilded]
if fills > 0:
for _ in range(fills):
yield None
current_index += 1
# Now yield everything that came before this
while current_index in toyeild:
item = toyeild.pop(current_index)
yield item
current_index += 1
items_yeilded += 1
# Check if there are fills before the next item
fills = num_fills_before[items_yeilded]
if fills > 0:
for _ in range(fills):
yield None
current_index += 1 | [
"def",
"ungroup_gen",
"(",
"grouped_items",
",",
"groupxs",
",",
"fill",
"=",
"None",
")",
":",
"import",
"utool",
"as",
"ut",
"# Determine the number of items if unknown",
"#maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs]",
"#maxval = max(maxpergroup) if len(maxpergroup) else 0",
"minpergroup",
"=",
"[",
"min",
"(",
"xs",
")",
"if",
"len",
"(",
"xs",
")",
"else",
"0",
"for",
"xs",
"in",
"groupxs",
"]",
"minval",
"=",
"min",
"(",
"minpergroup",
")",
"if",
"len",
"(",
"minpergroup",
")",
"else",
"0",
"flat_groupx",
"=",
"ut",
".",
"flatten",
"(",
"groupxs",
")",
"sortx",
"=",
"ut",
".",
"argsort",
"(",
"flat_groupx",
")",
"# Indicates the index being yeilded",
"groupx_sorted",
"=",
"ut",
".",
"take",
"(",
"flat_groupx",
",",
"sortx",
")",
"flat_items",
"=",
"ut",
".",
"iflatten",
"(",
"grouped_items",
")",
"# Storage for data weiting to be yeilded",
"toyeild",
"=",
"{",
"}",
"items_yeilded",
"=",
"0",
"# Indicates the index we are curently yeilding",
"current_index",
"=",
"0",
"# Determine where fills need to happen",
"num_fills_before",
"=",
"[",
"minval",
"]",
"+",
"(",
"np",
".",
"diff",
"(",
"groupx_sorted",
")",
"-",
"1",
")",
".",
"tolist",
"(",
")",
"+",
"[",
"0",
"]",
"# Check if there are fills before the first item",
"fills",
"=",
"num_fills_before",
"[",
"items_yeilded",
"]",
"if",
"fills",
">",
"0",
":",
"for",
"_",
"in",
"range",
"(",
"fills",
")",
":",
"yield",
"None",
"current_index",
"+=",
"1",
"# Yield items as possible",
"for",
"yeild_at",
",",
"item",
"in",
"zip",
"(",
"flat_groupx",
",",
"flat_items",
")",
":",
"if",
"yeild_at",
">",
"current_index",
":",
"toyeild",
"[",
"yeild_at",
"]",
"=",
"item",
"elif",
"yeild_at",
"==",
"current_index",
":",
"# When we find the next element to yeild",
"yield",
"item",
"current_index",
"+=",
"1",
"items_yeilded",
"+=",
"1",
"# Check if there are fills before the next item",
"fills",
"=",
"num_fills_before",
"[",
"items_yeilded",
"]",
"if",
"fills",
">",
"0",
":",
"for",
"_",
"in",
"range",
"(",
"fills",
")",
":",
"yield",
"None",
"current_index",
"+=",
"1",
"# Now yield everything that came before this",
"while",
"current_index",
"in",
"toyeild",
":",
"item",
"=",
"toyeild",
".",
"pop",
"(",
"current_index",
")",
"yield",
"item",
"current_index",
"+=",
"1",
"items_yeilded",
"+=",
"1",
"# Check if there are fills before the next item",
"fills",
"=",
"num_fills_before",
"[",
"items_yeilded",
"]",
"if",
"fills",
">",
"0",
":",
"for",
"_",
"in",
"range",
"(",
"fills",
")",
":",
"yield",
"None",
"current_index",
"+=",
"1"
] | Ungroups items returning a generator.
Note that this is much slower than the list version and is not guaranteed
to have better memory usage.
Args:
grouped_items (list):
groupxs (list):
fill: (default = None)
Returns:
generator: ungrouped_items
SeeAlso:
vt.invert_apply_grouping
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[1, 2], [5, 6], [9, 3]]
>>> ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
>>> ungrouped_items2 = ungroup(grouped_items, groupxs)
>>> assert ungrouped_items1 == ungrouped_items2
>>> grouped_items = [[1.1, 1.2], [2.1, 2.2], [3.1, 3.2]]
>>> groupxs = [[0, 2], [1, 5], [4, 3]]
>>> ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
>>> ungrouped_items2 = ungroup(grouped_items, groupxs)
>>> assert ungrouped_items1 == ungrouped_items2
Ignore:
labels = np.random.randint(0, 64, 10000)
unique_labels, groupxs = ut.group_indices(labels)
grouped_items = ut.apply_grouping(np.arange(len(labels)), groupxs)
ungrouped_items1 = list(ungroup_gen(grouped_items, groupxs))
ungrouped_items2 = ungroup(grouped_items, groupxs)
assert ungrouped_items2 == ungrouped_items1
%timeit list(ungroup_gen(grouped_items, groupxs))
%timeit ungroup(grouped_items, groupxs) | [
"Ungroups",
"items",
"returning",
"a",
"generator",
".",
"Note",
"that",
"this",
"is",
"much",
"slower",
"than",
"the",
"list",
"version",
"and",
"is",
"not",
"gaurenteed",
"to",
"have",
"better",
"memory",
"usage",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2306-L2406 | train |
Erotemic/utool | utool/util_alg.py | ungroup_unique | def ungroup_unique(unique_items, groupxs, maxval=None):
"""
Ungroups unique items to correspond to original non-unique list
Args:
unique_items (list):
groupxs (list):
maxval (int): (default = None)
Returns:
list: ungrouped_items
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> unique_items = [1, 2, 3]
>>> groupxs = [[0, 2], [1, 3], [4, 5]]
>>> maxval = None
>>> ungrouped_items = ungroup_unique(unique_items, groupxs, maxval)
>>> result = ('ungrouped_items = %s' % (ut.repr2(ungrouped_items),))
>>> print(result)
ungrouped_items = [1, 2, 1, 2, 3, 3]
"""
if maxval is None:
maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs]
maxval = max(maxpergroup) if len(maxpergroup) else 0
ungrouped_items = [None] * (maxval + 1)
for item, xs in zip(unique_items, groupxs):
for x in xs:
ungrouped_items[x] = item
return ungrouped_items | python | def ungroup_unique(unique_items, groupxs, maxval=None):
"""
Ungroups unique items to correspond to original non-unique list
Args:
unique_items (list):
groupxs (list):
maxval (int): (default = None)
Returns:
list: ungrouped_items
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> unique_items = [1, 2, 3]
>>> groupxs = [[0, 2], [1, 3], [4, 5]]
>>> maxval = None
>>> ungrouped_items = ungroup_unique(unique_items, groupxs, maxval)
>>> result = ('ungrouped_items = %s' % (ut.repr2(ungrouped_items),))
>>> print(result)
ungrouped_items = [1, 2, 1, 2, 3, 3]
"""
if maxval is None:
maxpergroup = [max(xs) if len(xs) else 0 for xs in groupxs]
maxval = max(maxpergroup) if len(maxpergroup) else 0
ungrouped_items = [None] * (maxval + 1)
for item, xs in zip(unique_items, groupxs):
for x in xs:
ungrouped_items[x] = item
return ungrouped_items | [
"def",
"ungroup_unique",
"(",
"unique_items",
",",
"groupxs",
",",
"maxval",
"=",
"None",
")",
":",
"if",
"maxval",
"is",
"None",
":",
"maxpergroup",
"=",
"[",
"max",
"(",
"xs",
")",
"if",
"len",
"(",
"xs",
")",
"else",
"0",
"for",
"xs",
"in",
"groupxs",
"]",
"maxval",
"=",
"max",
"(",
"maxpergroup",
")",
"if",
"len",
"(",
"maxpergroup",
")",
"else",
"0",
"ungrouped_items",
"=",
"[",
"None",
"]",
"*",
"(",
"maxval",
"+",
"1",
")",
"for",
"item",
",",
"xs",
"in",
"zip",
"(",
"unique_items",
",",
"groupxs",
")",
":",
"for",
"x",
"in",
"xs",
":",
"ungrouped_items",
"[",
"x",
"]",
"=",
"item",
"return",
"ungrouped_items"
] | Ungroups unique items to correspond to original non-unique list
Args:
unique_items (list):
groupxs (list):
maxval (int): (default = None)
Returns:
list: ungrouped_items
CommandLine:
python -m utool.util_alg ungroup_unique
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> unique_items = [1, 2, 3]
>>> groupxs = [[0, 2], [1, 3], [4, 5]]
>>> maxval = None
>>> ungrouped_items = ungroup_unique(unique_items, groupxs, maxval)
>>> result = ('ungrouped_items = %s' % (ut.repr2(ungrouped_items),))
>>> print(result)
ungrouped_items = [1, 2, 1, 2, 3, 3] | [
"Ungroups",
"unique",
"items",
"to",
"correspond",
"to",
"original",
"non",
"-",
"unique",
"list"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2409-L2443 | train |
Erotemic/utool | utool/util_alg.py | edit_distance | def edit_distance(string1, string2):
"""
Edit distance algorithm. String1 and string2 can be either
strings or lists of strings
pip install python-Levenshtein
Args:
string1 (str or list):
string2 (str or list):
CommandLine:
python -m utool.util_alg edit_distance --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> string1 = 'hello world'
>>> string2 = ['goodbye world', 'rofl', 'hello', 'world', 'lowo']
>>> edit_distance(['hello', 'one'], ['goodbye', 'two'])
>>> edit_distance('hello', ['goodbye', 'two'])
>>> edit_distance(['hello', 'one'], 'goodbye')
>>> edit_distance('hello', 'goodbye')
>>> distmat = edit_distance(string1, string2)
>>> result = ('distmat = %s' % (ut.repr2(distmat),))
>>> print(result)
>>> [7, 9, 6, 6, 7]
"""
import utool as ut
try:
import Levenshtein
except ImportError as ex:
ut.printex(ex, 'pip install python-Levenshtein')
raise
#np.vectorize(Levenshtein.distance, [np.int])
#vec_lev = np.frompyfunc(Levenshtein.distance, 2, 1)
#return vec_lev(string1, string2)
import utool as ut
isiter1 = ut.isiterable(string1)
isiter2 = ut.isiterable(string2)
strs1 = string1 if isiter1 else [string1]
strs2 = string2 if isiter2 else [string2]
distmat = [
[Levenshtein.distance(str1, str2) for str2 in strs2]
for str1 in strs1
]
# broadcast
if not isiter2:
distmat = ut.take_column(distmat, 0)
if not isiter1:
distmat = distmat[0]
return distmat | python | def edit_distance(string1, string2):
"""
Edit distance algorithm. String1 and string2 can be either
strings or lists of strings
pip install python-Levenshtein
Args:
string1 (str or list):
string2 (str or list):
CommandLine:
python -m utool.util_alg edit_distance --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> string1 = 'hello world'
>>> string2 = ['goodbye world', 'rofl', 'hello', 'world', 'lowo']
>>> edit_distance(['hello', 'one'], ['goodbye', 'two'])
>>> edit_distance('hello', ['goodbye', 'two'])
>>> edit_distance(['hello', 'one'], 'goodbye')
>>> edit_distance('hello', 'goodbye')
>>> distmat = edit_distance(string1, string2)
>>> result = ('distmat = %s' % (ut.repr2(distmat),))
>>> print(result)
>>> [7, 9, 6, 6, 7]
"""
import utool as ut
try:
import Levenshtein
except ImportError as ex:
ut.printex(ex, 'pip install python-Levenshtein')
raise
#np.vectorize(Levenshtein.distance, [np.int])
#vec_lev = np.frompyfunc(Levenshtein.distance, 2, 1)
#return vec_lev(string1, string2)
import utool as ut
isiter1 = ut.isiterable(string1)
isiter2 = ut.isiterable(string2)
strs1 = string1 if isiter1 else [string1]
strs2 = string2 if isiter2 else [string2]
distmat = [
[Levenshtein.distance(str1, str2) for str2 in strs2]
for str1 in strs1
]
# broadcast
if not isiter2:
distmat = ut.take_column(distmat, 0)
if not isiter1:
distmat = distmat[0]
return distmat | [
"def",
"edit_distance",
"(",
"string1",
",",
"string2",
")",
":",
"import",
"utool",
"as",
"ut",
"try",
":",
"import",
"Levenshtein",
"except",
"ImportError",
"as",
"ex",
":",
"ut",
".",
"printex",
"(",
"ex",
",",
"'pip install python-Levenshtein'",
")",
"raise",
"#np.vectorize(Levenshtein.distance, [np.int])",
"#vec_lev = np.frompyfunc(Levenshtein.distance, 2, 1)",
"#return vec_lev(string1, string2)",
"import",
"utool",
"as",
"ut",
"isiter1",
"=",
"ut",
".",
"isiterable",
"(",
"string1",
")",
"isiter2",
"=",
"ut",
".",
"isiterable",
"(",
"string2",
")",
"strs1",
"=",
"string1",
"if",
"isiter1",
"else",
"[",
"string1",
"]",
"strs2",
"=",
"string2",
"if",
"isiter2",
"else",
"[",
"string2",
"]",
"distmat",
"=",
"[",
"[",
"Levenshtein",
".",
"distance",
"(",
"str1",
",",
"str2",
")",
"for",
"str2",
"in",
"strs2",
"]",
"for",
"str1",
"in",
"strs1",
"]",
"# broadcast",
"if",
"not",
"isiter2",
":",
"distmat",
"=",
"ut",
".",
"take_column",
"(",
"distmat",
",",
"0",
")",
"if",
"not",
"isiter1",
":",
"distmat",
"=",
"distmat",
"[",
"0",
"]",
"return",
"distmat"
] | Edit distance algorithm. String1 and string2 can be either
strings or lists of strings
pip install python-Levenshtein
Args:
string1 (str or list):
string2 (str or list):
CommandLine:
python -m utool.util_alg edit_distance --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> string1 = 'hello world'
>>> string2 = ['goodbye world', 'rofl', 'hello', 'world', 'lowo']
>>> edit_distance(['hello', 'one'], ['goodbye', 'two'])
>>> edit_distance('hello', ['goodbye', 'two'])
>>> edit_distance(['hello', 'one'], 'goodbye')
>>> edit_distance('hello', 'goodbye')
>>> distmat = edit_distance(string1, string2)
>>> result = ('distmat = %s' % (ut.repr2(distmat),))
>>> print(result)
>>> [7, 9, 6, 6, 7] | [
"Edit",
"distance",
"algorithm",
".",
"String1",
"and",
"string2",
"can",
"be",
"either",
"strings",
"or",
"lists",
"of",
"strings"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2446-L2499 | train |
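The record above delegates to the python-Levenshtein C extension; a pure-Python Wagner-Fischer sketch computes the same distance when that dependency is unavailable:

def levenshtein_sketch(a, b):
    # Two-row DP: prev[j] = edits to turn the consumed prefix of a into b[:j]
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        curr = [i]
        for j, cb in enumerate(b, 1):
            curr.append(min(prev[j] + 1,                 # deletion
                            curr[j - 1] + 1,             # insertion
                            prev[j - 1] + (ca != cb)))   # substitution
        prev = curr
    return prev[-1]

assert levenshtein_sketch('hello', 'goodbye') == 7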
Erotemic/utool | utool/util_alg.py | standardize_boolexpr | def standardize_boolexpr(boolexpr_, parens=False):
r"""
Standardizes a boolean expression into an or-ing of and-ed variables
Args:
boolexpr_ (str):
Returns:
str: final_expr
CommandLine:
sudo pip install git+https://github.com/tpircher/quine-mccluskey.git
python -m utool.util_alg standardize_boolexpr --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> boolexpr_ = 'not force_opencv and (orient_ or is_gif)'
>>> result = standardize_boolexpr(boolexpr_, parens=True)
>>> print(result)
(orient_ and (not force_opencv)) or (is_gif and (not force_opencv))
"""
import utool as ut
import re
onlyvars = boolexpr_
onlyvars = re.sub('\\bnot\\b', '', onlyvars)
onlyvars = re.sub('\\band\\b', '', onlyvars)
onlyvars = re.sub('\\bor\\b', '', onlyvars)
onlyvars = re.sub('\\(', '', onlyvars)
onlyvars = re.sub('\\)', '', onlyvars)
varnames = ut.remove_doublspaces(onlyvars).strip().split(' ')
varied_dict = {var: [True, False] for var in varnames}
bool_states = ut.all_dict_combinations(varied_dict)
outputs = [eval(boolexpr_, state.copy(), state.copy()) for state in bool_states]
true_states = ut.compress(bool_states, outputs)
true_tuples = ut.take_column(true_states, varnames)
true_cases = [str(''.join([str(int(t)) for t in tup])) for tup in true_tuples]
# Convert to binary
ones_bin = [int(x, 2) for x in true_cases]
#ones_str = [str(x) for x in true_cases]
from quine_mccluskey.qm import QuineMcCluskey
qm = QuineMcCluskey()
result = qm.simplify(ones=ones_bin, num_bits=len(varnames))
#result = qm.simplify_los(ones=ones_str, num_bits=len(varnames))
grouped_terms = [dict(ut.group_items(varnames, rs)) for rs in result]
def parenjoin(char, list_):
if len(list_) == 0:
return ''
else:
if parens:
return '(' + char.join(list_) + ')'
else:
return char.join(list_)
if parens:
expanded_terms = [
(
term.get('1', []) +
['(not ' + b + ')' for b in term.get('0', [])] +
[
parenjoin(' ^ ', term.get('^', [])),
parenjoin(' ~ ', term.get('~', [])),
]
) for term in grouped_terms
]
else:
expanded_terms = [
(
term.get('1', []) +
['not ' + b for b in term.get('0', [])] +
[
parenjoin(' ^ ', term.get('^', [])),
parenjoin(' ~ ', term.get('~', [])),
]
) for term in grouped_terms
]
final_terms = [[t for t in term if t] for term in expanded_terms]
products = [parenjoin(' and ', [f for f in form if f]) for form in final_terms]
final_expr = ' or '.join(products)
return final_expr | python | def standardize_boolexpr(boolexpr_, parens=False):
r"""
Standardizes a boolean expression into an or-ing of and-ed variables
Args:
boolexpr_ (str):
Returns:
str: final_expr
CommandLine:
sudo pip install git+https://github.com/tpircher/quine-mccluskey.git
python -m utool.util_alg standardize_boolexpr --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> boolexpr_ = 'not force_opencv and (orient_ or is_gif)'
>>> result = standardize_boolexpr(boolexpr_, parens=True)
>>> print(result)
(orient_ and (not force_opencv)) or (is_gif and (not force_opencv))
"""
import utool as ut
import re
onlyvars = boolexpr_
onlyvars = re.sub('\\bnot\\b', '', onlyvars)
onlyvars = re.sub('\\band\\b', '', onlyvars)
onlyvars = re.sub('\\bor\\b', '', onlyvars)
onlyvars = re.sub('\\(', '', onlyvars)
onlyvars = re.sub('\\)', '', onlyvars)
varnames = ut.remove_doublspaces(onlyvars).strip().split(' ')
varied_dict = {var: [True, False] for var in varnames}
bool_states = ut.all_dict_combinations(varied_dict)
outputs = [eval(boolexpr_, state.copy(), state.copy()) for state in bool_states]
true_states = ut.compress(bool_states, outputs)
true_tuples = ut.take_column(true_states, varnames)
true_cases = [str(''.join([str(int(t)) for t in tup])) for tup in true_tuples]
# Convert to binary
ones_bin = [int(x, 2) for x in true_cases]
#ones_str = [str(x) for x in true_cases]
from quine_mccluskey.qm import QuineMcCluskey
qm = QuineMcCluskey()
result = qm.simplify(ones=ones_bin, num_bits=len(varnames))
#result = qm.simplify_los(ones=ones_str, num_bits=len(varnames))
grouped_terms = [dict(ut.group_items(varnames, rs)) for rs in result]
def parenjoin(char, list_):
if len(list_) == 0:
return ''
else:
if parens:
return '(' + char.join(list_) + ')'
else:
return char.join(list_)
if parens:
expanded_terms = [
(
term.get('1', []) +
['(not ' + b + ')' for b in term.get('0', [])] +
[
parenjoin(' ^ ', term.get('^', [])),
parenjoin(' ~ ', term.get('~', [])),
]
) for term in grouped_terms
]
else:
expanded_terms = [
(
term.get('1', []) +
['not ' + b for b in term.get('0', [])] +
[
parenjoin(' ^ ', term.get('^', [])),
parenjoin(' ~ ', term.get('~', [])),
]
) for term in grouped_terms
]
final_terms = [[t for t in term if t] for term in expanded_terms]
products = [parenjoin(' and ', [f for f in form if f]) for form in final_terms]
final_expr = ' or '.join(products)
return final_expr | [
"def",
"standardize_boolexpr",
"(",
"boolexpr_",
",",
"parens",
"=",
"False",
")",
":",
"import",
"utool",
"as",
"ut",
"import",
"re",
"onlyvars",
"=",
"boolexpr_",
"onlyvars",
"=",
"re",
".",
"sub",
"(",
"'\\\\bnot\\\\b'",
",",
"''",
",",
"onlyvars",
")",
"onlyvars",
"=",
"re",
".",
"sub",
"(",
"'\\\\band\\\\b'",
",",
"''",
",",
"onlyvars",
")",
"onlyvars",
"=",
"re",
".",
"sub",
"(",
"'\\\\bor\\\\b'",
",",
"''",
",",
"onlyvars",
")",
"onlyvars",
"=",
"re",
".",
"sub",
"(",
"'\\\\('",
",",
"''",
",",
"onlyvars",
")",
"onlyvars",
"=",
"re",
".",
"sub",
"(",
"'\\\\)'",
",",
"''",
",",
"onlyvars",
")",
"varnames",
"=",
"ut",
".",
"remove_doublspaces",
"(",
"onlyvars",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
"' '",
")",
"varied_dict",
"=",
"{",
"var",
":",
"[",
"True",
",",
"False",
"]",
"for",
"var",
"in",
"varnames",
"}",
"bool_states",
"=",
"ut",
".",
"all_dict_combinations",
"(",
"varied_dict",
")",
"outputs",
"=",
"[",
"eval",
"(",
"boolexpr_",
",",
"state",
".",
"copy",
"(",
")",
",",
"state",
".",
"copy",
"(",
")",
")",
"for",
"state",
"in",
"bool_states",
"]",
"true_states",
"=",
"ut",
".",
"compress",
"(",
"bool_states",
",",
"outputs",
")",
"true_tuples",
"=",
"ut",
".",
"take_column",
"(",
"true_states",
",",
"varnames",
")",
"true_cases",
"=",
"[",
"str",
"(",
"''",
".",
"join",
"(",
"[",
"str",
"(",
"int",
"(",
"t",
")",
")",
"for",
"t",
"in",
"tup",
"]",
")",
")",
"for",
"tup",
"in",
"true_tuples",
"]",
"# Convert to binary",
"ones_bin",
"=",
"[",
"int",
"(",
"x",
",",
"2",
")",
"for",
"x",
"in",
"true_cases",
"]",
"#ones_str = [str(x) for x in true_cases]",
"from",
"quine_mccluskey",
".",
"qm",
"import",
"QuineMcCluskey",
"qm",
"=",
"QuineMcCluskey",
"(",
")",
"result",
"=",
"qm",
".",
"simplify",
"(",
"ones",
"=",
"ones_bin",
",",
"num_bits",
"=",
"len",
"(",
"varnames",
")",
")",
"#result = qm.simplify_los(ones=ones_str, num_bits=len(varnames))",
"grouped_terms",
"=",
"[",
"dict",
"(",
"ut",
".",
"group_items",
"(",
"varnames",
",",
"rs",
")",
")",
"for",
"rs",
"in",
"result",
"]",
"def",
"parenjoin",
"(",
"char",
",",
"list_",
")",
":",
"if",
"len",
"(",
"list_",
")",
"==",
"0",
":",
"return",
"''",
"else",
":",
"if",
"parens",
":",
"return",
"'('",
"+",
"char",
".",
"join",
"(",
"list_",
")",
"+",
"')'",
"else",
":",
"return",
"char",
".",
"join",
"(",
"list_",
")",
"if",
"parens",
":",
"expanded_terms",
"=",
"[",
"(",
"term",
".",
"get",
"(",
"'1'",
",",
"[",
"]",
")",
"+",
"[",
"'(not '",
"+",
"b",
"+",
"')'",
"for",
"b",
"in",
"term",
".",
"get",
"(",
"'0'",
",",
"[",
"]",
")",
"]",
"+",
"[",
"parenjoin",
"(",
"' ^ '",
",",
"term",
".",
"get",
"(",
"'^'",
",",
"[",
"]",
")",
")",
",",
"parenjoin",
"(",
"' ~ '",
",",
"term",
".",
"get",
"(",
"'~'",
",",
"[",
"]",
")",
")",
",",
"]",
")",
"for",
"term",
"in",
"grouped_terms",
"]",
"else",
":",
"expanded_terms",
"=",
"[",
"(",
"term",
".",
"get",
"(",
"'1'",
",",
"[",
"]",
")",
"+",
"[",
"'not '",
"+",
"b",
"for",
"b",
"in",
"term",
".",
"get",
"(",
"'0'",
",",
"[",
"]",
")",
"]",
"+",
"[",
"parenjoin",
"(",
"' ^ '",
",",
"term",
".",
"get",
"(",
"'^'",
",",
"[",
"]",
")",
")",
",",
"parenjoin",
"(",
"' ~ '",
",",
"term",
".",
"get",
"(",
"'~'",
",",
"[",
"]",
")",
")",
",",
"]",
")",
"for",
"term",
"in",
"grouped_terms",
"]",
"final_terms",
"=",
"[",
"[",
"t",
"for",
"t",
"in",
"term",
"if",
"t",
"]",
"for",
"term",
"in",
"expanded_terms",
"]",
"products",
"=",
"[",
"parenjoin",
"(",
"' and '",
",",
"[",
"f",
"for",
"f",
"in",
"form",
"if",
"f",
"]",
")",
"for",
"form",
"in",
"final_terms",
"]",
"final_expr",
"=",
"' or '",
".",
"join",
"(",
"products",
")",
"return",
"final_expr"
] | r"""
Standardizes a boolean expression into an or-ing of and-ed variables
Args:
boolexpr_ (str):
Returns:
str: final_expr
CommandLine:
sudo pip install git+https://github.com/tpircher/quine-mccluskey.git
python -m utool.util_alg standardize_boolexpr --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> boolexpr_ = 'not force_opencv and (orient_ or is_gif)'
>>> result = standardize_boolexpr(boolexpr_, parens=True)
>>> print(result)
(orient_ and (not force_opencv)) or (is_gif and (not force_opencv)) | [
"r",
"Standardizes",
"a",
"boolean",
"expression",
"into",
"an",
"or",
"-",
"ing",
"of",
"and",
"-",
"ed",
"variables"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2545-L2629 | train |
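The function above enumerates a truth table, encodes each satisfying assignment as a binary minterm, and hands the minterms to QuineMcCluskey.simplify; the enumeration step can be sketched on the docstring's own example without the external package (this stops at the minterm list rather than minimizing it):

from itertools import product

expr = 'not force_opencv and (orient_ or is_gif)'
varnames = ['force_opencv', 'orient_', 'is_gif']
minterms = []
for bits in product([True, False], repeat=len(varnames)):
    state = dict(zip(varnames, bits))
    if eval(expr, {}, state):  # same eval-per-assignment trick as the record
        minterms.append(int(''.join(str(int(b)) for b in bits), 2))
assert sorted(minterms) == [1, 2, 3]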
Erotemic/utool | utool/util_alg.py | expensive_task_gen | def expensive_task_gen(num=8700):
r"""
Runs a task that takes some time
Args:
num (int): (default = 8700)
CommandLine:
python -m utool.util_alg expensive_task_gen --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> #num = 8700
>>> num = 40000
>>> with ut.Timer('expensive task'):
>>> time_list = list(ut.expensive_task_gen(num))
>>> print(sum(time_list))
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> #pt.plot(time_list)
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c, d):
>>> return a * np.exp(-c * x) + d
>>> #a*x**3 + b*x**2 +c*x + d
>>> y = np.array(time_list)
>>> y = np.array(ut.cumsum(y))
>>> x = np.arange(len(y))
>>> #popt, pcov = curve_fit(func, x, y, p0=(1, 1e-6, 1))
>>> #print('pcov = %r' % (pcov,))
>>> #print('popt = %r' % (popt,))
>>> # http://stackoverflow.com/questions/3433486/-curve-fitting-in-python
>>> pt.plt.plot(x[::num//50], y[::num//50], 'rx', label='measured data')
>>> #x2 = np.arange(len(y) * 2)
>>> #pt.plt.plot(x2, func(x2, *popt), 'b', label="Fitted Curve") #same as line above \/
>>> #pt.plt.legend(loc='upper left')
>>> ut.show_if_requested()
"""
import utool as ut
#time_list = []
for x in range(0, num):
with ut.Timer(verbose=False) as t:
ut.is_prime(x)
yield t.ellapsed | python | def expensive_task_gen(num=8700):
r"""
Runs a task that takes some time
Args:
num (int): (default = 8700)
CommandLine:
python -m utool.util_alg expensive_task_gen --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> #num = 8700
>>> num = 40000
>>> with ut.Timer('expensive task'):
>>> time_list = list(ut.expensive_task_gen(num))
>>> print(sum(time_list))
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> #pt.plot(time_list)
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c, d):
>>> return a * np.exp(-c * x) + d
>>> #a*x**3 + b*x**2 +c*x + d
>>> y = np.array(time_list)
>>> y = np.array(ut.cumsum(y))
>>> x = np.arange(len(y))
>>> #popt, pcov = curve_fit(func, x, y, p0=(1, 1e-6, 1))
>>> #print('pcov = %r' % (pcov,))
>>> #print('popt = %r' % (popt,))
>>> # http://stackoverflow.com/questions/3433486/-curve-fitting-in-python
>>> pt.plt.plot(x[::num//50], y[::num//50], 'rx', label='measured data')
>>> #x2 = np.arange(len(y) * 2)
>>> #pt.plt.plot(x2, func(x2, *popt), 'b', label="Fitted Curve") #same as line above \/
>>> #pt.plt.legend(loc='upper left')
>>> ut.show_if_requested()
"""
import utool as ut
#time_list = []
for x in range(0, num):
with ut.Timer(verbose=False) as t:
ut.is_prime(x)
yield t.ellapsed | [
"def",
"expensive_task_gen",
"(",
"num",
"=",
"8700",
")",
":",
"import",
"utool",
"as",
"ut",
"#time_list = []",
"for",
"x",
"in",
"range",
"(",
"0",
",",
"num",
")",
":",
"with",
"ut",
".",
"Timer",
"(",
"verbose",
"=",
"False",
")",
"as",
"t",
":",
"ut",
".",
"is_prime",
"(",
"x",
")",
"yield",
"t",
".",
"ellapsed"
] | r"""
Runs a task that takes some time
Args:
num (int): (default = 8700)
CommandLine:
python -m utool.util_alg expensive_task_gen --show
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> #num = 8700
>>> num = 40000
>>> with ut.Timer('expensive task'):
>>> time_list = list(ut.expensive_task_gen(num))
>>> print(sum(time_list))
>>> ut.quit_if_noshow()
>>> import plottool as pt
>>> #pt.plot(time_list)
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c, d):
>>> return a * np.exp(-c * x) + d
>>> #a*x**3 + b*x**2 +c*x + d
>>> y = np.array(time_list)
>>> y = np.array(ut.cumsum(y))
>>> x = np.arange(len(y))
>>> #popt, pcov = curve_fit(func, x, y, p0=(1, 1e-6, 1))
>>> #print('pcov = %r' % (pcov,))
>>> #print('popt = %r' % (popt,))
>>> # http://stackoverflow.com/questions/3433486/-curve-fitting-in-python
>>> pt.plt.plot(x[::num//50], y[::num//50], 'rx', label='measured data')
>>> #x2 = np.arange(len(y) * 2)
>>> #pt.plt.plot(x2, func(x2, *popt), 'b', label="Fitted Curve") #same as line above \/
>>> #pt.plt.legend(loc='upper left')
>>> ut.show_if_requested() | [
"r",
"Runs",
"a",
"task",
"that",
"takes",
"some",
"time"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2751-L2795 | train |
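The generator above measures each unit of work with ut.Timer; the same per-item timing can be sketched with only the standard library (names here are illustrative):

import time

def timed_tasks_sketch(tasks):
    # Yield the wall-clock seconds each callable takes, one result at a time.
    for task in tasks:
        start = time.perf_counter()
        task()
        yield time.perf_counter() - start

durations = list(timed_tasks_sketch([lambda: sum(range(10000))] * 5))
assert len(durations) == 5 and all(d >= 0 for d in durations)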
Erotemic/utool | utool/util_alg.py | factors | def factors(n):
"""
Computes all the integer factors of the number `n`
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> result = sorted(ut.factors(10))
>>> print(result)
[1, 2, 5, 10]
References:
http://stackoverflow.com/questions/6800193/finding-all-the-factors
"""
return set(reduce(list.__add__,
([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0))) | python | def factors(n):
"""
Computes all the integer factors of the number `n`
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> result = sorted(ut.factors(10))
>>> print(result)
[1, 2, 5, 10]
References:
http://stackoverflow.com/questions/6800193/finding-all-the-factors
"""
return set(reduce(list.__add__,
([i, n // i] for i in range(1, int(n ** 0.5) + 1) if n % i == 0))) | [
"def",
"factors",
"(",
"n",
")",
":",
"return",
"set",
"(",
"reduce",
"(",
"list",
".",
"__add__",
",",
"(",
"[",
"i",
",",
"n",
"//",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"int",
"(",
"n",
"**",
"0.5",
")",
"+",
"1",
")",
"if",
"n",
"%",
"i",
"==",
"0",
")",
")",
")"
] | Computes all the integer factors of the number `n`
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_alg import * # NOQA
>>> import utool as ut
>>> result = sorted(ut.factors(10))
>>> print(result)
[1, 2, 5, 10]
References:
http://stackoverflow.com/questions/6800193/finding-all-the-factors | [
"Computes",
"all",
"the",
"integer",
"factors",
"of",
"the",
"number",
"n"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_alg.py#L2801-L2817 | train |
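The one-liner above relies on reduce, which on Python 3 only exists in functools (the record's module presumably imports it at the top, which is not shown here). A self-contained equivalent:

from functools import reduce  # required on Python 3

def factors_sketch(n):
    return set(reduce(list.__add__,
                      ([i, n // i] for i in range(1, int(n ** 0.5) + 1)
                       if n % i == 0)))

assert sorted(factors_sketch(10)) == [1, 2, 5, 10]
assert sorted(factors_sketch(36)) == [1, 2, 3, 4, 6, 9, 12, 18, 36]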
glormph/msstitch | src/app/actions/prottable/info.py | add_protein_data | def add_protein_data(proteins, pgdb, headerfields, genecentric=False,
pool_to_output=False):
"""First creates a map with all master proteins with data,
then outputs protein data dicts for rows of a tsv. If a pool
is given then only output for that pool will be shown in the
protein table."""
proteindata = create_featuredata_map(pgdb, genecentric=genecentric,
psm_fill_fun=add_psms_to_proteindata,
pgene_fill_fun=add_protgene_to_protdata,
count_fun=count_peps_psms,
pool_to_output=pool_to_output,
get_uniques=True)
dataget_fun = {True: get_protein_data_genecentric,
False: get_protein_data_pgrouped}[genecentric is not False]
firstfield = prottabledata.ACCESSIONS[genecentric]
for protein in proteins:
outprotein = {k: v for k, v in protein.items()}
outprotein[firstfield] = outprotein.pop(prottabledata.HEADER_PROTEIN)
protein_acc = protein[prottabledata.HEADER_PROTEIN]
outprotein.update(dataget_fun(proteindata, protein_acc, headerfields))
outprotein = {k: str(v) for k, v in outprotein.items()}
yield outprotein | python | def add_protein_data(proteins, pgdb, headerfields, genecentric=False,
pool_to_output=False):
"""First creates a map with all master proteins with data,
then outputs protein data dicts for rows of a tsv. If a pool
is given then only output for that pool will be shown in the
protein table."""
proteindata = create_featuredata_map(pgdb, genecentric=genecentric,
psm_fill_fun=add_psms_to_proteindata,
pgene_fill_fun=add_protgene_to_protdata,
count_fun=count_peps_psms,
pool_to_output=pool_to_output,
get_uniques=True)
dataget_fun = {True: get_protein_data_genecentric,
False: get_protein_data_pgrouped}[genecentric is not False]
firstfield = prottabledata.ACCESSIONS[genecentric]
for protein in proteins:
outprotein = {k: v for k, v in protein.items()}
outprotein[firstfield] = outprotein.pop(prottabledata.HEADER_PROTEIN)
protein_acc = protein[prottabledata.HEADER_PROTEIN]
outprotein.update(dataget_fun(proteindata, protein_acc, headerfields))
outprotein = {k: str(v) for k, v in outprotein.items()}
yield outprotein | [
"def",
"add_protein_data",
"(",
"proteins",
",",
"pgdb",
",",
"headerfields",
",",
"genecentric",
"=",
"False",
",",
"pool_to_output",
"=",
"False",
")",
":",
"proteindata",
"=",
"create_featuredata_map",
"(",
"pgdb",
",",
"genecentric",
"=",
"genecentric",
",",
"psm_fill_fun",
"=",
"add_psms_to_proteindata",
",",
"pgene_fill_fun",
"=",
"add_protgene_to_protdata",
",",
"count_fun",
"=",
"count_peps_psms",
",",
"pool_to_output",
"=",
"pool_to_output",
",",
"get_uniques",
"=",
"True",
")",
"dataget_fun",
"=",
"{",
"True",
":",
"get_protein_data_genecentric",
",",
"False",
":",
"get_protein_data_pgrouped",
"}",
"[",
"genecentric",
"is",
"not",
"False",
"]",
"firstfield",
"=",
"prottabledata",
".",
"ACCESSIONS",
"[",
"genecentric",
"]",
"for",
"protein",
"in",
"proteins",
":",
"outprotein",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"protein",
".",
"items",
"(",
")",
"}",
"outprotein",
"[",
"firstfield",
"]",
"=",
"outprotein",
".",
"pop",
"(",
"prottabledata",
".",
"HEADER_PROTEIN",
")",
"protein_acc",
"=",
"protein",
"[",
"prottabledata",
".",
"HEADER_PROTEIN",
"]",
"outprotein",
".",
"update",
"(",
"dataget_fun",
"(",
"proteindata",
",",
"protein_acc",
",",
"headerfields",
")",
")",
"outprotein",
"=",
"{",
"k",
":",
"str",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"outprotein",
".",
"items",
"(",
")",
"}",
"yield",
"outprotein"
] | First creates a map with all master proteins with data,
then outputs protein data dicts for rows of a tsv. If a pool
is given then only output for that pool will be shown in the
protein table. | [
"First",
"creates",
"a",
"map",
"with",
"all",
"master",
"proteins",
"with",
"data",
"then",
"outputs",
"protein",
"data",
"dicts",
"for",
"rows",
"of",
"a",
"tsv",
".",
"If",
"a",
"pool",
"is",
"given",
"then",
"only",
"output",
"for",
"that",
"pool",
"will",
"be",
"shown",
"in",
"the",
"protein",
"table",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/info.py#L13-L34 | train |
glormph/msstitch | src/app/actions/prottable/info.py | get_protein_data_pgrouped | def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
"""Parses protein data for a certain protein into tsv output
dictionary"""
report = get_protein_data_base(proteindata, p_acc, headerfields)
return get_cov_protnumbers(proteindata, p_acc, report) | python | def get_protein_data_pgrouped(proteindata, p_acc, headerfields):
"""Parses protein data for a certain protein into tsv output
dictionary"""
report = get_protein_data_base(proteindata, p_acc, headerfields)
return get_cov_protnumbers(proteindata, p_acc, report) | [
"def",
"get_protein_data_pgrouped",
"(",
"proteindata",
",",
"p_acc",
",",
"headerfields",
")",
":",
"report",
"=",
"get_protein_data_base",
"(",
"proteindata",
",",
"p_acc",
",",
"headerfields",
")",
"return",
"get_cov_protnumbers",
"(",
"proteindata",
",",
"p_acc",
",",
"report",
")"
] | Parses protein data for a certain protein into tsv output
dictionary | [
"Parses",
"protein",
"data",
"for",
"a",
"certain",
"protein",
"into",
"tsv",
"output",
"dictionary"
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/info.py#L41-L45 | train |
chriso/gauged | gauged/drivers/postgresql.py | PostgreSQLDriver.keys | def keys(self, namespace, prefix=None, limit=None, offset=None):
"""Get keys from a namespace"""
params = [namespace]
query = 'SELECT key FROM gauged_keys WHERE namespace = %s'
if prefix is not None:
query += ' AND key LIKE %s'
params.append(prefix + '%')
if limit is not None:
query += ' LIMIT %s'
params.append(limit)
if offset is not None:
query += ' OFFSET %s'
params.append(offset)
cursor = self.cursor
cursor.execute(query, params)
return [key for key, in cursor] | python | def keys(self, namespace, prefix=None, limit=None, offset=None):
"""Get keys from a namespace"""
params = [namespace]
query = 'SELECT key FROM gauged_keys WHERE namespace = %s'
if prefix is not None:
query += ' AND key LIKE %s'
params.append(prefix + '%')
if limit is not None:
query += ' LIMIT %s'
params.append(limit)
if offset is not None:
query += ' OFFSET %s'
params.append(offset)
cursor = self.cursor
cursor.execute(query, params)
return [key for key, in cursor] | [
"def",
"keys",
"(",
"self",
",",
"namespace",
",",
"prefix",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"offset",
"=",
"None",
")",
":",
"params",
"=",
"[",
"namespace",
"]",
"query",
"=",
"'SELECT key FROM gauged_keys WHERE namespace = %s'",
"if",
"prefix",
"is",
"not",
"None",
":",
"query",
"+=",
"' AND key LIKE %s'",
"params",
".",
"append",
"(",
"prefix",
"+",
"'%'",
")",
"if",
"limit",
"is",
"not",
"None",
":",
"query",
"+=",
"' LIMIT %s'",
"params",
".",
"append",
"(",
"limit",
")",
"if",
"offset",
"is",
"not",
"None",
":",
"query",
"+=",
"' OFFSET %s'",
"params",
".",
"append",
"(",
"offset",
")",
"cursor",
"=",
"self",
".",
"cursor",
"cursor",
".",
"execute",
"(",
"query",
",",
"params",
")",
"return",
"[",
"key",
"for",
"key",
",",
"in",
"cursor",
"]"
] | Get keys from a namespace | [
"Get",
"keys",
"from",
"a",
"namespace"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L25-L40 | train |
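One caveat with the prefix filter above: LIKE treats '%' and '_' inside the user-supplied prefix as wildcards, so a caller needing a literal prefix match would escape them first (PostgreSQL's default LIKE escape character is the backslash). A caller-side sketch, not driver behavior:

def escape_like_prefix(prefix, escape_char='\\'):
    # Make '%' and '_' literal inside a LIKE pattern, then re-append the wildcard.
    for ch in (escape_char, '%', '_'):
        prefix = prefix.replace(ch, escape_char + ch)
    return prefix + '%'

assert escape_like_prefix('50%_off') == '50\\%\\_off%'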
chriso/gauged | gauged/drivers/postgresql.py | PostgreSQLDriver.get_block | def get_block(self, namespace, offset, key):
"""Get the block identified by namespace, offset, key and
value"""
cursor = self.cursor
cursor.execute('SELECT data, flags FROM gauged_data '
'WHERE namespace = %s AND "offset" = %s AND key = %s',
(namespace, offset, key))
row = cursor.fetchone()
return (None, None) if row is None else row | python | def get_block(self, namespace, offset, key):
"""Get the block identified by namespace, offset, key and
value"""
cursor = self.cursor
cursor.execute('SELECT data, flags FROM gauged_data '
'WHERE namespace = %s AND "offset" = %s AND key = %s',
(namespace, offset, key))
row = cursor.fetchone()
return (None, None) if row is None else row | [
"def",
"get_block",
"(",
"self",
",",
"namespace",
",",
"offset",
",",
"key",
")",
":",
"cursor",
"=",
"self",
".",
"cursor",
"cursor",
".",
"execute",
"(",
"'SELECT data, flags FROM gauged_data '",
"'WHERE namespace = %s AND \"offset\" = %s AND key = %s'",
",",
"(",
"namespace",
",",
"offset",
",",
"key",
")",
")",
"row",
"=",
"cursor",
".",
"fetchone",
"(",
")",
"return",
"(",
"None",
",",
"None",
")",
"if",
"row",
"is",
"None",
"else",
"row"
] | Get the block identified by namespace, offset and key | [
"Get",
"the",
"block",
"identified",
"by",
"namespace",
"offset",
"key",
"and",
"value"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L63-L71 | train |
chriso/gauged | gauged/drivers/postgresql.py | PostgreSQLDriver.block_offset_bounds | def block_offset_bounds(self, namespace):
"""Get the minimum and maximum block offset for the specified
namespace"""
cursor = self.cursor
cursor.execute('SELECT MIN("offset"), MAX("offset") '
'FROM gauged_statistics WHERE namespace = %s',
(namespace,))
return cursor.fetchone() | python | def block_offset_bounds(self, namespace):
"""Get the minimum and maximum block offset for the specified
namespace"""
cursor = self.cursor
cursor.execute('SELECT MIN("offset"), MAX("offset") '
'FROM gauged_statistics WHERE namespace = %s',
(namespace,))
return cursor.fetchone() | [
"def",
"block_offset_bounds",
"(",
"self",
",",
"namespace",
")",
":",
"cursor",
"=",
"self",
".",
"cursor",
"cursor",
".",
"execute",
"(",
"'SELECT MIN(\"offset\"), MAX(\"offset\") '",
"'FROM gauged_statistics WHERE namespace = %s'",
",",
"(",
"namespace",
",",
")",
")",
"return",
"cursor",
".",
"fetchone",
"(",
")"
] | Get the minimum and maximum block offset for the specified
namespace | [
"Get",
"the",
"minimum",
"and",
"maximum",
"block",
"offset",
"for",
"the",
"specified",
"namespace"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L128-L135 | train |
chriso/gauged | gauged/drivers/postgresql.py | PostgreSQLDriver.set_writer_position | def set_writer_position(self, name, timestamp):
"""Insert a timestamp to keep track of the current writer position"""
execute = self.cursor.execute
execute('DELETE FROM gauged_writer_history WHERE id = %s', (name,))
execute('INSERT INTO gauged_writer_history (id, timestamp) '
'VALUES (%s, %s)', (name, timestamp,)) | python | def set_writer_position(self, name, timestamp):
"""Insert a timestamp to keep track of the current writer position"""
execute = self.cursor.execute
execute('DELETE FROM gauged_writer_history WHERE id = %s', (name,))
execute('INSERT INTO gauged_writer_history (id, timestamp) '
'VALUES (%s, %s)', (name, timestamp,)) | [
"def",
"set_writer_position",
"(",
"self",
",",
"name",
",",
"timestamp",
")",
":",
"execute",
"=",
"self",
".",
"cursor",
".",
"execute",
"execute",
"(",
"'DELETE FROM gauged_writer_history WHERE id = %s'",
",",
"(",
"name",
",",
")",
")",
"execute",
"(",
"'INSERT INTO gauged_writer_history (id, timestamp) '",
"'VALUES (%s, %s)'",
",",
"(",
"name",
",",
"timestamp",
",",
")",
")"
] | Insert a timestamp to keep track of the current writer position | [
"Insert",
"a",
"timestamp",
"to",
"keep",
"track",
"of",
"the",
"current",
"writer",
"position"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L161-L166 | train |
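The delete-then-insert pair above emulates an upsert; on PostgreSQL 9.5+ the same effect fits in one statement, assuming gauged_writer_history has a unique or primary-key constraint on id (the schema is not shown here):

UPSERT_SQL = (
    'INSERT INTO gauged_writer_history (id, timestamp) VALUES (%s, %s) '
    'ON CONFLICT (id) DO UPDATE SET timestamp = EXCLUDED.timestamp'
)
# cursor.execute(UPSERT_SQL, (name, timestamp))  # requires PostgreSQL >= 9.5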
chriso/gauged | gauged/drivers/postgresql.py | PostgreSQLDriver.add_cache | def add_cache(self, namespace, key, query_hash, length, cache):
"""Add cached values for the specified date range and query"""
start = 0
bulk_insert = self.bulk_insert
cache_len = len(cache)
row = '(%s,%s,%s,%s,%s,%s)'
query = 'INSERT INTO gauged_cache ' \
'(namespace, key, "hash", length, start, value) VALUES '
execute = self.cursor.execute
query_hash = self.psycopg2.Binary(query_hash)
while start < cache_len:
rows = cache[start:start+bulk_insert]
params = []
for timestamp, value in rows:
params.extend((namespace, key, query_hash, length,
timestamp, value))
insert = (row + ',') * (len(rows) - 1) + row
execute(query + insert, params)
start += bulk_insert
self.db.commit() | python | def add_cache(self, namespace, key, query_hash, length, cache):
"""Add cached values for the specified date range and query"""
start = 0
bulk_insert = self.bulk_insert
cache_len = len(cache)
row = '(%s,%s,%s,%s,%s,%s)'
query = 'INSERT INTO gauged_cache ' \
'(namespace, key, "hash", length, start, value) VALUES '
execute = self.cursor.execute
query_hash = self.psycopg2.Binary(query_hash)
while start < cache_len:
rows = cache[start:start+bulk_insert]
params = []
for timestamp, value in rows:
params.extend((namespace, key, query_hash, length,
timestamp, value))
insert = (row + ',') * (len(rows) - 1) + row
execute(query + insert, params)
start += bulk_insert
self.db.commit() | [
"def",
"add_cache",
"(",
"self",
",",
"namespace",
",",
"key",
",",
"query_hash",
",",
"length",
",",
"cache",
")",
":",
"start",
"=",
"0",
"bulk_insert",
"=",
"self",
".",
"bulk_insert",
"cache_len",
"=",
"len",
"(",
"cache",
")",
"row",
"=",
"'(%s,%s,%s,%s,%s,%s)'",
"query",
"=",
"'INSERT INTO gauged_cache '",
"'(namespace, key, \"hash\", length, start, value) VALUES '",
"execute",
"=",
"self",
".",
"cursor",
".",
"execute",
"query_hash",
"=",
"self",
".",
"psycopg2",
".",
"Binary",
"(",
"query_hash",
")",
"while",
"start",
"<",
"cache_len",
":",
"rows",
"=",
"cache",
"[",
"start",
":",
"start",
"+",
"bulk_insert",
"]",
"params",
"=",
"[",
"]",
"for",
"timestamp",
",",
"value",
"in",
"rows",
":",
"params",
".",
"extend",
"(",
"(",
"namespace",
",",
"key",
",",
"query_hash",
",",
"length",
",",
"timestamp",
",",
"value",
")",
")",
"insert",
"=",
"(",
"row",
"+",
"','",
")",
"*",
"(",
"len",
"(",
"rows",
")",
"-",
"1",
")",
"+",
"row",
"execute",
"(",
"query",
"+",
"insert",
",",
"params",
")",
"start",
"+=",
"bulk_insert",
"self",
".",
"db",
".",
"commit",
"(",
")"
] | Add cached values for the specified date range and query | [
"Add",
"cached",
"values",
"for",
"the",
"specified",
"date",
"range",
"and",
"query"
] | cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976 | https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/postgresql.py#L249-L268 | train |
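add_cache amortizes database round trips by packing up to self.bulk_insert rows into each multi-row VALUES clause. The same batching idea in a standalone sketch, with illustrative table and column names:

def insert_batched(cursor, rows, batch_size=500):
    # rows: sequence of (timestamp, value) pairs.
    for start in range(0, len(rows), batch_size):
        chunk = rows[start:start + batch_size]
        # One placeholder group per row, e.g. (%s,%s),(%s,%s),...
        placeholders = ','.join(['(%s,%s)'] * len(chunk))
        params = [field for pair in chunk for field in pair]
        cursor.execute(
            'INSERT INTO cache_table (start, value) VALUES ' + placeholders,
            params)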
crdoconnor/faketime | faketime/__init__.py | get_environment_vars | def get_environment_vars(filename):
"""Return a dict of environment variables required to run a service under faketime."""
if sys.platform == "linux" or sys.platform == "linux2":
return {
'LD_PRELOAD': path.join(LIBFAKETIME_DIR, "libfaketime.so.1"),
'FAKETIME_SKIP_CMDS': 'nodejs', # node doesn't seem to work in the current version.
'FAKETIME_TIMESTAMP_FILE': filename,
}
elif sys.platform == "darwin":
return {
'DYLD_INSERT_LIBRARIES': path.join(LIBFAKETIME_DIR, "libfaketime.1.dylib"),
'DYLD_FORCE_FLAT_NAMESPACE': '1',
'FAKETIME_TIMESTAMP_FILE': filename,
}
else:
raise RuntimeError("libfaketime does not support '{}' platform".format(sys.platform)) | python | def get_environment_vars(filename):
"""Return a dict of environment variables required to run a service under faketime."""
if sys.platform == "linux" or sys.platform == "linux2":
return {
'LD_PRELOAD': path.join(LIBFAKETIME_DIR, "libfaketime.so.1"),
'FAKETIME_SKIP_CMDS': 'nodejs', # node doesn't seem to work in the current version.
'FAKETIME_TIMESTAMP_FILE': filename,
}
elif sys.platform == "darwin":
return {
'DYLD_INSERT_LIBRARIES': path.join(LIBFAKETIME_DIR, "libfaketime.1.dylib"),
'DYLD_FORCE_FLAT_NAMESPACE': '1',
'FAKETIME_TIMESTAMP_FILE': filename,
}
else:
raise RuntimeError("libfaketime does not support '{}' platform".format(sys.platform)) | [
"def",
"get_environment_vars",
"(",
"filename",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"\"linux\"",
"or",
"sys",
".",
"platform",
"==",
"\"linux2\"",
":",
"return",
"{",
"'LD_PRELOAD'",
":",
"path",
".",
"join",
"(",
"LIBFAKETIME_DIR",
",",
"\"libfaketime.so.1\"",
")",
",",
"'FAKETIME_SKIP_CMDS'",
":",
"'nodejs'",
",",
"# node doesn't seem to work in the current version.",
"'FAKETIME_TIMESTAMP_FILE'",
":",
"filename",
",",
"}",
"elif",
"sys",
".",
"platform",
"==",
"\"darwin\"",
":",
"return",
"{",
"'DYLD_INSERT_LIBRARIES'",
":",
"path",
".",
"join",
"(",
"LIBFAKETIME_DIR",
",",
"\"libfaketime.1.dylib\"",
")",
",",
"'DYLD_FORCE_FLAT_NAMESPACE'",
":",
"'1'",
",",
"'FAKETIME_TIMESTAMP_FILE'",
":",
"filename",
",",
"}",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"libfaketime does not support '{}' platform\"",
".",
"format",
"(",
"sys",
".",
"platform",
")",
")"
] | Return a dict of environment variables required to run a service under faketime. | [
"Return",
"a",
"dict",
"of",
"environment",
"variables",
"required",
"to",
"run",
"a",
"service",
"under",
"faketime",
"."
] | 6e81ca070c0e601a52507b945ed45d5d42576b21 | https://github.com/crdoconnor/faketime/blob/6e81ca070c0e601a52507b945ed45d5d42576b21/faketime/__init__.py#L9-L24 | train |
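A usage sketch for the helper above: merge the returned variables into a copy of the parent environment before spawning the child. The timestamp file path is illustrative and should be written first (see change_time below):

import os
import subprocess

from faketime import get_environment_vars

env = dict(os.environ)
env.update(get_environment_vars('/tmp/faketime.txt'))  # illustrative path
subprocess.check_call(['date'], env=env)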
crdoconnor/faketime | faketime/__init__.py | change_time | def change_time(filename, newtime):
"""Change the time of a process or group of processes by writing a new time to the time file."""
with open(filename, "w") as faketimetxt_handle:
faketimetxt_handle.write("@" + newtime.strftime("%Y-%m-%d %H:%M:%S")) | python | def change_time(filename, newtime):
"""Change the time of a process or group of processes by writing a new time to the time file."""
with open(filename, "w") as faketimetxt_handle:
faketimetxt_handle.write("@" + newtime.strftime("%Y-%m-%d %H:%M:%S")) | [
"def",
"change_time",
"(",
"filename",
",",
"newtime",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"faketimetxt_handle",
":",
"faketimetxt_handle",
".",
"write",
"(",
"\"@\"",
"+",
"newtime",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
")"
] | Change the time of a process or group of processes by writing a new time to the time file. | [
"Change",
"the",
"time",
"of",
"a",
"process",
"or",
"group",
"of",
"processes",
"by",
"writing",
"a",
"new",
"time",
"to",
"the",
"time",
"file",
"."
] | 6e81ca070c0e601a52507b945ed45d5d42576b21 | https://github.com/crdoconnor/faketime/blob/6e81ca070c0e601a52507b945ed45d5d42576b21/faketime/__init__.py#L27-L30 | train |
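A companion sketch for the writer side: the '@' prefix and the '%Y-%m-%d %H:%M:%S' format come straight from the function above, and only the file path is illustrative:

from datetime import datetime

from faketime import change_time

# Pin any process watching /tmp/faketime.txt to midnight, 2020-01-01.
change_time('/tmp/faketime.txt', datetime(2020, 1, 1))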
glormph/msstitch | src/app/actions/pycolator/filters.py | filter_unique_peptides | def filter_unique_peptides(peptides, score, ns):
""" Filters unique peptides from multiple Percolator output XML files.
Takes a dir with a set of XMLs, a score to filter on and a namespace.
Outputs an ElementTree.
"""
scores = {'q': 'q_value',
'pep': 'pep',
'p': 'p_value',
'svm': 'svm_score'}
highest = {}
for el in peptides:
featscore = float(el.xpath('xmlns:%s' % scores[score],
namespaces=ns)[0].text)
seq = reader.get_peptide_seq(el, ns)
if seq not in highest:
highest[seq] = {
'pep_el': formatting.stringify_strip_namespace_declaration(
el, ns), 'score': featscore}
if score == 'svm': # greater than score is accepted
if featscore > highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
else: # lower than score is accepted
if featscore < highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
formatting.clear_el(el)
for pep in list(highest.values()):
yield pep['pep_el'] | python | def filter_unique_peptides(peptides, score, ns):
""" Filters unique peptides from multiple Percolator output XML files.
Takes a dir with a set of XMLs, a score to filter on and a namespace.
Outputs an ElementTree.
"""
scores = {'q': 'q_value',
'pep': 'pep',
'p': 'p_value',
'svm': 'svm_score'}
highest = {}
for el in peptides:
featscore = float(el.xpath('xmlns:%s' % scores[score],
namespaces=ns)[0].text)
seq = reader.get_peptide_seq(el, ns)
if seq not in highest:
highest[seq] = {
'pep_el': formatting.stringify_strip_namespace_declaration(
el, ns), 'score': featscore}
if score == 'svm': # greater than score is accepted
if featscore > highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
else: # lower than score is accepted
if featscore < highest[seq]['score']:
highest[seq] = {
'pep_el':
formatting.stringify_strip_namespace_declaration(el, ns),
'score': featscore}
formatting.clear_el(el)
for pep in list(highest.values()):
yield pep['pep_el'] | [
"def",
"filter_unique_peptides",
"(",
"peptides",
",",
"score",
",",
"ns",
")",
":",
"scores",
"=",
"{",
"'q'",
":",
"'q_value'",
",",
"'pep'",
":",
"'pep'",
",",
"'p'",
":",
"'p_value'",
",",
"'svm'",
":",
"'svm_score'",
"}",
"highest",
"=",
"{",
"}",
"for",
"el",
"in",
"peptides",
":",
"featscore",
"=",
"float",
"(",
"el",
".",
"xpath",
"(",
"'xmlns:%s'",
"%",
"scores",
"[",
"score",
"]",
",",
"namespaces",
"=",
"ns",
")",
"[",
"0",
"]",
".",
"text",
")",
"seq",
"=",
"reader",
".",
"get_peptide_seq",
"(",
"el",
",",
"ns",
")",
"if",
"seq",
"not",
"in",
"highest",
":",
"highest",
"[",
"seq",
"]",
"=",
"{",
"'pep_el'",
":",
"formatting",
".",
"stringify_strip_namespace_declaration",
"(",
"el",
",",
"ns",
")",
",",
"'score'",
":",
"featscore",
"}",
"if",
"score",
"==",
"'svm'",
":",
"# greater than score is accepted",
"if",
"featscore",
">",
"highest",
"[",
"seq",
"]",
"[",
"'score'",
"]",
":",
"highest",
"[",
"seq",
"]",
"=",
"{",
"'pep_el'",
":",
"formatting",
".",
"stringify_strip_namespace_declaration",
"(",
"el",
",",
"ns",
")",
",",
"'score'",
":",
"featscore",
"}",
"else",
":",
"# lower than score is accepted",
"if",
"featscore",
"<",
"highest",
"[",
"seq",
"]",
"[",
"'score'",
"]",
":",
"highest",
"[",
"seq",
"]",
"=",
"{",
"'pep_el'",
":",
"formatting",
".",
"stringify_strip_namespace_declaration",
"(",
"el",
",",
"ns",
")",
",",
"'score'",
":",
"featscore",
"}",
"formatting",
".",
"clear_el",
"(",
"el",
")",
"for",
"pep",
"in",
"list",
"(",
"highest",
".",
"values",
"(",
")",
")",
":",
"yield",
"pep",
"[",
"'pep_el'",
"]"
] | Filters unique peptides from multiple Percolator output XML files.
Takes a dir with a set of XMLs, a score to filter on and a namespace.
Outputs an ElementTree. | [
"Filters",
"unique",
"peptides",
"from",
"multiple",
"Percolator",
"output",
"XML",
"files",
".",
"Takes",
"a",
"dir",
"with",
"a",
"set",
"of",
"XMLs",
"a",
"score",
"to",
"filter",
"on",
"and",
"a",
"namespace",
".",
"Outputs",
"an",
"ElementTree",
"."
] | ded7e5cbd813d7797dc9d42805778266e59ff042 | https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/pycolator/filters.py#L102-L136 | train |
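Stripped of the XML plumbing, the function above is a keep-best-per-key reduction whose comparison direction depends on the metric (svm scores improve upward; q, p, and PEP values improve downward). A library-free sketch of that core, with illustrative names:

def best_per_seq(scored_seqs, higher_is_better):
    # scored_seqs: iterable of (sequence, score) pairs.
    best = {}
    for seq, score in scored_seqs:
        if seq not in best:
            best[seq] = score
        elif (score > best[seq]) if higher_is_better else (score < best[seq]):
            best[seq] = score
    return best

# q-values improve downward, so 0.002 wins:
assert best_per_seq([('PEPTIDE', 0.01), ('PEPTIDE', 0.002)], False) == {'PEPTIDE': 0.002}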
timedata-org/loady | loady/importer.py | import_symbol | def import_symbol(name=None, path=None, typename=None, base_path=None):
"""
Import a module, or a typename within a module from its name.
Arguments:
name: An absolute or relative (starts with a .) Python path
path: If name is relative, path is prepended to it.
base_path: (DEPRECATED) Same as path
typename: (DEPRECATED) Same as path
"""
_, symbol = _import(name or typename, path or base_path)
return symbol | python | def import_symbol(name=None, path=None, typename=None, base_path=None):
"""
Import a module, or a typename within a module from its name.
Arguments:
name: An absolute or relative (starts with a .) Python path
path: If name is relative, path is prepended to it.
base_path: (DEPRECATED) Same as path
typename: (DEPRECATED) Same as path
"""
_, symbol = _import(name or typename, path or base_path)
return symbol | [
"def",
"import_symbol",
"(",
"name",
"=",
"None",
",",
"path",
"=",
"None",
",",
"typename",
"=",
"None",
",",
"base_path",
"=",
"None",
")",
":",
"_",
",",
"symbol",
"=",
"_import",
"(",
"name",
"or",
"typename",
",",
"path",
"or",
"base_path",
")",
"return",
"symbol"
] | Import a module, or a typename within a module from its name.
Arguments:
name: An absolute or relative (starts with a .) Python path
path: If name is relative, path is prepended to it.
base_path: (DEPRECATED) Same as path
typename: (DEPRECATED) Same as path | [
"Import",
"a",
"module",
"or",
"a",
"typename",
"within",
"a",
"module",
"from",
"its",
"name",
"."
] | 94ffcdb92f15a28f3c85f77bd293a9cb59de4cad | https://github.com/timedata-org/loady/blob/94ffcdb92f15a28f3c85f77bd293a9cb59de4cad/loady/importer.py#L35-L47 | train |
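Hedged usage sketches of the documented interface. The exact joining of a relative name onto path happens inside the module's private _import helper, so the third call is an assumption drawn from the docstring rather than verified behavior:

from loady.importer import import_symbol

json_mod = import_symbol('json')      # a whole module
sleep = import_symbol('time.sleep')   # a symbol inside a module
dumps = import_symbol('.dumps', path='json')  # relative name joined to path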
Erotemic/utool | utool/util_win32.py | add_to_win32_PATH | def add_to_win32_PATH(script_fpath, *add_path_list):
r"""
Writes a registry script to update the PATH variable into the sync registry
CommandLine:
python -m utool.util_win32 --test-add_to_win32_PATH --newpath "C:\Program Files (x86)\Graphviz2.38\bin"
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_win32 import * # NOQA
>>> script_fpath = join(ut.truepath('~'), 'Sync/win7/registry', 'UPDATE_PATH.reg')
>>> new_path = ut.get_argval('--newpath', str, default=None)
>>> result = add_to_win32_PATH(script_fpath, new_path)
>>> print(result)
"""
import utool as ut
write_dir = dirname(script_fpath)
key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
rtype = 'REG_EXPAND_SZ'
# Read current PATH values
win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
#new_path_list = unique_ordered(win_pathlist, rob_pathlist)
print('\n'.join(new_path_list))
pathtxt = pathsep.join(new_path_list)
varval_list = [('Path', pathtxt)]
regfile_str = make_regfile_str(key, varval_list, rtype)
ut.view_directory(write_dir)
print(regfile_str)
ut.writeto(script_fpath, regfile_str, mode='wb')
print('Please have an admin run the script. You may need to restart') | python | def add_to_win32_PATH(script_fpath, *add_path_list):
r"""
Writes a registry script to update the PATH variable into the sync registry
CommandLine:
python -m utool.util_win32 --test-add_to_win32_PATH --newpath "C:\Program Files (x86)\Graphviz2.38\bin"
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_win32 import * # NOQA
>>> script_fpath = join(ut.truepath('~'), 'Sync/win7/registry', 'UPDATE_PATH.reg')
>>> new_path = ut.get_argval('--newpath', str, default=None)
>>> result = add_to_win32_PATH(script_fpath, new_path)
>>> print(result)
"""
import utool as ut
write_dir = dirname(script_fpath)
key = '[HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager\Environment]'
rtype = 'REG_EXPAND_SZ'
# Read current PATH values
win_pathlist = list(os.environ['PATH'].split(os.path.pathsep))
new_path_list = ut.unique_ordered(win_pathlist + list(add_path_list))
#new_path_list = unique_ordered(win_pathlist, rob_pathlist)
print('\n'.join(new_path_list))
pathtxt = pathsep.join(new_path_list)
varval_list = [('Path', pathtxt)]
regfile_str = make_regfile_str(key, varval_list, rtype)
ut.view_directory(write_dir)
print(regfile_str)
ut.writeto(script_fpath, regfile_str, mode='wb')
print('Please have an admin run the script. You may need to restart') | [
"def",
"add_to_win32_PATH",
"(",
"script_fpath",
",",
"*",
"add_path_list",
")",
":",
"import",
"utool",
"as",
"ut",
"write_dir",
"=",
"dirname",
"(",
"script_fpath",
")",
"key",
"=",
"'[HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment]'",
"rtype",
"=",
"'REG_EXPAND_SZ'",
"# Read current PATH values",
"win_pathlist",
"=",
"list",
"(",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
".",
"split",
"(",
"os",
".",
"path",
".",
"pathsep",
")",
")",
"new_path_list",
"=",
"ut",
".",
"unique_ordered",
"(",
"win_pathlist",
"+",
"list",
"(",
"add_path_list",
")",
")",
"#new_path_list = unique_ordered(win_pathlist, rob_pathlist)",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"new_path_list",
")",
")",
"pathtxt",
"=",
"pathsep",
".",
"join",
"(",
"new_path_list",
")",
"varval_list",
"=",
"[",
"(",
"'Path'",
",",
"pathtxt",
")",
"]",
"regfile_str",
"=",
"make_regfile_str",
"(",
"key",
",",
"varval_list",
",",
"rtype",
")",
"ut",
".",
"view_directory",
"(",
"write_dir",
")",
"print",
"(",
"regfile_str",
")",
"ut",
".",
"writeto",
"(",
"script_fpath",
",",
"regfile_str",
",",
"mode",
"=",
"'wb'",
")",
"print",
"(",
"'Please have an admin run the script. You may need to restart'",
")"
] | r"""
Writes a registry script to update the PATH variable into the sync registry
CommandLine:
python -m utool.util_win32 --test-add_to_win32_PATH --newpath "C:\Program Files (x86)\Graphviz2.38\bin"
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> from utool.util_win32 import * # NOQA
>>> script_fpath = join(ut.truepath('~'), 'Sync/win7/registry', 'UPDATE_PATH.reg')
>>> new_path = ut.get_argval('--newpath', str, default=None)
>>> result = add_to_win32_PATH(script_fpath, new_path)
>>> print(result) | [
"r",
"Writes",
"a",
"registry",
"script",
"to",
"update",
"the",
"PATH",
"variable",
"into",
"the",
"sync",
"registry"
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_win32.py#L60-L91 | train |
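A usage sketch mirroring the CommandLine example in the docstring above (Windows-only; the output file name and the directory being added are illustrative):

from utool.util_win32 import add_to_win32_PATH

add_to_win32_PATH(
    'UPDATE_PATH.reg',
    r'C:\Program Files (x86)\Graphviz2.38\bin')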
Erotemic/utool | utool/util_dict.py | dzip | def dzip(list1, list2):
r"""
Zips elementwise pairs between list1 and list2 into a dictionary. Values
from list2 can be broadcast onto list1.
Args:
list1 (sequence): full sequence
list2 (sequence): can either be a sequence of one item or a sequence of
equal length to `list1`
SeeAlso:
util_list.broadcast_zip
Returns:
dict: similar to dict(zip(list1, list2))
CommandLine:
python -m utool.util_dict dzip
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])
>>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [], [4])
>>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5])
"""
try:
len(list1)
except TypeError:
list1 = list(list1)
try:
len(list2)
except TypeError:
list2 = list(list2)
if len(list1) == 0 and len(list2) == 1:
# Corner case:
# allow the first list to be empty and the second list to broadcast a
# value. This means that the equality check wont work for the case
# where list1 and list2 are supposed to correspond, but the length of
# list2 is 1.
list2 = []
if len(list2) == 1 and len(list1) > 1:
list2 = list2 * len(list1)
if len(list1) != len(list2):
raise ValueError('out of alignment len(list1)=%r, len(list2)=%r' % (
len(list1), len(list2)))
return dict(zip(list1, list2)) | python | def dzip(list1, list2):
r"""
Zips elementwise pairs between list1 and list2 into a dictionary. Values
from list2 can be broadcast onto list1.
Args:
list1 (sequence): full sequence
list2 (sequence): can either be a sequence of one item or a sequence of
equal length to `list1`
SeeAlso:
util_list.broadcast_zip
Returns:
dict: similar to dict(zip(list1, list2))
CommandLine:
python -m utool.util_dict dzip
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])
>>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [], [4])
>>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5])
"""
try:
len(list1)
except TypeError:
list1 = list(list1)
try:
len(list2)
except TypeError:
list2 = list(list2)
if len(list1) == 0 and len(list2) == 1:
# Corner case:
# allow the first list to be empty and the second list to broadcast a
# value. This means that the equality check wont work for the case
# where list1 and list2 are supposed to correspond, but the length of
# list2 is 1.
list2 = []
if len(list2) == 1 and len(list1) > 1:
list2 = list2 * len(list1)
if len(list1) != len(list2):
raise ValueError('out of alignment len(list1)=%r, len(list2)=%r' % (
len(list1), len(list2)))
return dict(zip(list1, list2)) | [
"def",
"dzip",
"(",
"list1",
",",
"list2",
")",
":",
"try",
":",
"len",
"(",
"list1",
")",
"except",
"TypeError",
":",
"list1",
"=",
"list",
"(",
"list1",
")",
"try",
":",
"len",
"(",
"list2",
")",
"except",
"TypeError",
":",
"list2",
"=",
"list",
"(",
"list2",
")",
"if",
"len",
"(",
"list1",
")",
"==",
"0",
"and",
"len",
"(",
"list2",
")",
"==",
"1",
":",
"# Corner case:",
"# allow the first list to be empty and the second list to broadcast a",
"# value. This means that the equality check wont work for the case",
"# where list1 and list2 are supposed to correspond, but the length of",
"# list2 is 1.",
"list2",
"=",
"[",
"]",
"if",
"len",
"(",
"list2",
")",
"==",
"1",
"and",
"len",
"(",
"list1",
")",
">",
"1",
":",
"list2",
"=",
"list2",
"*",
"len",
"(",
"list1",
")",
"if",
"len",
"(",
"list1",
")",
"!=",
"len",
"(",
"list2",
")",
":",
"raise",
"ValueError",
"(",
"'out of alignment len(list1)=%r, len(list2)=%r'",
"%",
"(",
"len",
"(",
"list1",
")",
",",
"len",
"(",
"list2",
")",
")",
")",
"return",
"dict",
"(",
"zip",
"(",
"list1",
",",
"list2",
")",
")"
] | r"""
Zips elementwise pairs between list1 and list2 into a dictionary. Values
from list2 can be broadcast onto list1.
Args:
list1 (sequence): full sequence
list2 (sequence): can either be a sequence of one item or a sequence of
equal length to `list1`
SeeAlso:
util_list.broadcast_zip
Returns:
dict: similar to dict(zip(list1, list2))
CommandLine:
python -m utool.util_dict dzip
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])
>>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [], [4])
>>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5]) | [
"r",
"Zips",
"elementwise",
"pairs",
"between",
"list1",
"and",
"list2",
"into",
"a",
"dictionary",
".",
"Values",
"from",
"list2",
"can",
"be",
"broadcast",
"onto",
"list1",
"."
] | 3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L25-L76 | train |