repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
Erotemic/utool
utool/util_str.py
get_textdiff
def get_textdiff(text1, text2, num_context_lines=0, ignore_whitespace=False): r""" Uses difflib to return a difference string between two similar texts Args: text1 (str): text2 (str): Returns: str: formatted difference text message SeeAlso: ut.color_diff_text References: http://www.java2s.com/Code/Python/Utility/IntelligentdiffbetweentextfilesTimPeters.htm CommandLine: python -m utool.util_str --test-get_textdiff:1 python -m utool.util_str --test-get_textdiff:0 Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> # build test data >>> text1 = 'one\ntwo\nthree' >>> text2 = 'one\ntwo\nfive' >>> # execute function >>> result = get_textdiff(text1, text2) >>> # verify results >>> print(result) - three + five Example2: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> # build test data >>> text1 = 'one\ntwo\nthree\n3.1\n3.14\n3.1415\npi\n3.4\n3.5\n4' >>> text2 = 'one\ntwo\nfive\n3.1\n3.14\n3.1415\npi\n3.4\n4' >>> # execute function >>> num_context_lines = 1 >>> result = get_textdiff(text1, text2, num_context_lines) >>> # verify results >>> print(result) """ import difflib text1 = ensure_unicode(text1) text2 = ensure_unicode(text2) text1_lines = text1.splitlines() text2_lines = text2.splitlines() if ignore_whitespace: text1_lines = [t.rstrip() for t in text1_lines] text2_lines = [t.rstrip() for t in text2_lines] ndiff_kw = dict(linejunk=difflib.IS_LINE_JUNK, charjunk=difflib.IS_CHARACTER_JUNK) else: ndiff_kw = {} all_diff_lines = list(difflib.ndiff(text1_lines, text2_lines, **ndiff_kw)) if num_context_lines is None: diff_lines = all_diff_lines else: from utool import util_list # boolean for every line if it is marked or not ismarked_list = [len(line) > 0 and line[0] in '+-?' for line in all_diff_lines] # flag lines that are within num_context_lines away from a diff line isvalid_list = ismarked_list[:] for i in range(1, num_context_lines + 1): isvalid_list[:-i] = util_list.or_lists(isvalid_list[:-i], ismarked_list[i:]) isvalid_list[i:] = util_list.or_lists(isvalid_list[i:], ismarked_list[:-i]) USE_BREAK_LINE = True if USE_BREAK_LINE: # insert a visual break when there is a break in context diff_lines = [] prev = False visual_break = '\n <... FILTERED CONTEXT ...> \n' #print(isvalid_list) for line, valid in zip(all_diff_lines, isvalid_list): if valid: diff_lines.append(line) elif prev: if False: diff_lines.append(visual_break) prev = valid else: diff_lines = util_list.compress(all_diff_lines, isvalid_list) return '\n'.join(diff_lines)
python
def get_textdiff(text1, text2, num_context_lines=0, ignore_whitespace=False): r""" Uses difflib to return a difference string between two similar texts Args: text1 (str): text2 (str): Returns: str: formatted difference text message SeeAlso: ut.color_diff_text References: http://www.java2s.com/Code/Python/Utility/IntelligentdiffbetweentextfilesTimPeters.htm CommandLine: python -m utool.util_str --test-get_textdiff:1 python -m utool.util_str --test-get_textdiff:0 Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> # build test data >>> text1 = 'one\ntwo\nthree' >>> text2 = 'one\ntwo\nfive' >>> # execute function >>> result = get_textdiff(text1, text2) >>> # verify results >>> print(result) - three + five Example2: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> # build test data >>> text1 = 'one\ntwo\nthree\n3.1\n3.14\n3.1415\npi\n3.4\n3.5\n4' >>> text2 = 'one\ntwo\nfive\n3.1\n3.14\n3.1415\npi\n3.4\n4' >>> # execute function >>> num_context_lines = 1 >>> result = get_textdiff(text1, text2, num_context_lines) >>> # verify results >>> print(result) """ import difflib text1 = ensure_unicode(text1) text2 = ensure_unicode(text2) text1_lines = text1.splitlines() text2_lines = text2.splitlines() if ignore_whitespace: text1_lines = [t.rstrip() for t in text1_lines] text2_lines = [t.rstrip() for t in text2_lines] ndiff_kw = dict(linejunk=difflib.IS_LINE_JUNK, charjunk=difflib.IS_CHARACTER_JUNK) else: ndiff_kw = {} all_diff_lines = list(difflib.ndiff(text1_lines, text2_lines, **ndiff_kw)) if num_context_lines is None: diff_lines = all_diff_lines else: from utool import util_list # boolean for every line if it is marked or not ismarked_list = [len(line) > 0 and line[0] in '+-?' for line in all_diff_lines] # flag lines that are within num_context_lines away from a diff line isvalid_list = ismarked_list[:] for i in range(1, num_context_lines + 1): isvalid_list[:-i] = util_list.or_lists(isvalid_list[:-i], ismarked_list[i:]) isvalid_list[i:] = util_list.or_lists(isvalid_list[i:], ismarked_list[:-i]) USE_BREAK_LINE = True if USE_BREAK_LINE: # insert a visual break when there is a break in context diff_lines = [] prev = False visual_break = '\n <... FILTERED CONTEXT ...> \n' #print(isvalid_list) for line, valid in zip(all_diff_lines, isvalid_list): if valid: diff_lines.append(line) elif prev: if False: diff_lines.append(visual_break) prev = valid else: diff_lines = util_list.compress(all_diff_lines, isvalid_list) return '\n'.join(diff_lines)
[ "def", "get_textdiff", "(", "text1", ",", "text2", ",", "num_context_lines", "=", "0", ",", "ignore_whitespace", "=", "False", ")", ":", "import", "difflib", "text1", "=", "ensure_unicode", "(", "text1", ")", "text2", "=", "ensure_unicode", "(", "text2", ")", "text1_lines", "=", "text1", ".", "splitlines", "(", ")", "text2_lines", "=", "text2", ".", "splitlines", "(", ")", "if", "ignore_whitespace", ":", "text1_lines", "=", "[", "t", ".", "rstrip", "(", ")", "for", "t", "in", "text1_lines", "]", "text2_lines", "=", "[", "t", ".", "rstrip", "(", ")", "for", "t", "in", "text2_lines", "]", "ndiff_kw", "=", "dict", "(", "linejunk", "=", "difflib", ".", "IS_LINE_JUNK", ",", "charjunk", "=", "difflib", ".", "IS_CHARACTER_JUNK", ")", "else", ":", "ndiff_kw", "=", "{", "}", "all_diff_lines", "=", "list", "(", "difflib", ".", "ndiff", "(", "text1_lines", ",", "text2_lines", ",", "*", "*", "ndiff_kw", ")", ")", "if", "num_context_lines", "is", "None", ":", "diff_lines", "=", "all_diff_lines", "else", ":", "from", "utool", "import", "util_list", "# boolean for every line if it is marked or not", "ismarked_list", "=", "[", "len", "(", "line", ")", ">", "0", "and", "line", "[", "0", "]", "in", "'+-?'", "for", "line", "in", "all_diff_lines", "]", "# flag lines that are within num_context_lines away from a diff line", "isvalid_list", "=", "ismarked_list", "[", ":", "]", "for", "i", "in", "range", "(", "1", ",", "num_context_lines", "+", "1", ")", ":", "isvalid_list", "[", ":", "-", "i", "]", "=", "util_list", ".", "or_lists", "(", "isvalid_list", "[", ":", "-", "i", "]", ",", "ismarked_list", "[", "i", ":", "]", ")", "isvalid_list", "[", "i", ":", "]", "=", "util_list", ".", "or_lists", "(", "isvalid_list", "[", "i", ":", "]", ",", "ismarked_list", "[", ":", "-", "i", "]", ")", "USE_BREAK_LINE", "=", "True", "if", "USE_BREAK_LINE", ":", "# insert a visual break when there is a break in context", "diff_lines", "=", "[", "]", "prev", "=", "False", "visual_break", "=", "'\\n <... FILTERED CONTEXT ...> \\n'", "#print(isvalid_list)", "for", "line", ",", "valid", "in", "zip", "(", "all_diff_lines", ",", "isvalid_list", ")", ":", "if", "valid", ":", "diff_lines", ".", "append", "(", "line", ")", "elif", "prev", ":", "if", "False", ":", "diff_lines", ".", "append", "(", "visual_break", ")", "prev", "=", "valid", "else", ":", "diff_lines", "=", "util_list", ".", "compress", "(", "all_diff_lines", ",", "isvalid_list", ")", "return", "'\\n'", ".", "join", "(", "diff_lines", ")" ]
r""" Uses difflib to return a difference string between two similar texts Args: text1 (str): text2 (str): Returns: str: formatted difference text message SeeAlso: ut.color_diff_text References: http://www.java2s.com/Code/Python/Utility/IntelligentdiffbetweentextfilesTimPeters.htm CommandLine: python -m utool.util_str --test-get_textdiff:1 python -m utool.util_str --test-get_textdiff:0 Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> # build test data >>> text1 = 'one\ntwo\nthree' >>> text2 = 'one\ntwo\nfive' >>> # execute function >>> result = get_textdiff(text1, text2) >>> # verify results >>> print(result) - three + five Example2: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> # build test data >>> text1 = 'one\ntwo\nthree\n3.1\n3.14\n3.1415\npi\n3.4\n3.5\n4' >>> text2 = 'one\ntwo\nfive\n3.1\n3.14\n3.1415\npi\n3.4\n4' >>> # execute function >>> num_context_lines = 1 >>> result = get_textdiff(text1, text2, num_context_lines) >>> # verify results >>> print(result)
[ "r", "Uses", "difflib", "to", "return", "a", "difference", "string", "between", "two", "similar", "texts" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2343-L2433
train
Erotemic/utool
utool/util_str.py
conj_phrase
def conj_phrase(list_, cond='or'): """ Joins a list of words using English conjunction rules Args: list_ (list): of strings cond (str): a conjunction (or, and, but) Returns: str: the joined cconjunction phrase References: http://en.wikipedia.org/wiki/Conjunction_(grammar) Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b', 'c'] >>> result = conj_phrase(list_, 'or') >>> print(result) a, b, or c Example1: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b'] >>> result = conj_phrase(list_, 'and') >>> print(result) a and b """ if len(list_) == 0: return '' elif len(list_) == 1: return list_[0] elif len(list_) == 2: return ' '.join((list_[0], cond, list_[1])) else: condstr = ''.join((', ' + cond, ' ')) return ', '.join((', '.join(list_[:-2]), condstr.join(list_[-2:])))
python
def conj_phrase(list_, cond='or'): """ Joins a list of words using English conjunction rules Args: list_ (list): of strings cond (str): a conjunction (or, and, but) Returns: str: the joined cconjunction phrase References: http://en.wikipedia.org/wiki/Conjunction_(grammar) Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b', 'c'] >>> result = conj_phrase(list_, 'or') >>> print(result) a, b, or c Example1: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b'] >>> result = conj_phrase(list_, 'and') >>> print(result) a and b """ if len(list_) == 0: return '' elif len(list_) == 1: return list_[0] elif len(list_) == 2: return ' '.join((list_[0], cond, list_[1])) else: condstr = ''.join((', ' + cond, ' ')) return ', '.join((', '.join(list_[:-2]), condstr.join(list_[-2:])))
[ "def", "conj_phrase", "(", "list_", ",", "cond", "=", "'or'", ")", ":", "if", "len", "(", "list_", ")", "==", "0", ":", "return", "''", "elif", "len", "(", "list_", ")", "==", "1", ":", "return", "list_", "[", "0", "]", "elif", "len", "(", "list_", ")", "==", "2", ":", "return", "' '", ".", "join", "(", "(", "list_", "[", "0", "]", ",", "cond", ",", "list_", "[", "1", "]", ")", ")", "else", ":", "condstr", "=", "''", ".", "join", "(", "(", "', '", "+", "cond", ",", "' '", ")", ")", "return", "', '", ".", "join", "(", "(", "', '", ".", "join", "(", "list_", "[", ":", "-", "2", "]", ")", ",", "condstr", ".", "join", "(", "list_", "[", "-", "2", ":", "]", ")", ")", ")" ]
Joins a list of words using English conjunction rules Args: list_ (list): of strings cond (str): a conjunction (or, and, but) Returns: str: the joined cconjunction phrase References: http://en.wikipedia.org/wiki/Conjunction_(grammar) Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b', 'c'] >>> result = conj_phrase(list_, 'or') >>> print(result) a, b, or c Example1: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> list_ = ['a', 'b'] >>> result = conj_phrase(list_, 'and') >>> print(result) a and b
[ "Joins", "a", "list", "of", "words", "using", "English", "conjunction", "rules" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2439-L2477
train
Erotemic/utool
utool/util_str.py
bubbletext
def bubbletext(text, font='cybermedium'): r""" Uses pyfiglet to create bubble text. Args: font (str): default=cybermedium, other fonts include: cybersmall and cyberlarge. References: http://www.figlet.org/ Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> bubble_text = ut.bubbletext('TESTING BUBBLE TEXT', font='cybermedium') >>> print(bubble_text) """ import utool as ut pyfiglet = ut.tryimport('pyfiglet', 'git+https://github.com/pwaller/pyfiglet') if pyfiglet is None: return text else: bubble_text = pyfiglet.figlet_format(text, font=font) return bubble_text
python
def bubbletext(text, font='cybermedium'): r""" Uses pyfiglet to create bubble text. Args: font (str): default=cybermedium, other fonts include: cybersmall and cyberlarge. References: http://www.figlet.org/ Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> bubble_text = ut.bubbletext('TESTING BUBBLE TEXT', font='cybermedium') >>> print(bubble_text) """ import utool as ut pyfiglet = ut.tryimport('pyfiglet', 'git+https://github.com/pwaller/pyfiglet') if pyfiglet is None: return text else: bubble_text = pyfiglet.figlet_format(text, font=font) return bubble_text
[ "def", "bubbletext", "(", "text", ",", "font", "=", "'cybermedium'", ")", ":", "import", "utool", "as", "ut", "pyfiglet", "=", "ut", ".", "tryimport", "(", "'pyfiglet'", ",", "'git+https://github.com/pwaller/pyfiglet'", ")", "if", "pyfiglet", "is", "None", ":", "return", "text", "else", ":", "bubble_text", "=", "pyfiglet", ".", "figlet_format", "(", "text", ",", "font", "=", "font", ")", "return", "bubble_text" ]
r""" Uses pyfiglet to create bubble text. Args: font (str): default=cybermedium, other fonts include: cybersmall and cyberlarge. References: http://www.figlet.org/ Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> bubble_text = ut.bubbletext('TESTING BUBBLE TEXT', font='cybermedium') >>> print(bubble_text)
[ "r", "Uses", "pyfiglet", "to", "create", "bubble", "text", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2563-L2586
train
Erotemic/utool
utool/util_str.py
is_url
def is_url(str_): """ heuristic check if str is url formatted """ return any([ str_.startswith('http://'), str_.startswith('https://'), str_.startswith('www.'), '.org/' in str_, '.com/' in str_, ])
python
def is_url(str_): """ heuristic check if str is url formatted """ return any([ str_.startswith('http://'), str_.startswith('https://'), str_.startswith('www.'), '.org/' in str_, '.com/' in str_, ])
[ "def", "is_url", "(", "str_", ")", ":", "return", "any", "(", "[", "str_", ".", "startswith", "(", "'http://'", ")", ",", "str_", ".", "startswith", "(", "'https://'", ")", ",", "str_", ".", "startswith", "(", "'www.'", ")", ",", "'.org/'", "in", "str_", ",", "'.com/'", "in", "str_", ",", "]", ")" ]
heuristic check if str is url formatted
[ "heuristic", "check", "if", "str", "is", "url", "formatted" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2685-L2693
train
Erotemic/utool
utool/util_str.py
chr_range
def chr_range(*args, **kw): r""" Like range but returns characters Args: start (None): (default = None) stop (None): (default = None) step (None): (default = None) Kwargs: base (str): charater to start with (default='a') Returns: list: list of characters CommandLine: python -m utool.util_str --exec-chr_range Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> import utool as ut >>> args = (5,) >>> result = ut.repr2(chr_range(2, base='a')) >>> print(chr_range(0, 5)) >>> print(chr_range(0, 50)) >>> print(chr_range(0, 5, 2)) >>> print(result) ['a', 'b'] """ if len(args) == 1: stop, = args start, step = 0, 1 elif len(args) == 2: start, stop = args step = 1 elif len(args) == 3: start, stop, step = args else: raise ValueError('incorrect args') chr_ = six.unichr base = ord(kw.get('base', 'a')) if isinstance(start, int): start = base + start if isinstance(stop, int): stop = base + stop if isinstance(start, six.string_types): start = ord(start) if isinstance(stop, six.string_types): stop = ord(stop) if step is None: step = 1 list_ = list(map(six.text_type, map(chr_, range(start, stop, step)))) return list_
python
def chr_range(*args, **kw): r""" Like range but returns characters Args: start (None): (default = None) stop (None): (default = None) step (None): (default = None) Kwargs: base (str): charater to start with (default='a') Returns: list: list of characters CommandLine: python -m utool.util_str --exec-chr_range Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> import utool as ut >>> args = (5,) >>> result = ut.repr2(chr_range(2, base='a')) >>> print(chr_range(0, 5)) >>> print(chr_range(0, 50)) >>> print(chr_range(0, 5, 2)) >>> print(result) ['a', 'b'] """ if len(args) == 1: stop, = args start, step = 0, 1 elif len(args) == 2: start, stop = args step = 1 elif len(args) == 3: start, stop, step = args else: raise ValueError('incorrect args') chr_ = six.unichr base = ord(kw.get('base', 'a')) if isinstance(start, int): start = base + start if isinstance(stop, int): stop = base + stop if isinstance(start, six.string_types): start = ord(start) if isinstance(stop, six.string_types): stop = ord(stop) if step is None: step = 1 list_ = list(map(six.text_type, map(chr_, range(start, stop, step)))) return list_
[ "def", "chr_range", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "len", "(", "args", ")", "==", "1", ":", "stop", ",", "=", "args", "start", ",", "step", "=", "0", ",", "1", "elif", "len", "(", "args", ")", "==", "2", ":", "start", ",", "stop", "=", "args", "step", "=", "1", "elif", "len", "(", "args", ")", "==", "3", ":", "start", ",", "stop", ",", "step", "=", "args", "else", ":", "raise", "ValueError", "(", "'incorrect args'", ")", "chr_", "=", "six", ".", "unichr", "base", "=", "ord", "(", "kw", ".", "get", "(", "'base'", ",", "'a'", ")", ")", "if", "isinstance", "(", "start", ",", "int", ")", ":", "start", "=", "base", "+", "start", "if", "isinstance", "(", "stop", ",", "int", ")", ":", "stop", "=", "base", "+", "stop", "if", "isinstance", "(", "start", ",", "six", ".", "string_types", ")", ":", "start", "=", "ord", "(", "start", ")", "if", "isinstance", "(", "stop", ",", "six", ".", "string_types", ")", ":", "stop", "=", "ord", "(", "stop", ")", "if", "step", "is", "None", ":", "step", "=", "1", "list_", "=", "list", "(", "map", "(", "six", ".", "text_type", ",", "map", "(", "chr_", ",", "range", "(", "start", ",", "stop", ",", "step", ")", ")", ")", ")", "return", "list_" ]
r""" Like range but returns characters Args: start (None): (default = None) stop (None): (default = None) step (None): (default = None) Kwargs: base (str): charater to start with (default='a') Returns: list: list of characters CommandLine: python -m utool.util_str --exec-chr_range Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> import utool as ut >>> args = (5,) >>> result = ut.repr2(chr_range(2, base='a')) >>> print(chr_range(0, 5)) >>> print(chr_range(0, 50)) >>> print(chr_range(0, 5, 2)) >>> print(result) ['a', 'b']
[ "r", "Like", "range", "but", "returns", "characters" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2772-L2828
train
Erotemic/utool
utool/util_str.py
highlight_regex
def highlight_regex(str_, pat, reflags=0, color='red'): """ FIXME Use pygments instead """ #import colorama # from colorama import Fore, Style #color = Fore.MAGENTA # color = Fore.RED #match = re.search(pat, str_, flags=reflags) matches = list(re.finditer(pat, str_, flags=reflags)) colored = str_ for match in reversed(matches): #pass #if match is None: # return str_ #else: start = match.start() end = match.end() #colorama.init() colored_part = color_text(colored[start:end], color) colored = colored[:start] + colored_part + colored[end:] # colored = (colored[:start] + color + colored[start:end] + # Style.RESET_ALL + colored[end:]) #colorama.deinit() return colored
python
def highlight_regex(str_, pat, reflags=0, color='red'): """ FIXME Use pygments instead """ #import colorama # from colorama import Fore, Style #color = Fore.MAGENTA # color = Fore.RED #match = re.search(pat, str_, flags=reflags) matches = list(re.finditer(pat, str_, flags=reflags)) colored = str_ for match in reversed(matches): #pass #if match is None: # return str_ #else: start = match.start() end = match.end() #colorama.init() colored_part = color_text(colored[start:end], color) colored = colored[:start] + colored_part + colored[end:] # colored = (colored[:start] + color + colored[start:end] + # Style.RESET_ALL + colored[end:]) #colorama.deinit() return colored
[ "def", "highlight_regex", "(", "str_", ",", "pat", ",", "reflags", "=", "0", ",", "color", "=", "'red'", ")", ":", "#import colorama", "# from colorama import Fore, Style", "#color = Fore.MAGENTA", "# color = Fore.RED", "#match = re.search(pat, str_, flags=reflags)", "matches", "=", "list", "(", "re", ".", "finditer", "(", "pat", ",", "str_", ",", "flags", "=", "reflags", ")", ")", "colored", "=", "str_", "for", "match", "in", "reversed", "(", "matches", ")", ":", "#pass", "#if match is None:", "# return str_", "#else:", "start", "=", "match", ".", "start", "(", ")", "end", "=", "match", ".", "end", "(", ")", "#colorama.init()", "colored_part", "=", "color_text", "(", "colored", "[", "start", ":", "end", "]", ",", "color", ")", "colored", "=", "colored", "[", ":", "start", "]", "+", "colored_part", "+", "colored", "[", "end", ":", "]", "# colored = (colored[:start] + color + colored[start:end] +", "# Style.RESET_ALL + colored[end:])", "#colorama.deinit()", "return", "colored" ]
FIXME Use pygments instead
[ "FIXME", "Use", "pygments", "instead" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2912-L2938
train
Erotemic/utool
utool/util_str.py
highlight_multi_regex
def highlight_multi_regex(str_, pat_to_color, reflags=0): """ FIXME Use pygments instead. must be mututally exclusive """ #import colorama # from colorama import Fore, Style #color = Fore.MAGENTA # color = Fore.RED #match = re.search(pat, str_, flags=reflags) colored = str_ to_replace = [] for pat, color in pat_to_color.items(): matches = list(re.finditer(pat, str_, flags=reflags)) for match in matches: start = match.start() end = match.end() to_replace.append((end, start, color)) for tup in reversed(sorted(to_replace)): end, start, color = tup colored_part = color_text(colored[start:end], color) colored = colored[:start] + colored_part + colored[end:] return colored
python
def highlight_multi_regex(str_, pat_to_color, reflags=0): """ FIXME Use pygments instead. must be mututally exclusive """ #import colorama # from colorama import Fore, Style #color = Fore.MAGENTA # color = Fore.RED #match = re.search(pat, str_, flags=reflags) colored = str_ to_replace = [] for pat, color in pat_to_color.items(): matches = list(re.finditer(pat, str_, flags=reflags)) for match in matches: start = match.start() end = match.end() to_replace.append((end, start, color)) for tup in reversed(sorted(to_replace)): end, start, color = tup colored_part = color_text(colored[start:end], color) colored = colored[:start] + colored_part + colored[end:] return colored
[ "def", "highlight_multi_regex", "(", "str_", ",", "pat_to_color", ",", "reflags", "=", "0", ")", ":", "#import colorama", "# from colorama import Fore, Style", "#color = Fore.MAGENTA", "# color = Fore.RED", "#match = re.search(pat, str_, flags=reflags)", "colored", "=", "str_", "to_replace", "=", "[", "]", "for", "pat", ",", "color", "in", "pat_to_color", ".", "items", "(", ")", ":", "matches", "=", "list", "(", "re", ".", "finditer", "(", "pat", ",", "str_", ",", "flags", "=", "reflags", ")", ")", "for", "match", "in", "matches", ":", "start", "=", "match", ".", "start", "(", ")", "end", "=", "match", ".", "end", "(", ")", "to_replace", ".", "append", "(", "(", "end", ",", "start", ",", "color", ")", ")", "for", "tup", "in", "reversed", "(", "sorted", "(", "to_replace", ")", ")", ":", "end", ",", "start", ",", "color", "=", "tup", "colored_part", "=", "color_text", "(", "colored", "[", "start", ":", "end", "]", ",", "color", ")", "colored", "=", "colored", "[", ":", "start", "]", "+", "colored_part", "+", "colored", "[", "end", ":", "]", "return", "colored" ]
FIXME Use pygments instead. must be mututally exclusive
[ "FIXME", "Use", "pygments", "instead", ".", "must", "be", "mututally", "exclusive" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L2941-L2967
train
Erotemic/utool
utool/util_str.py
find_block_end
def find_block_end(row, line_list, sentinal, direction=1): """ Searches up and down until it finds the endpoints of a block Rectify with find_paragraph_end in pyvim_funcs """ import re row_ = row line_ = line_list[row_] flag1 = row_ == 0 or row_ == len(line_list) - 1 flag2 = re.match(sentinal, line_) if not (flag1 or flag2): while True: if (row_ == 0 or row_ == len(line_list) - 1): break line_ = line_list[row_] if re.match(sentinal, line_): break row_ += direction return row_
python
def find_block_end(row, line_list, sentinal, direction=1): """ Searches up and down until it finds the endpoints of a block Rectify with find_paragraph_end in pyvim_funcs """ import re row_ = row line_ = line_list[row_] flag1 = row_ == 0 or row_ == len(line_list) - 1 flag2 = re.match(sentinal, line_) if not (flag1 or flag2): while True: if (row_ == 0 or row_ == len(line_list) - 1): break line_ = line_list[row_] if re.match(sentinal, line_): break row_ += direction return row_
[ "def", "find_block_end", "(", "row", ",", "line_list", ",", "sentinal", ",", "direction", "=", "1", ")", ":", "import", "re", "row_", "=", "row", "line_", "=", "line_list", "[", "row_", "]", "flag1", "=", "row_", "==", "0", "or", "row_", "==", "len", "(", "line_list", ")", "-", "1", "flag2", "=", "re", ".", "match", "(", "sentinal", ",", "line_", ")", "if", "not", "(", "flag1", "or", "flag2", ")", ":", "while", "True", ":", "if", "(", "row_", "==", "0", "or", "row_", "==", "len", "(", "line_list", ")", "-", "1", ")", ":", "break", "line_", "=", "line_list", "[", "row_", "]", "if", "re", ".", "match", "(", "sentinal", ",", "line_", ")", ":", "break", "row_", "+=", "direction", "return", "row_" ]
Searches up and down until it finds the endpoints of a block Rectify with find_paragraph_end in pyvim_funcs
[ "Searches", "up", "and", "down", "until", "it", "finds", "the", "endpoints", "of", "a", "block", "Rectify", "with", "find_paragraph_end", "in", "pyvim_funcs" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_str.py#L3492-L3510
train
Erotemic/utool
utool/util_latex.py
compress_pdf
def compress_pdf(pdf_fpath, output_fname=None): """ uses ghostscript to write a pdf """ import utool as ut ut.assertpath(pdf_fpath) suffix = '_' + ut.get_datestamp(False) + '_compressed' print('pdf_fpath = %r' % (pdf_fpath,)) output_pdf_fpath = ut.augpath(pdf_fpath, suffix, newfname=output_fname) print('output_pdf_fpath = %r' % (output_pdf_fpath,)) gs_exe = find_ghostscript_exe() cmd_list = ( gs_exe, '-sDEVICE=pdfwrite', '-dCompatibilityLevel=1.4', '-dNOPAUSE', '-dQUIET', '-dBATCH', '-sOutputFile=' + output_pdf_fpath, pdf_fpath ) ut.cmd(*cmd_list) return output_pdf_fpath
python
def compress_pdf(pdf_fpath, output_fname=None): """ uses ghostscript to write a pdf """ import utool as ut ut.assertpath(pdf_fpath) suffix = '_' + ut.get_datestamp(False) + '_compressed' print('pdf_fpath = %r' % (pdf_fpath,)) output_pdf_fpath = ut.augpath(pdf_fpath, suffix, newfname=output_fname) print('output_pdf_fpath = %r' % (output_pdf_fpath,)) gs_exe = find_ghostscript_exe() cmd_list = ( gs_exe, '-sDEVICE=pdfwrite', '-dCompatibilityLevel=1.4', '-dNOPAUSE', '-dQUIET', '-dBATCH', '-sOutputFile=' + output_pdf_fpath, pdf_fpath ) ut.cmd(*cmd_list) return output_pdf_fpath
[ "def", "compress_pdf", "(", "pdf_fpath", ",", "output_fname", "=", "None", ")", ":", "import", "utool", "as", "ut", "ut", ".", "assertpath", "(", "pdf_fpath", ")", "suffix", "=", "'_'", "+", "ut", ".", "get_datestamp", "(", "False", ")", "+", "'_compressed'", "print", "(", "'pdf_fpath = %r'", "%", "(", "pdf_fpath", ",", ")", ")", "output_pdf_fpath", "=", "ut", ".", "augpath", "(", "pdf_fpath", ",", "suffix", ",", "newfname", "=", "output_fname", ")", "print", "(", "'output_pdf_fpath = %r'", "%", "(", "output_pdf_fpath", ",", ")", ")", "gs_exe", "=", "find_ghostscript_exe", "(", ")", "cmd_list", "=", "(", "gs_exe", ",", "'-sDEVICE=pdfwrite'", ",", "'-dCompatibilityLevel=1.4'", ",", "'-dNOPAUSE'", ",", "'-dQUIET'", ",", "'-dBATCH'", ",", "'-sOutputFile='", "+", "output_pdf_fpath", ",", "pdf_fpath", ")", "ut", ".", "cmd", "(", "*", "cmd_list", ")", "return", "output_pdf_fpath" ]
uses ghostscript to write a pdf
[ "uses", "ghostscript", "to", "write", "a", "pdf" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L37-L57
train
Erotemic/utool
utool/util_latex.py
make_full_document
def make_full_document(text, title=None, preamp_decl={}, preamb_extra=None): r""" dummy preamble and document to wrap around latex fragment Args: text (str): title (str): Returns: str: text_ CommandLine: python -m utool.util_latex --test-make_full_document Example: >>> # DISABLE_DOCTEST >>> from utool.util_latex import * # NOQA >>> text = 'foo' >>> title = 'title' >>> preamp_decl = {} >>> text_ = make_full_document(text, title) >>> result = str(text_) >>> print(result) """ import utool as ut doc_preamb = ut.codeblock(''' %\\documentclass{article} \\documentclass[10pt,twocolumn,letterpaper]{article} % \\usepackage[utf8]{inputenc} \\usepackage[T1]{fontenc} \\usepackage{times} \\usepackage{epsfig} \\usepackage{graphicx} \\usepackage{amsmath,amsthm,amssymb} \\usepackage[usenames,dvipsnames,svgnames,table]{xcolor} \\usepackage{multirow} \\usepackage{subcaption} \\usepackage{booktabs} %\\pagenumbering{gobble} ''') if preamb_extra is not None: if isinstance(preamb_extra, (list, tuple)): preamb_extra = '\n'.join(preamb_extra) doc_preamb += '\n' + preamb_extra + '\n' if title is not None: preamp_decl['title'] = title decl_lines = [r'\{key}{{{val}}}'.format(key=key, val=val) for key, val in preamp_decl.items()] doc_decllines = '\n'.join(decl_lines) doc_header = ut.codeblock( r''' \begin{document} ''') if preamp_decl.get('title') is not None: doc_header += r'\maketitle' doc_footer = ut.codeblock( r''' \end{document} ''') text_ = '\n'.join((doc_preamb, doc_decllines, doc_header, text, doc_footer)) return text_
python
def make_full_document(text, title=None, preamp_decl={}, preamb_extra=None): r""" dummy preamble and document to wrap around latex fragment Args: text (str): title (str): Returns: str: text_ CommandLine: python -m utool.util_latex --test-make_full_document Example: >>> # DISABLE_DOCTEST >>> from utool.util_latex import * # NOQA >>> text = 'foo' >>> title = 'title' >>> preamp_decl = {} >>> text_ = make_full_document(text, title) >>> result = str(text_) >>> print(result) """ import utool as ut doc_preamb = ut.codeblock(''' %\\documentclass{article} \\documentclass[10pt,twocolumn,letterpaper]{article} % \\usepackage[utf8]{inputenc} \\usepackage[T1]{fontenc} \\usepackage{times} \\usepackage{epsfig} \\usepackage{graphicx} \\usepackage{amsmath,amsthm,amssymb} \\usepackage[usenames,dvipsnames,svgnames,table]{xcolor} \\usepackage{multirow} \\usepackage{subcaption} \\usepackage{booktabs} %\\pagenumbering{gobble} ''') if preamb_extra is not None: if isinstance(preamb_extra, (list, tuple)): preamb_extra = '\n'.join(preamb_extra) doc_preamb += '\n' + preamb_extra + '\n' if title is not None: preamp_decl['title'] = title decl_lines = [r'\{key}{{{val}}}'.format(key=key, val=val) for key, val in preamp_decl.items()] doc_decllines = '\n'.join(decl_lines) doc_header = ut.codeblock( r''' \begin{document} ''') if preamp_decl.get('title') is not None: doc_header += r'\maketitle' doc_footer = ut.codeblock( r''' \end{document} ''') text_ = '\n'.join((doc_preamb, doc_decllines, doc_header, text, doc_footer)) return text_
[ "def", "make_full_document", "(", "text", ",", "title", "=", "None", ",", "preamp_decl", "=", "{", "}", ",", "preamb_extra", "=", "None", ")", ":", "import", "utool", "as", "ut", "doc_preamb", "=", "ut", ".", "codeblock", "(", "'''\n %\\\\documentclass{article}\n \\\\documentclass[10pt,twocolumn,letterpaper]{article}\n % \\\\usepackage[utf8]{inputenc}\n \\\\usepackage[T1]{fontenc}\n\n \\\\usepackage{times}\n \\\\usepackage{epsfig}\n \\\\usepackage{graphicx}\n \\\\usepackage{amsmath,amsthm,amssymb}\n \\\\usepackage[usenames,dvipsnames,svgnames,table]{xcolor}\n \\\\usepackage{multirow}\n \\\\usepackage{subcaption}\n \\\\usepackage{booktabs}\n\n %\\\\pagenumbering{gobble}\n '''", ")", "if", "preamb_extra", "is", "not", "None", ":", "if", "isinstance", "(", "preamb_extra", ",", "(", "list", ",", "tuple", ")", ")", ":", "preamb_extra", "=", "'\\n'", ".", "join", "(", "preamb_extra", ")", "doc_preamb", "+=", "'\\n'", "+", "preamb_extra", "+", "'\\n'", "if", "title", "is", "not", "None", ":", "preamp_decl", "[", "'title'", "]", "=", "title", "decl_lines", "=", "[", "r'\\{key}{{{val}}}'", ".", "format", "(", "key", "=", "key", ",", "val", "=", "val", ")", "for", "key", ",", "val", "in", "preamp_decl", ".", "items", "(", ")", "]", "doc_decllines", "=", "'\\n'", ".", "join", "(", "decl_lines", ")", "doc_header", "=", "ut", ".", "codeblock", "(", "r'''\n \\begin{document}\n '''", ")", "if", "preamp_decl", ".", "get", "(", "'title'", ")", "is", "not", "None", ":", "doc_header", "+=", "r'\\maketitle'", "doc_footer", "=", "ut", ".", "codeblock", "(", "r'''\n \\end{document}\n '''", ")", "text_", "=", "'\\n'", ".", "join", "(", "(", "doc_preamb", ",", "doc_decllines", ",", "doc_header", ",", "text", ",", "doc_footer", ")", ")", "return", "text_" ]
r""" dummy preamble and document to wrap around latex fragment Args: text (str): title (str): Returns: str: text_ CommandLine: python -m utool.util_latex --test-make_full_document Example: >>> # DISABLE_DOCTEST >>> from utool.util_latex import * # NOQA >>> text = 'foo' >>> title = 'title' >>> preamp_decl = {} >>> text_ = make_full_document(text, title) >>> result = str(text_) >>> print(result)
[ "r", "dummy", "preamble", "and", "document", "to", "wrap", "around", "latex", "fragment" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L60-L125
train
Erotemic/utool
utool/util_latex.py
render_latex_text
def render_latex_text(input_text, nest_in_doc=False, preamb_extra=None, appname='utool', verbose=None): """ compiles latex and shows the result """ import utool as ut if verbose is None: verbose = ut.VERBOSE dpath = ut.ensure_app_resource_dir(appname, 'latex_tmp') # put a latex framgent in a full document # print(input_text) fname = 'temp_render_latex' pdf_fpath = ut.compile_latex_text( input_text, dpath=dpath, fname=fname, preamb_extra=preamb_extra, verbose=verbose) ut.startfile(pdf_fpath) return pdf_fpath
python
def render_latex_text(input_text, nest_in_doc=False, preamb_extra=None, appname='utool', verbose=None): """ compiles latex and shows the result """ import utool as ut if verbose is None: verbose = ut.VERBOSE dpath = ut.ensure_app_resource_dir(appname, 'latex_tmp') # put a latex framgent in a full document # print(input_text) fname = 'temp_render_latex' pdf_fpath = ut.compile_latex_text( input_text, dpath=dpath, fname=fname, preamb_extra=preamb_extra, verbose=verbose) ut.startfile(pdf_fpath) return pdf_fpath
[ "def", "render_latex_text", "(", "input_text", ",", "nest_in_doc", "=", "False", ",", "preamb_extra", "=", "None", ",", "appname", "=", "'utool'", ",", "verbose", "=", "None", ")", ":", "import", "utool", "as", "ut", "if", "verbose", "is", "None", ":", "verbose", "=", "ut", ".", "VERBOSE", "dpath", "=", "ut", ".", "ensure_app_resource_dir", "(", "appname", ",", "'latex_tmp'", ")", "# put a latex framgent in a full document", "# print(input_text)", "fname", "=", "'temp_render_latex'", "pdf_fpath", "=", "ut", ".", "compile_latex_text", "(", "input_text", ",", "dpath", "=", "dpath", ",", "fname", "=", "fname", ",", "preamb_extra", "=", "preamb_extra", ",", "verbose", "=", "verbose", ")", "ut", ".", "startfile", "(", "pdf_fpath", ")", "return", "pdf_fpath" ]
compiles latex and shows the result
[ "compiles", "latex", "and", "shows", "the", "result" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L128-L142
train
Erotemic/utool
utool/util_latex.py
render_latex
def render_latex(input_text, dpath=None, fname=None, preamb_extra=None, verbose=1, **kwargs): """ Renders latex text into a jpeg. Whitespace that would have appeared in the PDF is removed, so the jpeg is cropped only the the relevant part. This is ideal for figures that only take a single page. Args: input_text (?): dpath (str): directory path(default = None) fname (str): file name(default = None) preamb_extra (None): (default = None) verbose (int): verbosity flag(default = 1) Returns: str: jpg_fpath - file path string CommandLine: python -m utool.util_latex render_latex '$O(n^2)$' --fpath=~/slides/tmp.jpg Script: >>> # SCRIPT >>> from utool.util_latex import * # NOQA >>> from os.path import split, expanduser >>> import utool as ut >>> input_text = ' '.join(ut.get_varargs()[1:]) >>> dpath, fname = split(ut.argval('--fpath', '')) >>> dpath = expanduser(ut.argval('--dpath', dpath)) >>> fname = ut.argval('--fname', fname) >>> kwargs = ut.dict_subset(ut.argparse_funckw(ut.convert_pdf_to_image), ['dpi', 'quality']) >>> jpg_fpath = render_latex(input_text, dpath, fname, **kwargs) >>> if ut.argflag('--diskshow'): >>> ut.startfile(jpg_fpath) """ import utool as ut import vtool as vt # turn off page numbers input_text_ = '\pagenumbering{gobble}\n' + input_text # fname, _ = splitext(fname) img_fname = ut.ensure_ext(fname, ['.jpg'] + list(ut.IMG_EXTENSIONS)) img_fpath = join(dpath, img_fname) pdf_fpath = ut.compile_latex_text( input_text_, fname=fname, dpath=dpath, preamb_extra=preamb_extra, verbose=verbose, move=False) ext = splitext(img_fname)[1] fpath_in = ut.convert_pdf_to_image(pdf_fpath, ext=ext, verbose=verbose) # Clip of boundaries of the pdf imag vt.clipwhite_ondisk(fpath_in, fpath_out=img_fpath, verbose=verbose > 1) return img_fpath
python
def render_latex(input_text, dpath=None, fname=None, preamb_extra=None, verbose=1, **kwargs): """ Renders latex text into a jpeg. Whitespace that would have appeared in the PDF is removed, so the jpeg is cropped only the the relevant part. This is ideal for figures that only take a single page. Args: input_text (?): dpath (str): directory path(default = None) fname (str): file name(default = None) preamb_extra (None): (default = None) verbose (int): verbosity flag(default = 1) Returns: str: jpg_fpath - file path string CommandLine: python -m utool.util_latex render_latex '$O(n^2)$' --fpath=~/slides/tmp.jpg Script: >>> # SCRIPT >>> from utool.util_latex import * # NOQA >>> from os.path import split, expanduser >>> import utool as ut >>> input_text = ' '.join(ut.get_varargs()[1:]) >>> dpath, fname = split(ut.argval('--fpath', '')) >>> dpath = expanduser(ut.argval('--dpath', dpath)) >>> fname = ut.argval('--fname', fname) >>> kwargs = ut.dict_subset(ut.argparse_funckw(ut.convert_pdf_to_image), ['dpi', 'quality']) >>> jpg_fpath = render_latex(input_text, dpath, fname, **kwargs) >>> if ut.argflag('--diskshow'): >>> ut.startfile(jpg_fpath) """ import utool as ut import vtool as vt # turn off page numbers input_text_ = '\pagenumbering{gobble}\n' + input_text # fname, _ = splitext(fname) img_fname = ut.ensure_ext(fname, ['.jpg'] + list(ut.IMG_EXTENSIONS)) img_fpath = join(dpath, img_fname) pdf_fpath = ut.compile_latex_text( input_text_, fname=fname, dpath=dpath, preamb_extra=preamb_extra, verbose=verbose, move=False) ext = splitext(img_fname)[1] fpath_in = ut.convert_pdf_to_image(pdf_fpath, ext=ext, verbose=verbose) # Clip of boundaries of the pdf imag vt.clipwhite_ondisk(fpath_in, fpath_out=img_fpath, verbose=verbose > 1) return img_fpath
[ "def", "render_latex", "(", "input_text", ",", "dpath", "=", "None", ",", "fname", "=", "None", ",", "preamb_extra", "=", "None", ",", "verbose", "=", "1", ",", "*", "*", "kwargs", ")", ":", "import", "utool", "as", "ut", "import", "vtool", "as", "vt", "# turn off page numbers", "input_text_", "=", "'\\pagenumbering{gobble}\\n'", "+", "input_text", "# fname, _ = splitext(fname)", "img_fname", "=", "ut", ".", "ensure_ext", "(", "fname", ",", "[", "'.jpg'", "]", "+", "list", "(", "ut", ".", "IMG_EXTENSIONS", ")", ")", "img_fpath", "=", "join", "(", "dpath", ",", "img_fname", ")", "pdf_fpath", "=", "ut", ".", "compile_latex_text", "(", "input_text_", ",", "fname", "=", "fname", ",", "dpath", "=", "dpath", ",", "preamb_extra", "=", "preamb_extra", ",", "verbose", "=", "verbose", ",", "move", "=", "False", ")", "ext", "=", "splitext", "(", "img_fname", ")", "[", "1", "]", "fpath_in", "=", "ut", ".", "convert_pdf_to_image", "(", "pdf_fpath", ",", "ext", "=", "ext", ",", "verbose", "=", "verbose", ")", "# Clip of boundaries of the pdf imag", "vt", ".", "clipwhite_ondisk", "(", "fpath_in", ",", "fpath_out", "=", "img_fpath", ",", "verbose", "=", "verbose", ">", "1", ")", "return", "img_fpath" ]
Renders latex text into a jpeg. Whitespace that would have appeared in the PDF is removed, so the jpeg is cropped only the the relevant part. This is ideal for figures that only take a single page. Args: input_text (?): dpath (str): directory path(default = None) fname (str): file name(default = None) preamb_extra (None): (default = None) verbose (int): verbosity flag(default = 1) Returns: str: jpg_fpath - file path string CommandLine: python -m utool.util_latex render_latex '$O(n^2)$' --fpath=~/slides/tmp.jpg Script: >>> # SCRIPT >>> from utool.util_latex import * # NOQA >>> from os.path import split, expanduser >>> import utool as ut >>> input_text = ' '.join(ut.get_varargs()[1:]) >>> dpath, fname = split(ut.argval('--fpath', '')) >>> dpath = expanduser(ut.argval('--dpath', dpath)) >>> fname = ut.argval('--fname', fname) >>> kwargs = ut.dict_subset(ut.argparse_funckw(ut.convert_pdf_to_image), ['dpi', 'quality']) >>> jpg_fpath = render_latex(input_text, dpath, fname, **kwargs) >>> if ut.argflag('--diskshow'): >>> ut.startfile(jpg_fpath)
[ "Renders", "latex", "text", "into", "a", "jpeg", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L240-L290
train
Erotemic/utool
utool/util_latex.py
get_latex_figure_str2
def get_latex_figure_str2(fpath_list, cmdname, **kwargs): """ hack for candidacy """ import utool as ut from os.path import relpath # Make relative paths if kwargs.pop('relpath', True): start = ut.truepath('~/latex/crall-candidacy-2015') fpath_list = [relpath(fpath, start) for fpath in fpath_list] cmdname = ut.latex_sanitize_command_name(cmdname) kwargs['caption_str'] = kwargs.get('caption_str', cmdname) figure_str = ut.get_latex_figure_str(fpath_list, **kwargs) latex_block = ut.latex_newcommand(cmdname, figure_str) return latex_block
python
def get_latex_figure_str2(fpath_list, cmdname, **kwargs): """ hack for candidacy """ import utool as ut from os.path import relpath # Make relative paths if kwargs.pop('relpath', True): start = ut.truepath('~/latex/crall-candidacy-2015') fpath_list = [relpath(fpath, start) for fpath in fpath_list] cmdname = ut.latex_sanitize_command_name(cmdname) kwargs['caption_str'] = kwargs.get('caption_str', cmdname) figure_str = ut.get_latex_figure_str(fpath_list, **kwargs) latex_block = ut.latex_newcommand(cmdname, figure_str) return latex_block
[ "def", "get_latex_figure_str2", "(", "fpath_list", ",", "cmdname", ",", "*", "*", "kwargs", ")", ":", "import", "utool", "as", "ut", "from", "os", ".", "path", "import", "relpath", "# Make relative paths", "if", "kwargs", ".", "pop", "(", "'relpath'", ",", "True", ")", ":", "start", "=", "ut", ".", "truepath", "(", "'~/latex/crall-candidacy-2015'", ")", "fpath_list", "=", "[", "relpath", "(", "fpath", ",", "start", ")", "for", "fpath", "in", "fpath_list", "]", "cmdname", "=", "ut", ".", "latex_sanitize_command_name", "(", "cmdname", ")", "kwargs", "[", "'caption_str'", "]", "=", "kwargs", ".", "get", "(", "'caption_str'", ",", "cmdname", ")", "figure_str", "=", "ut", ".", "get_latex_figure_str", "(", "fpath_list", ",", "*", "*", "kwargs", ")", "latex_block", "=", "ut", ".", "latex_newcommand", "(", "cmdname", ",", "figure_str", ")", "return", "latex_block" ]
hack for candidacy
[ "hack", "for", "candidacy" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L711-L724
train
chriso/gauged
gauged/writer.py
Writer.add
def add(self, data, value=None, timestamp=None, namespace=None, debug=False): """Queue a gauge or gauges to be written""" if value is not None: return self.add(((data, value),), timestamp=timestamp, namespace=namespace, debug=debug) writer = self.writer if writer is None: raise GaugedUseAfterFreeError if timestamp is None: timestamp = long(time() * 1000) config = self.config block_size = config.block_size this_block = timestamp // block_size this_array = (timestamp % block_size) // config.resolution if namespace is None: namespace = config.namespace if this_block < self.current_block or \ (this_block == self.current_block and this_array < self.current_array): if config.append_only_violation == Writer.ERROR: msg = 'Gauged is append-only; timestamps must be increasing' raise GaugedAppendOnlyError(msg) elif config.append_only_violation == Writer.REWRITE: this_block = self.current_block this_array = self.current_array else: return if isinstance(data, unicode): data = data.encode('utf8') if debug: return self.debug(timestamp, namespace, data) if this_block > self.current_block: self.flush_blocks() self.current_block = this_block self.current_array = this_array elif this_array > self.current_array: if not Gauged.writer_flush_arrays(writer, self.current_array): raise MemoryError self.current_array = this_array data_points = 0 namespace_statistics = self.statistics[namespace] whitelist = config.key_whitelist skip_long_keys = config.key_overflow == Writer.IGNORE skip_gauge_nan = config.gauge_nan == Writer.IGNORE if isinstance(data, str) and skip_gauge_nan \ and skip_long_keys and whitelist is None: # fast path data_points = c_uint32(0) if not Gauged.writer_emit_pairs(writer, namespace, data, byref(data_points)): raise MemoryError data_points = data_points.value else: if isinstance(data, dict): data = data.iteritems() elif isinstance(data, str): data = self.parse_query(data) emit = Gauged.writer_emit for key, value in data: key = to_bytes(key) if whitelist is not None and key not in whitelist: continue try: value = float(value) except ValueError: value = float('nan') if value != value: # => NaN? if skip_gauge_nan: continue raise GaugedNaNError success = emit(writer, namespace, key, c_float(value)) if success != 1: if not success: raise MemoryError elif success == Writer.KEY_OVERFLOW and not skip_long_keys: msg = 'Key is larger than the driver allows ' msg += '(%s)' % key raise GaugedKeyOverflowError(msg) data_points += 1 namespace_statistics.data_points += data_points if self.flush_now: self.flush()
python
def add(self, data, value=None, timestamp=None, namespace=None, debug=False): """Queue a gauge or gauges to be written""" if value is not None: return self.add(((data, value),), timestamp=timestamp, namespace=namespace, debug=debug) writer = self.writer if writer is None: raise GaugedUseAfterFreeError if timestamp is None: timestamp = long(time() * 1000) config = self.config block_size = config.block_size this_block = timestamp // block_size this_array = (timestamp % block_size) // config.resolution if namespace is None: namespace = config.namespace if this_block < self.current_block or \ (this_block == self.current_block and this_array < self.current_array): if config.append_only_violation == Writer.ERROR: msg = 'Gauged is append-only; timestamps must be increasing' raise GaugedAppendOnlyError(msg) elif config.append_only_violation == Writer.REWRITE: this_block = self.current_block this_array = self.current_array else: return if isinstance(data, unicode): data = data.encode('utf8') if debug: return self.debug(timestamp, namespace, data) if this_block > self.current_block: self.flush_blocks() self.current_block = this_block self.current_array = this_array elif this_array > self.current_array: if not Gauged.writer_flush_arrays(writer, self.current_array): raise MemoryError self.current_array = this_array data_points = 0 namespace_statistics = self.statistics[namespace] whitelist = config.key_whitelist skip_long_keys = config.key_overflow == Writer.IGNORE skip_gauge_nan = config.gauge_nan == Writer.IGNORE if isinstance(data, str) and skip_gauge_nan \ and skip_long_keys and whitelist is None: # fast path data_points = c_uint32(0) if not Gauged.writer_emit_pairs(writer, namespace, data, byref(data_points)): raise MemoryError data_points = data_points.value else: if isinstance(data, dict): data = data.iteritems() elif isinstance(data, str): data = self.parse_query(data) emit = Gauged.writer_emit for key, value in data: key = to_bytes(key) if whitelist is not None and key not in whitelist: continue try: value = float(value) except ValueError: value = float('nan') if value != value: # => NaN? if skip_gauge_nan: continue raise GaugedNaNError success = emit(writer, namespace, key, c_float(value)) if success != 1: if not success: raise MemoryError elif success == Writer.KEY_OVERFLOW and not skip_long_keys: msg = 'Key is larger than the driver allows ' msg += '(%s)' % key raise GaugedKeyOverflowError(msg) data_points += 1 namespace_statistics.data_points += data_points if self.flush_now: self.flush()
[ "def", "add", "(", "self", ",", "data", ",", "value", "=", "None", ",", "timestamp", "=", "None", ",", "namespace", "=", "None", ",", "debug", "=", "False", ")", ":", "if", "value", "is", "not", "None", ":", "return", "self", ".", "add", "(", "(", "(", "data", ",", "value", ")", ",", ")", ",", "timestamp", "=", "timestamp", ",", "namespace", "=", "namespace", ",", "debug", "=", "debug", ")", "writer", "=", "self", ".", "writer", "if", "writer", "is", "None", ":", "raise", "GaugedUseAfterFreeError", "if", "timestamp", "is", "None", ":", "timestamp", "=", "long", "(", "time", "(", ")", "*", "1000", ")", "config", "=", "self", ".", "config", "block_size", "=", "config", ".", "block_size", "this_block", "=", "timestamp", "//", "block_size", "this_array", "=", "(", "timestamp", "%", "block_size", ")", "//", "config", ".", "resolution", "if", "namespace", "is", "None", ":", "namespace", "=", "config", ".", "namespace", "if", "this_block", "<", "self", ".", "current_block", "or", "(", "this_block", "==", "self", ".", "current_block", "and", "this_array", "<", "self", ".", "current_array", ")", ":", "if", "config", ".", "append_only_violation", "==", "Writer", ".", "ERROR", ":", "msg", "=", "'Gauged is append-only; timestamps must be increasing'", "raise", "GaugedAppendOnlyError", "(", "msg", ")", "elif", "config", ".", "append_only_violation", "==", "Writer", ".", "REWRITE", ":", "this_block", "=", "self", ".", "current_block", "this_array", "=", "self", ".", "current_array", "else", ":", "return", "if", "isinstance", "(", "data", ",", "unicode", ")", ":", "data", "=", "data", ".", "encode", "(", "'utf8'", ")", "if", "debug", ":", "return", "self", ".", "debug", "(", "timestamp", ",", "namespace", ",", "data", ")", "if", "this_block", ">", "self", ".", "current_block", ":", "self", ".", "flush_blocks", "(", ")", "self", ".", "current_block", "=", "this_block", "self", ".", "current_array", "=", "this_array", "elif", "this_array", ">", "self", ".", "current_array", ":", "if", "not", "Gauged", ".", "writer_flush_arrays", "(", "writer", ",", "self", ".", "current_array", ")", ":", "raise", "MemoryError", "self", ".", "current_array", "=", "this_array", "data_points", "=", "0", "namespace_statistics", "=", "self", ".", "statistics", "[", "namespace", "]", "whitelist", "=", "config", ".", "key_whitelist", "skip_long_keys", "=", "config", ".", "key_overflow", "==", "Writer", ".", "IGNORE", "skip_gauge_nan", "=", "config", ".", "gauge_nan", "==", "Writer", ".", "IGNORE", "if", "isinstance", "(", "data", ",", "str", ")", "and", "skip_gauge_nan", "and", "skip_long_keys", "and", "whitelist", "is", "None", ":", "# fast path", "data_points", "=", "c_uint32", "(", "0", ")", "if", "not", "Gauged", ".", "writer_emit_pairs", "(", "writer", ",", "namespace", ",", "data", ",", "byref", "(", "data_points", ")", ")", ":", "raise", "MemoryError", "data_points", "=", "data_points", ".", "value", "else", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "data", ".", "iteritems", "(", ")", "elif", "isinstance", "(", "data", ",", "str", ")", ":", "data", "=", "self", ".", "parse_query", "(", "data", ")", "emit", "=", "Gauged", ".", "writer_emit", "for", "key", ",", "value", "in", "data", ":", "key", "=", "to_bytes", "(", "key", ")", "if", "whitelist", "is", "not", "None", "and", "key", "not", "in", "whitelist", ":", "continue", "try", ":", "value", "=", "float", "(", "value", ")", "except", "ValueError", ":", "value", "=", "float", "(", "'nan'", ")", "if", "value", "!=", "value", ":", "# => 
NaN?", "if", "skip_gauge_nan", ":", "continue", "raise", "GaugedNaNError", "success", "=", "emit", "(", "writer", ",", "namespace", ",", "key", ",", "c_float", "(", "value", ")", ")", "if", "success", "!=", "1", ":", "if", "not", "success", ":", "raise", "MemoryError", "elif", "success", "==", "Writer", ".", "KEY_OVERFLOW", "and", "not", "skip_long_keys", ":", "msg", "=", "'Key is larger than the driver allows '", "msg", "+=", "'(%s)'", "%", "key", "raise", "GaugedKeyOverflowError", "(", "msg", ")", "data_points", "+=", "1", "namespace_statistics", ".", "data_points", "+=", "data_points", "if", "self", ".", "flush_now", ":", "self", ".", "flush", "(", ")" ]
Queue a gauge or gauges to be written
[ "Queue", "a", "gauge", "or", "gauges", "to", "be", "written" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L47-L128
train
chriso/gauged
gauged/writer.py
Writer.flush
def flush(self): """Flush all pending gauges""" writer = self.writer if writer is None: raise GaugedUseAfterFreeError self.flush_writer_position() keys = self.translate_keys() blocks = [] current_block = self.current_block statistics = self.statistics driver = self.driver flags = 0 # for future extensions, e.g. block compression for namespace, key, block in self.pending_blocks(): length = block.byte_length() if not length: continue key_id = keys[(namespace, key)] statistics[namespace].byte_count += length blocks.append((namespace, current_block, key_id, block.buffer(), flags)) if self.config.overwrite_blocks: driver.replace_blocks(blocks) else: driver.insert_or_append_blocks(blocks) if not Gauged.writer_flush_maps(writer, True): raise MemoryError update_namespace = driver.add_namespace_statistics for namespace, stats in statistics.iteritems(): update_namespace(namespace, self.current_block, stats.data_points, stats.byte_count) statistics.clear() driver.commit() self.flush_now = False
python
def flush(self): """Flush all pending gauges""" writer = self.writer if writer is None: raise GaugedUseAfterFreeError self.flush_writer_position() keys = self.translate_keys() blocks = [] current_block = self.current_block statistics = self.statistics driver = self.driver flags = 0 # for future extensions, e.g. block compression for namespace, key, block in self.pending_blocks(): length = block.byte_length() if not length: continue key_id = keys[(namespace, key)] statistics[namespace].byte_count += length blocks.append((namespace, current_block, key_id, block.buffer(), flags)) if self.config.overwrite_blocks: driver.replace_blocks(blocks) else: driver.insert_or_append_blocks(blocks) if not Gauged.writer_flush_maps(writer, True): raise MemoryError update_namespace = driver.add_namespace_statistics for namespace, stats in statistics.iteritems(): update_namespace(namespace, self.current_block, stats.data_points, stats.byte_count) statistics.clear() driver.commit() self.flush_now = False
[ "def", "flush", "(", "self", ")", ":", "writer", "=", "self", ".", "writer", "if", "writer", "is", "None", ":", "raise", "GaugedUseAfterFreeError", "self", ".", "flush_writer_position", "(", ")", "keys", "=", "self", ".", "translate_keys", "(", ")", "blocks", "=", "[", "]", "current_block", "=", "self", ".", "current_block", "statistics", "=", "self", ".", "statistics", "driver", "=", "self", ".", "driver", "flags", "=", "0", "# for future extensions, e.g. block compression", "for", "namespace", ",", "key", ",", "block", "in", "self", ".", "pending_blocks", "(", ")", ":", "length", "=", "block", ".", "byte_length", "(", ")", "if", "not", "length", ":", "continue", "key_id", "=", "keys", "[", "(", "namespace", ",", "key", ")", "]", "statistics", "[", "namespace", "]", ".", "byte_count", "+=", "length", "blocks", ".", "append", "(", "(", "namespace", ",", "current_block", ",", "key_id", ",", "block", ".", "buffer", "(", ")", ",", "flags", ")", ")", "if", "self", ".", "config", ".", "overwrite_blocks", ":", "driver", ".", "replace_blocks", "(", "blocks", ")", "else", ":", "driver", ".", "insert_or_append_blocks", "(", "blocks", ")", "if", "not", "Gauged", ".", "writer_flush_maps", "(", "writer", ",", "True", ")", ":", "raise", "MemoryError", "update_namespace", "=", "driver", ".", "add_namespace_statistics", "for", "namespace", ",", "stats", "in", "statistics", ".", "iteritems", "(", ")", ":", "update_namespace", "(", "namespace", ",", "self", ".", "current_block", ",", "stats", ".", "data_points", ",", "stats", ".", "byte_count", ")", "statistics", ".", "clear", "(", ")", "driver", ".", "commit", "(", ")", "self", ".", "flush_now", "=", "False" ]
Flush all pending gauges
[ "Flush", "all", "pending", "gauges" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L130-L162
train
chriso/gauged
gauged/writer.py
Writer.resume_from
def resume_from(self): """Get a timestamp representing the position just after the last written gauge""" position = self.driver.get_writer_position(self.config.writer_name) return position + self.config.resolution if position else 0
python
def resume_from(self): """Get a timestamp representing the position just after the last written gauge""" position = self.driver.get_writer_position(self.config.writer_name) return position + self.config.resolution if position else 0
[ "def", "resume_from", "(", "self", ")", ":", "position", "=", "self", ".", "driver", ".", "get_writer_position", "(", "self", ".", "config", ".", "writer_name", ")", "return", "position", "+", "self", ".", "config", ".", "resolution", "if", "position", "else", "0" ]
Get a timestamp representing the position just after the last written gauge
[ "Get", "a", "timestamp", "representing", "the", "position", "just", "after", "the", "last", "written", "gauge" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L164-L168
train
chriso/gauged
gauged/writer.py
Writer.clear_from
def clear_from(self, timestamp): """Clear all data from `timestamp` onwards. Note that the timestamp is rounded down to the nearest block boundary""" block_size = self.config.block_size offset, remainder = timestamp // block_size, timestamp % block_size if remainder: raise ValueError('Timestamp must be on a block boundary') self.driver.clear_from(offset, timestamp)
python
def clear_from(self, timestamp): """Clear all data from `timestamp` onwards. Note that the timestamp is rounded down to the nearest block boundary""" block_size = self.config.block_size offset, remainder = timestamp // block_size, timestamp % block_size if remainder: raise ValueError('Timestamp must be on a block boundary') self.driver.clear_from(offset, timestamp)
[ "def", "clear_from", "(", "self", ",", "timestamp", ")", ":", "block_size", "=", "self", ".", "config", ".", "block_size", "offset", ",", "remainder", "=", "timestamp", "//", "block_size", ",", "timestamp", "%", "block_size", "if", "remainder", ":", "raise", "ValueError", "(", "'Timestamp must be on a block boundary'", ")", "self", ".", "driver", ".", "clear_from", "(", "offset", ",", "timestamp", ")" ]
Clear all data from `timestamp` onwards. Note that the timestamp must lie on a block boundary; a ValueError is raised otherwise
[ "Clear", "all", "data", "from", "timestamp", "onwards", ".", "Note", "that", "the", "timestamp", "is", "rounded", "down", "to", "the", "nearest", "block", "boundary" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L170-L177
train
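clear_from requires the timestamp to sit exactly on a block boundary (the code raises rather than rounding, as the corrected docstring above now says), and clear_key_before in the next record makes the same check with divmod, which yields quotient and remainder in one step. A standalone sketch with an assumed block_size:

block_size = 86400000  # assumed; gauged takes this from its config

def to_block_offset(timestamp):
    offset, remainder = divmod(timestamp, block_size)
    if remainder:
        raise ValueError('Timestamp must be on a block boundary')
    return offset

print(to_block_offset(2 * 86400000))  # 2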
chriso/gauged
gauged/writer.py
Writer.clear_key_before
def clear_key_before(self, key, namespace=None, timestamp=None): """Clear all data before `timestamp` for a given key. Note that the timestamp is rounded down to the nearest block boundary""" block_size = self.config.block_size if namespace is None: namespace = self.config.namespace if timestamp is not None: offset, remainder = divmod(timestamp, block_size) if remainder: raise ValueError('timestamp must be on a block boundary') if offset == 0: raise ValueError('cannot delete before offset zero') offset -= 1 self.driver.clear_key_before(key, namespace, offset, timestamp) else: self.driver.clear_key_before(key, namespace)
python
def clear_key_before(self, key, namespace=None, timestamp=None): """Clear all data before `timestamp` for a given key. Note that the timestamp is rounded down to the nearest block boundary""" block_size = self.config.block_size if namespace is None: namespace = self.config.namespace if timestamp is not None: offset, remainder = divmod(timestamp, block_size) if remainder: raise ValueError('timestamp must be on a block boundary') if offset == 0: raise ValueError('cannot delete before offset zero') offset -= 1 self.driver.clear_key_before(key, namespace, offset, timestamp) else: self.driver.clear_key_before(key, namespace)
[ "def", "clear_key_before", "(", "self", ",", "key", ",", "namespace", "=", "None", ",", "timestamp", "=", "None", ")", ":", "block_size", "=", "self", ".", "config", ".", "block_size", "if", "namespace", "is", "None", ":", "namespace", "=", "self", ".", "config", ".", "namespace", "if", "timestamp", "is", "not", "None", ":", "offset", ",", "remainder", "=", "divmod", "(", "timestamp", ",", "block_size", ")", "if", "remainder", ":", "raise", "ValueError", "(", "'timestamp must be on a block boundary'", ")", "if", "offset", "==", "0", ":", "raise", "ValueError", "(", "'cannot delete before offset zero'", ")", "offset", "-=", "1", "self", ".", "driver", ".", "clear_key_before", "(", "key", ",", "namespace", ",", "offset", ",", "timestamp", ")", "else", ":", "self", ".", "driver", ".", "clear_key_before", "(", "key", ",", "namespace", ")" ]
Clear all data before `timestamp` for a given key. Note that the timestamp must lie on a block boundary after offset zero; a ValueError is raised otherwise
[ "Clear", "all", "data", "before", "timestamp", "for", "a", "given", "key", ".", "Note", "that", "the", "timestamp", "is", "rounded", "down", "to", "the", "nearest", "block", "boundary" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/writer.py#L179-L194
train
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
Ctab._to_ctfile_counts_line
def _to_ctfile_counts_line(self, key): """Create counts line in ``CTfile`` format. :param str key: Counts line key. :return: Counts line string. :rtype: :py:class:`str` """ counter = OrderedCounter(self.counts_line_format) self[key]['number_of_atoms'] = str(len(self.atoms)) self[key]['number_of_bonds'] = str(len(self.bonds)) counts_line = ''.join([str(value).rjust(spacing) for value, spacing in zip(self[key].values(), counter.values())]) return '{}\n'.format(counts_line)
python
def _to_ctfile_counts_line(self, key): """Create counts line in ``CTfile`` format. :param str key: Counts line key. :return: Counts line string. :rtype: :py:class:`str` """ counter = OrderedCounter(self.counts_line_format) self[key]['number_of_atoms'] = str(len(self.atoms)) self[key]['number_of_bonds'] = str(len(self.bonds)) counts_line = ''.join([str(value).rjust(spacing) for value, spacing in zip(self[key].values(), counter.values())]) return '{}\n'.format(counts_line)
[ "def", "_to_ctfile_counts_line", "(", "self", ",", "key", ")", ":", "counter", "=", "OrderedCounter", "(", "self", ".", "counts_line_format", ")", "self", "[", "key", "]", "[", "'number_of_atoms'", "]", "=", "str", "(", "len", "(", "self", ".", "atoms", ")", ")", "self", "[", "key", "]", "[", "'number_of_bonds'", "]", "=", "str", "(", "len", "(", "self", ".", "bonds", ")", ")", "counts_line", "=", "''", ".", "join", "(", "[", "str", "(", "value", ")", ".", "rjust", "(", "spacing", ")", "for", "value", ",", "spacing", "in", "zip", "(", "self", "[", "key", "]", ".", "values", "(", ")", ",", "counter", ".", "values", "(", ")", ")", "]", ")", "return", "'{}\\n'", ".", "format", "(", "counts_line", ")" ]
Create counts line in ``CTfile`` format. :param str key: Counts line key. :return: Counts line string. :rtype: :py:class:`str`
[ "Create", "counts", "line", "in", "CTfile", "format", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L346-L358
train
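_to_ctfile_counts_line pairs each counts-line value with a column width (the real code derives widths from an OrderedCounter over counts_line_format) and right-justifies it into place. A standalone sketch of the rjust/zip idiom; the widths list and the values are invented stand-ins:

from collections import OrderedDict

values = OrderedDict([('number_of_atoms', '6'),
                      ('number_of_bonds', '5'),
                      ('version', 'V2000')])
widths = [3, 3, 6]  # assumed column widths

line = ''.join(str(v).rjust(w) for v, w in zip(values.values(), widths))
print(repr(line))  # '  6  5 V2000'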
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
Ctab._to_ctfile_atom_block
def _to_ctfile_atom_block(self, key): """Create atom block in `CTfile` format. :param str key: Ctab atom block key. :return: Ctab atom block. :rtype: :py:class:`str` """ counter = OrderedCounter(Atom.atom_block_format) ctab_atom_block = '\n'.join([''.join([str(value).rjust(spacing) for value, spacing in zip(atom._ctab_data.values(), counter.values())]) for atom in self[key]]) return '{}\n'.format(ctab_atom_block)
python
def _to_ctfile_atom_block(self, key): """Create atom block in `CTfile` format. :param str key: Ctab atom block key. :return: Ctab atom block. :rtype: :py:class:`str` """ counter = OrderedCounter(Atom.atom_block_format) ctab_atom_block = '\n'.join([''.join([str(value).rjust(spacing) for value, spacing in zip(atom._ctab_data.values(), counter.values())]) for atom in self[key]]) return '{}\n'.format(ctab_atom_block)
[ "def", "_to_ctfile_atom_block", "(", "self", ",", "key", ")", ":", "counter", "=", "OrderedCounter", "(", "Atom", ".", "atom_block_format", ")", "ctab_atom_block", "=", "'\\n'", ".", "join", "(", "[", "''", ".", "join", "(", "[", "str", "(", "value", ")", ".", "rjust", "(", "spacing", ")", "for", "value", ",", "spacing", "in", "zip", "(", "atom", ".", "_ctab_data", ".", "values", "(", ")", ",", "counter", ".", "values", "(", ")", ")", "]", ")", "for", "atom", "in", "self", "[", "key", "]", "]", ")", "return", "'{}\\n'", ".", "format", "(", "ctab_atom_block", ")" ]
Create atom block in `CTfile` format. :param str key: Ctab atom block key. :return: Ctab atom block. :rtype: :py:class:`str`
[ "Create", "atom", "block", "in", "CTfile", "format", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L360-L371
train
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
Ctab._to_ctfile_bond_block
def _to_ctfile_bond_block(self, key): """Create bond block in `CTfile` format. :param str key: Ctab atom block key. :return: Ctab bond block. :rtype: :py:class:`str` """ counter = OrderedCounter(Bond.bond_block_format) ctab_bond_block = '\n'.join([''.join([str(value).rjust(spacing) for value, spacing in zip(bond._ctab_data.values(), counter.values())]) for bond in self[key]]) return '{}\n'.format(ctab_bond_block)
python
def _to_ctfile_bond_block(self, key): """Create bond block in `CTfile` format. :param str key: Ctab atom block key. :return: Ctab bond block. :rtype: :py:class:`str` """ counter = OrderedCounter(Bond.bond_block_format) ctab_bond_block = '\n'.join([''.join([str(value).rjust(spacing) for value, spacing in zip(bond._ctab_data.values(), counter.values())]) for bond in self[key]]) return '{}\n'.format(ctab_bond_block)
[ "def", "_to_ctfile_bond_block", "(", "self", ",", "key", ")", ":", "counter", "=", "OrderedCounter", "(", "Bond", ".", "bond_block_format", ")", "ctab_bond_block", "=", "'\\n'", ".", "join", "(", "[", "''", ".", "join", "(", "[", "str", "(", "value", ")", ".", "rjust", "(", "spacing", ")", "for", "value", ",", "spacing", "in", "zip", "(", "bond", ".", "_ctab_data", ".", "values", "(", ")", ",", "counter", ".", "values", "(", ")", ")", "]", ")", "for", "bond", "in", "self", "[", "key", "]", "]", ")", "return", "'{}\\n'", ".", "format", "(", "ctab_bond_block", ")" ]
Create bond block in `CTfile` format. :param str key: Ctab atom block key. :return: Ctab bond block. :rtype: :py:class:`str`
[ "Create", "bond", "block", "in", "CTfile", "format", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L373-L384
train
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
Ctab._to_ctfile_property_block
def _to_ctfile_property_block(self): """Create ctab properties block in `CTfile` format from atom-specific properties. :return: Ctab property block. :rtype: :py:class:`str` """ ctab_properties_data = defaultdict(list) for atom in self.atoms: for ctab_property_key, ctab_property_value in atom._ctab_property_data.items(): ctab_properties_data[ctab_property_key].append(OrderedDict( zip(self.ctab_conf[self.version][ctab_property_key]['values'], [atom.atom_number, ctab_property_value]))) ctab_property_lines = [] for ctab_property_key, ctab_property_value in ctab_properties_data.items(): for entry in ctab_property_value: ctab_property_line = '{} {}{}'.format(self.ctab_conf[self.version][ctab_property_key]['fmt'], 1, ''.join([str(value).rjust(4) for value in entry.values()])) ctab_property_lines.append(ctab_property_line) if ctab_property_lines: return '{}\n'.format('\n'.join(ctab_property_lines)) return ''
python
def _to_ctfile_property_block(self): """Create ctab properties block in `CTfile` format from atom-specific properties. :return: Ctab property block. :rtype: :py:class:`str` """ ctab_properties_data = defaultdict(list) for atom in self.atoms: for ctab_property_key, ctab_property_value in atom._ctab_property_data.items(): ctab_properties_data[ctab_property_key].append(OrderedDict( zip(self.ctab_conf[self.version][ctab_property_key]['values'], [atom.atom_number, ctab_property_value]))) ctab_property_lines = [] for ctab_property_key, ctab_property_value in ctab_properties_data.items(): for entry in ctab_property_value: ctab_property_line = '{} {}{}'.format(self.ctab_conf[self.version][ctab_property_key]['fmt'], 1, ''.join([str(value).rjust(4) for value in entry.values()])) ctab_property_lines.append(ctab_property_line) if ctab_property_lines: return '{}\n'.format('\n'.join(ctab_property_lines)) return ''
[ "def", "_to_ctfile_property_block", "(", "self", ")", ":", "ctab_properties_data", "=", "defaultdict", "(", "list", ")", "for", "atom", "in", "self", ".", "atoms", ":", "for", "ctab_property_key", ",", "ctab_property_value", "in", "atom", ".", "_ctab_property_data", ".", "items", "(", ")", ":", "ctab_properties_data", "[", "ctab_property_key", "]", ".", "append", "(", "OrderedDict", "(", "zip", "(", "self", ".", "ctab_conf", "[", "self", ".", "version", "]", "[", "ctab_property_key", "]", "[", "'values'", "]", ",", "[", "atom", ".", "atom_number", ",", "ctab_property_value", "]", ")", ")", ")", "ctab_property_lines", "=", "[", "]", "for", "ctab_property_key", ",", "ctab_property_value", "in", "ctab_properties_data", ".", "items", "(", ")", ":", "for", "entry", "in", "ctab_property_value", ":", "ctab_property_line", "=", "'{} {}{}'", ".", "format", "(", "self", ".", "ctab_conf", "[", "self", ".", "version", "]", "[", "ctab_property_key", "]", "[", "'fmt'", "]", ",", "1", ",", "''", ".", "join", "(", "[", "str", "(", "value", ")", ".", "rjust", "(", "4", ")", "for", "value", "in", "entry", ".", "values", "(", ")", "]", ")", ")", "ctab_property_lines", ".", "append", "(", "ctab_property_line", ")", "if", "ctab_property_lines", ":", "return", "'{}\\n'", ".", "format", "(", "'\\n'", ".", "join", "(", "ctab_property_lines", ")", ")", "return", "''" ]
Create ctab properties block in `CTfile` format from atom-specific properties. :return: Ctab property block. :rtype: :py:class:`str`
[ "Create", "ctab", "properties", "block", "in", "CTfile", "format", "from", "atom", "-", "specific", "properties", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L386-L408
train
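_to_ctfile_property_block groups per-atom properties by property key with a defaultdict(list), pairing field names with (atom_number, value) through OrderedDict(zip(...)). A standalone sketch; the atoms, field names, and the 'M  ' prefix layout are simplified stand-ins for the real ctab_conf tables:

from collections import defaultdict, OrderedDict

# invented per-atom properties: (atom_number, {property_key: value})
atoms = [('1', {'CHG': '-1'}), ('3', {'CHG': '1'}), ('2', {'ISO': '13'})]
field_names = {'CHG': ['atom_number', 'charge'],
               'ISO': ['atom_number', 'isotope']}

grouped = defaultdict(list)
for atom_number, props in atoms:
    for key, value in props.items():
        grouped[key].append(OrderedDict(zip(field_names[key],
                                            [atom_number, value])))

for key, entries in grouped.items():
    for entry in entries:
        fields = ''.join(str(v).rjust(4) for v in entry.values())
        print('{} {}{}'.format('M  ' + key, 1, fields))  # e.g. 'M  CHG 1   1  -1'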
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
Ctab.delete_atom
def delete_atom(self, *atom_numbers): """Delete atoms by atom number. :param str atom_numbers: :return: None. :rtype: :py:obj:`None` """ for atom_number in atom_numbers: deletion_atom = self.atom_by_number(atom_number=atom_number) # update atom numbers for atom in self.atoms: if int(atom.atom_number) > int(atom_number): atom.atom_number = str(int(atom.atom_number) - 1) # find index of a bond to remove and update ctab data dict with new atom numbers for index, bond in enumerate(self.bonds): bond.update_atom_numbers() if atom_number in {bond.first_atom_number, bond.second_atom_number}: self.bonds.remove(bond) # remove atom from neighbors list for atom in self.atoms: if deletion_atom in atom.neighbors: atom.neighbors.remove(deletion_atom) self.atoms.remove(deletion_atom)
python
def delete_atom(self, *atom_numbers): """Delete atoms by atom number. :param str atom_numbers: :return: None. :rtype: :py:obj:`None` """ for atom_number in atom_numbers: deletion_atom = self.atom_by_number(atom_number=atom_number) # update atom numbers for atom in self.atoms: if int(atom.atom_number) > int(atom_number): atom.atom_number = str(int(atom.atom_number) - 1) # find index of a bond to remove and update ctab data dict with new atom numbers for index, bond in enumerate(self.bonds): bond.update_atom_numbers() if atom_number in {bond.first_atom_number, bond.second_atom_number}: self.bonds.remove(bond) # remove atom from neighbors list for atom in self.atoms: if deletion_atom in atom.neighbors: atom.neighbors.remove(deletion_atom) self.atoms.remove(deletion_atom)
[ "def", "delete_atom", "(", "self", ",", "*", "atom_numbers", ")", ":", "for", "atom_number", "in", "atom_numbers", ":", "deletion_atom", "=", "self", ".", "atom_by_number", "(", "atom_number", "=", "atom_number", ")", "# update atom numbers", "for", "atom", "in", "self", ".", "atoms", ":", "if", "int", "(", "atom", ".", "atom_number", ")", ">", "int", "(", "atom_number", ")", ":", "atom", ".", "atom_number", "=", "str", "(", "int", "(", "atom", ".", "atom_number", ")", "-", "1", ")", "# find index of a bond to remove and update ctab data dict with new atom numbers", "for", "index", ",", "bond", "in", "enumerate", "(", "self", ".", "bonds", ")", ":", "bond", ".", "update_atom_numbers", "(", ")", "if", "atom_number", "in", "{", "bond", ".", "first_atom_number", ",", "bond", ".", "second_atom_number", "}", ":", "self", ".", "bonds", ".", "remove", "(", "bond", ")", "# remove atom from neighbors list", "for", "atom", "in", "self", ".", "atoms", ":", "if", "deletion_atom", "in", "atom", ".", "neighbors", ":", "atom", ".", "neighbors", ".", "remove", "(", "deletion_atom", ")", "self", ".", "atoms", ".", "remove", "(", "deletion_atom", ")" ]
Delete atoms by atom number. :param str atom_numbers: :return: None. :rtype: :py:obj:`None`
[ "Delete", "atoms", "by", "atom", "number", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L522-L548
train
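One thing to watch in delete_atom: it calls self.bonds.remove(bond) while iterating over self.bonds, and mutating a list during iteration makes the iterator skip the element that follows each removal. A standalone sketch of the same renumber-and-filter step that rebuilds the lists instead (plain ints and pairs stand in for Atom/Bond objects):

def delete_atom_number(atoms, bonds, target):
    # atoms: list of int numbers; bonds: list of (a, b) pairs
    bonds = [(a, b) for a, b in bonds if target not in (a, b)]
    def shift(n):
        return n - 1 if n > target else n
    atoms = [shift(n) for n in atoms if n != target]
    bonds = [(shift(a), shift(b)) for a, b in bonds]
    return atoms, bonds

print(delete_atom_number([1, 2, 3, 4], [(1, 2), (2, 3), (3, 4)], 2))
# ([1, 2, 3], [(2, 3)]): bonds touching atom 2 dropped, the rest renumbered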
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
SDfile.from_molfile
def from_molfile(cls, molfile, data=None): """Construct new ``SDfile`` object from ``Molfile`` object. :param molfile: ``Molfile`` object. :type molfile: :class:`~ctfile.ctfile.Molfile`. :return: ``SDfile`` object. :rtype: :class:`~ctfile.ctfile.SDfile`. """ if not data: data = OrderedDict() if not isinstance(molfile, Molfile): raise ValueError('Not a Molfile type: "{}"'.format(type(molfile))) if not isinstance(data, dict): raise ValueError('Not a dict type: "{}"'.format(type(data))) sdfile = cls() sdfile['1'] = OrderedDict() sdfile['1']['molfile'] = molfile sdfile['1']['data'] = data return sdfile
python
def from_molfile(cls, molfile, data=None): """Construct new ``SDfile`` object from ``Molfile`` object. :param molfile: ``Molfile`` object. :type molfile: :class:`~ctfile.ctfile.Molfile`. :return: ``SDfile`` object. :rtype: :class:`~ctfile.ctfile.SDfile`. """ if not data: data = OrderedDict() if not isinstance(molfile, Molfile): raise ValueError('Not a Molfile type: "{}"'.format(type(molfile))) if not isinstance(data, dict): raise ValueError('Not a dict type: "{}"'.format(type(data))) sdfile = cls() sdfile['1'] = OrderedDict() sdfile['1']['molfile'] = molfile sdfile['1']['data'] = data return sdfile
[ "def", "from_molfile", "(", "cls", ",", "molfile", ",", "data", "=", "None", ")", ":", "if", "not", "data", ":", "data", "=", "OrderedDict", "(", ")", "if", "not", "isinstance", "(", "molfile", ",", "Molfile", ")", ":", "raise", "ValueError", "(", "'Not a Molfile type: \"{}\"'", ".", "format", "(", "type", "(", "molfile", ")", ")", ")", "if", "not", "isinstance", "(", "data", ",", "dict", ")", ":", "raise", "ValueError", "(", "'Not a dict type: \"{}\"'", ".", "format", "(", "type", "(", "data", ")", ")", ")", "sdfile", "=", "cls", "(", ")", "sdfile", "[", "'1'", "]", "=", "OrderedDict", "(", ")", "sdfile", "[", "'1'", "]", "[", "'molfile'", "]", "=", "molfile", "sdfile", "[", "'1'", "]", "[", "'data'", "]", "=", "data", "return", "sdfile" ]
Construct new ``SDfile`` object from ``Molfile`` object. :param molfile: ``Molfile`` object. :type molfile: :class:`~ctfile.ctfile.Molfile`. :return: ``SDfile`` object. :rtype: :class:`~ctfile.ctfile.SDfile`.
[ "Construct", "new", "SDfile", "object", "from", "Molfile", "object", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L768-L789
train
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
SDfile.add_data
def add_data(self, id, key, value): """Add new data item. :param str id: Entry id within ``SDfile``. :param str key: Data item key. :param str value: Data item value. :return: None. :rtype: :py:obj:`None`. """ self[str(id)]['data'].setdefault(key, []) self[str(id)]['data'][key].append(value)
python
def add_data(self, id, key, value): """Add new data item. :param str id: Entry id within ``SDfile``. :param str key: Data item key. :param str value: Data item value. :return: None. :rtype: :py:obj:`None`. """ self[str(id)]['data'].setdefault(key, []) self[str(id)]['data'][key].append(value)
[ "def", "add_data", "(", "self", ",", "id", ",", "key", ",", "value", ")", ":", "self", "[", "str", "(", "id", ")", "]", "[", "'data'", "]", ".", "setdefault", "(", "key", ",", "[", "]", ")", "self", "[", "str", "(", "id", ")", "]", "[", "'data'", "]", "[", "key", "]", ".", "append", "(", "value", ")" ]
Add new data item. :param str id: Entry id within ``SDfile``. :param str key: Data item key. :param str value: Data item value. :return: None. :rtype: :py:obj:`None`.
[ "Add", "new", "data", "item", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L791-L801
train
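add_data uses the setdefault idiom to accumulate repeated data items under one key; the chained one-liner below is equivalent to the two-step form in the record:

data = {}
for key, value in [('MW', '46.07'), ('MW', '46.1'), ('LOGP', '-0.31')]:
    data.setdefault(key, []).append(value)
print(data)  # {'MW': ['46.07', '46.1'], 'LOGP': ['-0.31']}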
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
SDfile.add_molfile
def add_molfile(self, molfile, data): """Add ``Molfile`` and data to ``SDfile`` object. :param molfile: ``Molfile`` instance. :type molfile: :class:`~ctfile.ctfile.Molfile`. :param dict data: Data associated with ``Molfile`` instance. :return: None. :rtype: :py:obj:`None`. """ if not isinstance(molfile, Molfile): raise ValueError('Not a Molfile type: "{}"'.format(type(molfile))) if not isinstance(data, dict): raise ValueError('Not a dict type: "{}"'.format(type(data))) entry_ids = sorted(self.keys(), key=lambda x: int(x)) if entry_ids: last_entry_id = str(entry_ids[-1]) else: last_entry_id = '0' new_entry_id = str(int(last_entry_id) + 1) self[new_entry_id] = OrderedDict() self[new_entry_id]['molfile'] = molfile self[new_entry_id]['data'] = data
python
def add_molfile(self, molfile, data): """Add ``Molfile`` and data to ``SDfile`` object. :param molfile: ``Molfile`` instance. :type molfile: :class:`~ctfile.ctfile.Molfile`. :param dict data: Data associated with ``Molfile`` instance. :return: None. :rtype: :py:obj:`None`. """ if not isinstance(molfile, Molfile): raise ValueError('Not a Molfile type: "{}"'.format(type(molfile))) if not isinstance(data, dict): raise ValueError('Not a dict type: "{}"'.format(type(data))) entry_ids = sorted(self.keys(), key=lambda x: int(x)) if entry_ids: last_entry_id = str(entry_ids[-1]) else: last_entry_id = '0' new_entry_id = str(int(last_entry_id) + 1) self[new_entry_id] = OrderedDict() self[new_entry_id]['molfile'] = molfile self[new_entry_id]['data'] = data
[ "def", "add_molfile", "(", "self", ",", "molfile", ",", "data", ")", ":", "if", "not", "isinstance", "(", "molfile", ",", "Molfile", ")", ":", "raise", "ValueError", "(", "'Not a Molfile type: \"{}\"'", ".", "format", "(", "type", "(", "molfile", ")", ")", ")", "if", "not", "isinstance", "(", "data", ",", "dict", ")", ":", "raise", "ValueError", "(", "'Not a dict type: \"{}\"'", ".", "format", "(", "type", "(", "data", ")", ")", ")", "entry_ids", "=", "sorted", "(", "self", ".", "keys", "(", ")", ",", "key", "=", "lambda", "x", ":", "int", "(", "x", ")", ")", "if", "entry_ids", ":", "last_entry_id", "=", "str", "(", "entry_ids", "[", "-", "1", "]", ")", "else", ":", "last_entry_id", "=", "'0'", "new_entry_id", "=", "str", "(", "int", "(", "last_entry_id", ")", "+", "1", ")", "self", "[", "new_entry_id", "]", "=", "OrderedDict", "(", ")", "self", "[", "new_entry_id", "]", "[", "'molfile'", "]", "=", "molfile", "self", "[", "new_entry_id", "]", "[", "'data'", "]", "=", "data" ]
Add ``Molfile`` and data to ``SDfile`` object. :param molfile: ``Molfile`` instance. :type molfile: :class:`~ctfile.ctfile.Molfile`. :param dict data: Data associated with ``Molfile`` instance. :return: None. :rtype: :py:obj:`None`.
[ "Add", "Molfile", "and", "data", "to", "SDfile", "object", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L803-L827
train
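add_molfile allocates the next entry id by sorting the existing string ids numerically; a plain lexicographic max would misorder '10' and '9'. A standalone sketch with invented entries:

entries = {'1': 'a', '9': 'b', '10': 'c'}   # invented entry ids
entry_ids = sorted(entries, key=lambda x: int(x))
last = entry_ids[-1] if entry_ids else '0'
print(str(int(last) + 1))  # '11'; max(entries) would wrongly pick '9'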
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
SDfile.add_sdfile
def add_sdfile(self, sdfile): """Add new ``SDfile`` to current ``SDfile``. :param sdfile: ``SDfile`` instance. :return: None. :rtype: :py:obj:`None`. """ if not isinstance(sdfile, SDfile): raise ValueError('Not a SDfile type: "{}"'.format(type(sdfile))) for entry_id in sdfile: self.add_molfile(molfile=sdfile[entry_id]['molfile'], data=sdfile[entry_id]['data'])
python
def add_sdfile(self, sdfile): """Add new ``SDfile`` to current ``SDfile``. :param sdfile: ``SDfile`` instance. :return: None. :rtype: :py:obj:`None`. """ if not isinstance(sdfile, SDfile): raise ValueError('Not a SDfile type: "{}"'.format(type(sdfile))) for entry_id in sdfile: self.add_molfile(molfile=sdfile[entry_id]['molfile'], data=sdfile[entry_id]['data'])
[ "def", "add_sdfile", "(", "self", ",", "sdfile", ")", ":", "if", "not", "isinstance", "(", "sdfile", ",", "SDfile", ")", ":", "raise", "ValueError", "(", "'Not a SDfile type: \"{}\"'", ".", "format", "(", "type", "(", "sdfile", ")", ")", ")", "for", "entry_id", "in", "sdfile", ":", "self", ".", "add_molfile", "(", "molfile", "=", "sdfile", "[", "entry_id", "]", "[", "'molfile'", "]", ",", "data", "=", "sdfile", "[", "entry_id", "]", "[", "'data'", "]", ")" ]
Add new ``SDfile`` to current ``SDfile``. :param sdfile: ``SDfile`` instance. :return: None. :rtype: :py:obj:`None`.
[ "Add", "new", "SDfile", "to", "current", "SDfile", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L829-L841
train
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
Atom.neighbor_atoms
def neighbor_atoms(self, atom_symbol=None): """Access neighbor atoms. :param str atom_symbol: Atom symbol. :return: List of neighbor atoms. :rtype: :py:class:`list`. """ if not atom_symbol: return self.neighbors else: return [atom for atom in self.neighbors if atom['atom_symbol'] == atom_symbol]
python
def neighbor_atoms(self, atom_symbol=None): """Access neighbor atoms. :param str atom_symbol: Atom symbol. :return: List of neighbor atoms. :rtype: :py:class:`list`. """ if not atom_symbol: return self.neighbors else: return [atom for atom in self.neighbors if atom['atom_symbol'] == atom_symbol]
[ "def", "neighbor_atoms", "(", "self", ",", "atom_symbol", "=", "None", ")", ":", "if", "not", "atom_symbol", ":", "return", "self", ".", "neighbors", "else", ":", "return", "[", "atom", "for", "atom", "in", "self", ".", "neighbors", "if", "atom", "[", "'atom_symbol'", "]", "==", "atom_symbol", "]" ]
Access neighbor atoms. :param str atom_symbol: Atom symbol. :return: List of neighbor atoms. :rtype: :py:class:`list`.
[ "Access", "neighbor", "atoms", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L983-L993
train
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
Bond.update_atom_numbers
def update_atom_numbers(self): """Update links "first_atom_number" -> "second_atom_number" :return: None. :rtype: :py:obj:`None`. """ self._ctab_data['first_atom_number'] = self.first_atom.atom_number self._ctab_data['second_atom_number'] = self.second_atom.atom_number
python
def update_atom_numbers(self): """Update links "first_atom_number" -> "second_atom_number" :return: None. :rtype: :py:obj:`None`. """ self._ctab_data['first_atom_number'] = self.first_atom.atom_number self._ctab_data['second_atom_number'] = self.second_atom.atom_number
[ "def", "update_atom_numbers", "(", "self", ")", ":", "self", ".", "_ctab_data", "[", "'first_atom_number'", "]", "=", "self", ".", "first_atom", ".", "atom_number", "self", ".", "_ctab_data", "[", "'second_atom_number'", "]", "=", "self", ".", "second_atom", ".", "atom_number" ]
Update links "first_atom_number" -> "second_atom_number" :return: None. :rtype: :py:obj:`None`.
[ "Update", "links", "first_atom_number", "-", ">", "second_atom_number" ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L1111-L1118
train
MoseleyBioinformaticsLab/ctfile
ctfile/ctfile.py
CtabAtomBondEncoder.default
def default(self, o): """Default encoder. :param o: Atom or Bond instance. :type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`. :return: Dictionary that contains information required for atom and bond block of ``Ctab``. :rtype: :py:class:`collections.OrderedDict` """ if isinstance(o, Atom) or isinstance(o, Bond): return o._ctab_data else: return o.__dict__
python
def default(self, o): """Default encoder. :param o: Atom or Bond instance. :type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`. :return: Dictionary that contains information required for atom and bond block of ``Ctab``. :rtype: :py:class:`collections.OrderedDict` """ if isinstance(o, Atom) or isinstance(o, Bond): return o._ctab_data else: return o.__dict__
[ "def", "default", "(", "self", ",", "o", ")", ":", "if", "isinstance", "(", "o", ",", "Atom", ")", "or", "isinstance", "(", "o", ",", "Bond", ")", ":", "return", "o", ".", "_ctab_data", "else", ":", "return", "o", ".", "__dict__" ]
Default encoder. :param o: Atom or Bond instance. :type o: :class:`~ctfile.ctfile.Atom` or :class:`~ctfile.ctfile.Bond`. :return: Dictionary that contains information required for atom and bond block of ``Ctab``. :rtype: :py:class:`collections.OrderedDict`
[ "Default", "encoder", "." ]
eae864126cd9102207df5d363a3222256a0f1396
https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L1144-L1155
train
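CtabAtomBondEncoder relies on json.JSONEncoder calling default() for any object the serializer does not understand. A self-contained version of the same fallback-to-__dict__ pattern; Point is an invented stand-in for Atom/Bond:

import json

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class FallbackEncoder(json.JSONEncoder):
    def default(self, o):
        # called only for objects json cannot serialize natively
        return o.__dict__

print(json.dumps({'p': Point(1, 2)}, cls=FallbackEncoder))
# {"p": {"x": 1, "y": 2}}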
product-definition-center/pdc-client
pdc_client/config.py
ServerConfigManager.get
def get(self, server): """ Returns ServerConfig instance with configuration given server. :raises ServerConfigConflictError: if configuration directory contains configuration for same server multiple times :raises ServerConfigMissingUrlError: if URL is not specified in the configuration :raises ServerConfigNotFoundError: if configuration for given server is not found """ server_config = self.config.get(server) try: while server_config is None: new_config = self._read_next_config() server_config = new_config.get(server) new_config.update(self.config) self.config = new_config except StopIteration: return _default_server_configuration(server) if CONFIG_URL_KEY_NAME not in server_config: message = "'%s' must be specified in configuration for '%s'" \ % (CONFIG_URL_KEY_NAME, server) raise ServerConfigMissingUrlError(message) return ServerConfig(server_config)
python
def get(self, server): """ Returns ServerConfig instance with configuration given server. :raises ServerConfigConflictError: if configuration directory contains configuration for same server multiple times :raises ServerConfigMissingUrlError: if URL is not specified in the configuration :raises ServerConfigNotFoundError: if configuration for given server is not found """ server_config = self.config.get(server) try: while server_config is None: new_config = self._read_next_config() server_config = new_config.get(server) new_config.update(self.config) self.config = new_config except StopIteration: return _default_server_configuration(server) if CONFIG_URL_KEY_NAME not in server_config: message = "'%s' must be specified in configuration for '%s'" \ % (CONFIG_URL_KEY_NAME, server) raise ServerConfigMissingUrlError(message) return ServerConfig(server_config)
[ "def", "get", "(", "self", ",", "server", ")", ":", "server_config", "=", "self", ".", "config", ".", "get", "(", "server", ")", "try", ":", "while", "server_config", "is", "None", ":", "new_config", "=", "self", ".", "_read_next_config", "(", ")", "server_config", "=", "new_config", ".", "get", "(", "server", ")", "new_config", ".", "update", "(", "self", ".", "config", ")", "self", ".", "config", "=", "new_config", "except", "StopIteration", ":", "return", "_default_server_configuration", "(", "server", ")", "if", "CONFIG_URL_KEY_NAME", "not", "in", "server_config", ":", "message", "=", "\"'%s' must be specified in configuration for '%s'\"", "%", "(", "CONFIG_URL_KEY_NAME", ",", "server", ")", "raise", "ServerConfigMissingUrlError", "(", "message", ")", "return", "ServerConfig", "(", "server_config", ")" ]
Returns a ServerConfig instance with the configuration for the given server. :raises ServerConfigConflictError: if configuration directory contains configuration for same server multiple times :raises ServerConfigMissingUrlError: if URL is not specified in the configuration :raises ServerConfigNotFoundError: if configuration for given server is not found
[ "Returns", "ServerConfig", "instance", "with", "configuration", "given", "server", "." ]
7236fd8b72e675ebb321bbe337289d9fbeb6119f
https://github.com/product-definition-center/pdc-client/blob/7236fd8b72e675ebb321bbe337289d9fbeb6119f/pdc_client/config.py#L94-L121
train
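ServerConfigManager.get reads configuration sources lazily: it pulls the next file only while the server is still unknown, merges so that earlier (higher-priority) files win, and lets StopIteration signal "not found anywhere". A self-contained sketch of that loop with invented config data:

def read_configs():
    # stands in for _read_next_config(); each yield is one config file
    yield {'prod': {'host': 'pdc.example.com'}}
    yield {'stage': {'host': 'stage.example.com'}}

def lookup(server):
    config, source = {}, read_configs()
    entry = config.get(server)
    try:
        while entry is None:
            new = next(source)     # raises StopIteration when exhausted
            entry = new.get(server)
            new.update(config)     # earlier files keep priority
            config = new
    except StopIteration:
        raise KeyError(server)
    return entry

print(lookup('stage'))  # {'host': 'stage.example.com'}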
chriso/gauged
gauged/structures/float_array.py
FloatArray.free
def free(self): """Free the underlying C array""" if self._ptr is None: return Gauged.array_free(self.ptr) FloatArray.ALLOCATIONS -= 1 self._ptr = None
python
def free(self): """Free the underlying C array""" if self._ptr is None: return Gauged.array_free(self.ptr) FloatArray.ALLOCATIONS -= 1 self._ptr = None
[ "def", "free", "(", "self", ")", ":", "if", "self", ".", "_ptr", "is", "None", ":", "return", "Gauged", ".", "array_free", "(", "self", ".", "ptr", ")", "FloatArray", ".", "ALLOCATIONS", "-=", "1", "self", ".", "_ptr", "=", "None" ]
Free the underlying C array
[ "Free", "the", "underlying", "C", "array" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/structures/float_array.py#L59-L65
train
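FloatArray.free is written to be idempotent: the handle is nulled after release so a second free() is a no-op rather than a double free of the C array. The same guard, standalone:

class Handle(object):
    def __init__(self):
        self._ptr = object()  # stands in for the C pointer

    def free(self):
        if self._ptr is None:
            return            # already freed: no-op
        # ... release the underlying resource here ...
        self._ptr = None

h = Handle()
h.free()
h.free()  # safe second call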
glormph/msstitch
src/app/actions/mzidtsv/quant.py
generate_psms_quanted
def generate_psms_quanted(quantdb, tsvfn, isob_header, oldheader, isobaric=False, precursor=False): """Takes dbfn and connects, gets quants for each line in tsvfn, sorts them in line by using keys in quantheader list.""" allquants, sqlfields = quantdb.select_all_psm_quants(isobaric, precursor) quant = next(allquants) for rownr, psm in enumerate(readers.generate_tsv_psms(tsvfn, oldheader)): outpsm = {x: y for x, y in psm.items()} if precursor: pquant = quant[sqlfields['precursor']] if pquant is None: pquant = 'NA' outpsm.update({mzidtsvdata.HEADER_PRECURSOR_QUANT: str(pquant)}) if isobaric: isoquants = {} while quant[0] == rownr: isoquants.update({quant[sqlfields['isochan']]: str(quant[sqlfields['isoquant']])}) try: quant = next(allquants) except StopIteration: # last PSM, break from while loop or it is not yielded at all break outpsm.update(get_quant_NAs(isoquants, isob_header)) else: try: quant = next(allquants) except StopIteration: # last PSM, needs explicit yield/break or it will not be yielded yield outpsm break yield outpsm
python
def generate_psms_quanted(quantdb, tsvfn, isob_header, oldheader, isobaric=False, precursor=False): """Takes dbfn and connects, gets quants for each line in tsvfn, sorts them in line by using keys in quantheader list.""" allquants, sqlfields = quantdb.select_all_psm_quants(isobaric, precursor) quant = next(allquants) for rownr, psm in enumerate(readers.generate_tsv_psms(tsvfn, oldheader)): outpsm = {x: y for x, y in psm.items()} if precursor: pquant = quant[sqlfields['precursor']] if pquant is None: pquant = 'NA' outpsm.update({mzidtsvdata.HEADER_PRECURSOR_QUANT: str(pquant)}) if isobaric: isoquants = {} while quant[0] == rownr: isoquants.update({quant[sqlfields['isochan']]: str(quant[sqlfields['isoquant']])}) try: quant = next(allquants) except StopIteration: # last PSM, break from while loop or it is not yielded at all break outpsm.update(get_quant_NAs(isoquants, isob_header)) else: try: quant = next(allquants) except StopIteration: # last PSM, needs explicit yield/break or it will not be yielded yield outpsm break yield outpsm
[ "def", "generate_psms_quanted", "(", "quantdb", ",", "tsvfn", ",", "isob_header", ",", "oldheader", ",", "isobaric", "=", "False", ",", "precursor", "=", "False", ")", ":", "allquants", ",", "sqlfields", "=", "quantdb", ".", "select_all_psm_quants", "(", "isobaric", ",", "precursor", ")", "quant", "=", "next", "(", "allquants", ")", "for", "rownr", ",", "psm", "in", "enumerate", "(", "readers", ".", "generate_tsv_psms", "(", "tsvfn", ",", "oldheader", ")", ")", ":", "outpsm", "=", "{", "x", ":", "y", "for", "x", ",", "y", "in", "psm", ".", "items", "(", ")", "}", "if", "precursor", ":", "pquant", "=", "quant", "[", "sqlfields", "[", "'precursor'", "]", "]", "if", "pquant", "is", "None", ":", "pquant", "=", "'NA'", "outpsm", ".", "update", "(", "{", "mzidtsvdata", ".", "HEADER_PRECURSOR_QUANT", ":", "str", "(", "pquant", ")", "}", ")", "if", "isobaric", ":", "isoquants", "=", "{", "}", "while", "quant", "[", "0", "]", "==", "rownr", ":", "isoquants", ".", "update", "(", "{", "quant", "[", "sqlfields", "[", "'isochan'", "]", "]", ":", "str", "(", "quant", "[", "sqlfields", "[", "'isoquant'", "]", "]", ")", "}", ")", "try", ":", "quant", "=", "next", "(", "allquants", ")", "except", "StopIteration", ":", "# last PSM, break from while loop or it is not yielded at all", "break", "outpsm", ".", "update", "(", "get_quant_NAs", "(", "isoquants", ",", "isob_header", ")", ")", "else", ":", "try", ":", "quant", "=", "next", "(", "allquants", ")", "except", "StopIteration", ":", "# last PSM, needs explicit yield/break or it will not be yielded", "yield", "outpsm", "break", "yield", "outpsm" ]
Gets quants from quantdb for each line in tsvfn and sorts them into the line using the keys in the isobaric quant header list.
[ "Takes", "dbfn", "and", "connects", "gets", "quants", "for", "each", "line", "in", "tsvfn", "sorts", "them", "in", "line", "by", "using", "keys", "in", "quantheader", "list", "." ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mzidtsv/quant.py#L5-L36
train
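generate_psms_quanted walks one sorted stream of quant rows in step with enumerated PSM rows, and (as its comments stress) has to handle StopIteration carefully so the final PSM is not dropped. A simplified, self-contained version of that merge with invented rows:

def merge(psms, quants):
    quants = iter(quants)
    quant = next(quants)
    for rownr, psm in enumerate(psms):
        channels = {}
        while quant[0] == rownr:
            channels[quant[1]] = quant[2]
            try:
                quant = next(quants)
            except StopIteration:
                break          # last quant row consumed
        yield psm, channels

psms = ['psm0', 'psm1']
quants = [(0, '126', 1.0), (0, '127', 2.0), (1, '126', 3.0)]
print(list(merge(psms, quants)))
# [('psm0', {'126': 1.0, '127': 2.0}), ('psm1', {'126': 3.0})]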
EnigmaBridge/jbossply
jbossply/jbossparser.py
JbossLexer.t_escaped_BACKSPACE_CHAR
def t_escaped_BACKSPACE_CHAR(self, t): r'\x62' # 'b' t.lexer.pop_state() t.value = unichr(0x0008) return t
python
def t_escaped_BACKSPACE_CHAR(self, t): r'\x62' # 'b' t.lexer.pop_state() t.value = unichr(0x0008) return t
[ "def", "t_escaped_BACKSPACE_CHAR", "(", "self", ",", "t", ")", ":", "# 'b'", "t", ".", "lexer", ".", "pop_state", "(", ")", "t", ".", "value", "=", "unichr", "(", "0x0008", ")", "return", "t" ]
r'\x62'
[ "r", "\\", "x62" ]
44b30b15982cae781f0c356fab7263751b20b4d0
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L173-L177
train
EnigmaBridge/jbossply
jbossply/jbossparser.py
JbossLexer.t_escaped_FORM_FEED_CHAR
def t_escaped_FORM_FEED_CHAR(self, t): r'\x66' # 'f' t.lexer.pop_state() t.value = unichr(0x000c) return t
python
def t_escaped_FORM_FEED_CHAR(self, t): r'\x66' # 'f' t.lexer.pop_state() t.value = unichr(0x000c) return t
[ "def", "t_escaped_FORM_FEED_CHAR", "(", "self", ",", "t", ")", ":", "# 'f'", "t", ".", "lexer", ".", "pop_state", "(", ")", "t", ".", "value", "=", "unichr", "(", "0x000c", ")", "return", "t" ]
r'\x66'
[ "r", "\\", "x66" ]
44b30b15982cae781f0c356fab7263751b20b4d0
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L179-L183
train
EnigmaBridge/jbossply
jbossply/jbossparser.py
JbossLexer.t_escaped_CARRIAGE_RETURN_CHAR
def t_escaped_CARRIAGE_RETURN_CHAR(self, t): r'\x72' # 'r' t.lexer.pop_state() t.value = unichr(0x000d) return t
python
def t_escaped_CARRIAGE_RETURN_CHAR(self, t): r'\x72' # 'r' t.lexer.pop_state() t.value = unichr(0x000d) return t
[ "def", "t_escaped_CARRIAGE_RETURN_CHAR", "(", "self", ",", "t", ")", ":", "# 'r'", "t", ".", "lexer", ".", "pop_state", "(", ")", "t", ".", "value", "=", "unichr", "(", "0x000d", ")", "return", "t" ]
r'\x72'
[ "r", "\\", "x72" ]
44b30b15982cae781f0c356fab7263751b20b4d0
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L185-L189
train
EnigmaBridge/jbossply
jbossply/jbossparser.py
JbossLexer.t_escaped_LINE_FEED_CHAR
def t_escaped_LINE_FEED_CHAR(self, t): r'\x6E' # 'n' t.lexer.pop_state() t.value = unichr(0x000a) return t
python
def t_escaped_LINE_FEED_CHAR(self, t): r'\x6E' # 'n' t.lexer.pop_state() t.value = unichr(0x000a) return t
[ "def", "t_escaped_LINE_FEED_CHAR", "(", "self", ",", "t", ")", ":", "# 'n'", "t", ".", "lexer", ".", "pop_state", "(", ")", "t", ".", "value", "=", "unichr", "(", "0x000a", ")", "return", "t" ]
r'\x6E'
[ "r", "\\", "x6E" ]
44b30b15982cae781f0c356fab7263751b20b4d0
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L191-L195
train
EnigmaBridge/jbossply
jbossply/jbossparser.py
JbossLexer.t_escaped_TAB_CHAR
def t_escaped_TAB_CHAR(self, t): r'\x74' # 't' t.lexer.pop_state() t.value = unichr(0x0009) return t
python
def t_escaped_TAB_CHAR(self, t): r'\x74' # 't' t.lexer.pop_state() t.value = unichr(0x0009) return t
[ "def", "t_escaped_TAB_CHAR", "(", "self", ",", "t", ")", ":", "# 't'", "t", ".", "lexer", ".", "pop_state", "(", ")", "t", ".", "value", "=", "unichr", "(", "0x0009", ")", "return", "t" ]
r'\x74'
[ "r", "\\", "x74" ]
44b30b15982cae781f0c356fab7263751b20b4d0
https://github.com/EnigmaBridge/jbossply/blob/44b30b15982cae781f0c356fab7263751b20b4d0/jbossply/jbossparser.py#L197-L201
train
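The five t_escaped_* records above share one oddity: their docstring fields are raw strings like r'\x74'. That is a PLY convention, where a token rule's docstring is its regular expression, so the corpus's docstring extractor picked up the regex. A standalone demonstration of the mechanism without the ply dependency:

import re

def t_escaped_TAB_CHAR(t):
    r'\x74'   # the docstring *is* the regex; it matches a literal 't'
    return '\t'

pattern = re.compile(t_escaped_TAB_CHAR.__doc__)
print(pattern.match('t').group())  # 't', just as PLY would match it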
glormph/msstitch
src/app/readers/mzidplus.py
get_mzid_specfile_ids
def get_mzid_specfile_ids(mzidfn, namespace): """Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict. Keys == IDs, values == fns""" sid_fn = {} for specdata in mzid_specdata_generator(mzidfn, namespace): sid_fn[specdata.attrib['id']] = specdata.attrib['name'] return sid_fn
python
def get_mzid_specfile_ids(mzidfn, namespace): """Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict. Keys == IDs, values == fns""" sid_fn = {} for specdata in mzid_specdata_generator(mzidfn, namespace): sid_fn[specdata.attrib['id']] = specdata.attrib['name'] return sid_fn
[ "def", "get_mzid_specfile_ids", "(", "mzidfn", ",", "namespace", ")", ":", "sid_fn", "=", "{", "}", "for", "specdata", "in", "mzid_specdata_generator", "(", "mzidfn", ",", "namespace", ")", ":", "sid_fn", "[", "specdata", ".", "attrib", "[", "'id'", "]", "]", "=", "specdata", ".", "attrib", "[", "'name'", "]", "return", "sid_fn" ]
Returns mzid spectra data filenames and their IDs used in the mzIdentML file as a dict. Keys == IDs, values == fns
[ "Returns", "mzid", "spectra", "data", "filenames", "and", "their", "IDs", "used", "in", "the", "mzIdentML", "file", "as", "a", "dict", ".", "Keys", "==", "IDs", "values", "==", "fns" ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/mzidplus.py#L96-L102
train
glormph/msstitch
src/app/readers/mzidplus.py
get_specidentitem_percolator_data
def get_specidentitem_percolator_data(item, xmlns): """Loop through SpecIdentificationItem children. Find percolator data by matching to a dict lookup. Return a dict containing percolator data""" percomap = {'{0}userParam'.format(xmlns): PERCO_HEADERMAP, } percodata = {} for child in item: try: percoscore = percomap[child.tag][child.attrib['name']] except KeyError: continue else: percodata[percoscore] = child.attrib['value'] outkeys = [y for x in list(percomap.values()) for y in list(x.values())] for key in outkeys: try: percodata[key] except KeyError: percodata[key] = 'NA' return percodata
python
def get_specidentitem_percolator_data(item, xmlns): """Loop through SpecIdentificationItem children. Find percolator data by matching to a dict lookup. Return a dict containing percolator data""" percomap = {'{0}userParam'.format(xmlns): PERCO_HEADERMAP, } percodata = {} for child in item: try: percoscore = percomap[child.tag][child.attrib['name']] except KeyError: continue else: percodata[percoscore] = child.attrib['value'] outkeys = [y for x in list(percomap.values()) for y in list(x.values())] for key in outkeys: try: percodata[key] except KeyError: percodata[key] = 'NA' return percodata
[ "def", "get_specidentitem_percolator_data", "(", "item", ",", "xmlns", ")", ":", "percomap", "=", "{", "'{0}userParam'", ".", "format", "(", "xmlns", ")", ":", "PERCO_HEADERMAP", ",", "}", "percodata", "=", "{", "}", "for", "child", "in", "item", ":", "try", ":", "percoscore", "=", "percomap", "[", "child", ".", "tag", "]", "[", "child", ".", "attrib", "[", "'name'", "]", "]", "except", "KeyError", ":", "continue", "else", ":", "percodata", "[", "percoscore", "]", "=", "child", ".", "attrib", "[", "'value'", "]", "outkeys", "=", "[", "y", "for", "x", "in", "list", "(", "percomap", ".", "values", "(", ")", ")", "for", "y", "in", "list", "(", "x", ".", "values", "(", ")", ")", "]", "for", "key", "in", "outkeys", ":", "try", ":", "percodata", "[", "key", "]", "except", "KeyError", ":", "percodata", "[", "key", "]", "=", "'NA'", "return", "percodata" ]
Loop through SpecIdentificationItem children. Find percolator data by matching to a dict lookup. Return a dict containing percolator data
[ "Loop", "through", "SpecIdentificationItem", "children", ".", "Find", "percolator", "data", "by", "matching", "to", "a", "dict", "lookup", ".", "Return", "a", "dict", "containing", "percolator", "data" ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/readers/mzidplus.py#L115-L134
train
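get_specidentitem_percolator_data pulls values through a tag/name lookup map and then backfills every expected output key with 'NA'. A flattened sketch of that extract-then-backfill pattern (a single-level map and a dict comprehension replace the nested try/except; all names here are invented):

headermap = {'percolator:score': 'percolator svm-score',
             'percolator:Q value': 'q-value'}
children = [('percolator:score', '0.87'), ('unrelated param', 'x')]

percodata = {headermap[name]: value
             for name, value in children if name in headermap}
for key in headermap.values():
    percodata.setdefault(key, 'NA')   # backfill missing scores
print(percodata)  # {'percolator svm-score': '0.87', 'q-value': 'NA'}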
Erotemic/utool
utool/util_sysreq.py
locate_path
def locate_path(dname, recurse_down=True): """ Search for a path """ tried_fpaths = [] root_dir = os.getcwd() while root_dir is not None: dpath = join(root_dir, dname) if exists(dpath): return dpath else: tried_fpaths.append(dpath) _new_root = dirname(root_dir) if _new_root == root_dir: root_dir = None break else: root_dir = _new_root if not recurse_down: break msg = 'Cannot locate dname=%r' % (dname,) msg += '\n[sysreq!] Checked: ' + '\n[sysreq!] Checked: '.join(tried_fpaths) print(msg) raise ImportError(msg)
python
def locate_path(dname, recurse_down=True): """ Search for a path """ tried_fpaths = [] root_dir = os.getcwd() while root_dir is not None: dpath = join(root_dir, dname) if exists(dpath): return dpath else: tried_fpaths.append(dpath) _new_root = dirname(root_dir) if _new_root == root_dir: root_dir = None break else: root_dir = _new_root if not recurse_down: break msg = 'Cannot locate dname=%r' % (dname,) msg += '\n[sysreq!] Checked: ' + '\n[sysreq!] Checked: '.join(tried_fpaths) print(msg) raise ImportError(msg)
[ "def", "locate_path", "(", "dname", ",", "recurse_down", "=", "True", ")", ":", "tried_fpaths", "=", "[", "]", "root_dir", "=", "os", ".", "getcwd", "(", ")", "while", "root_dir", "is", "not", "None", ":", "dpath", "=", "join", "(", "root_dir", ",", "dname", ")", "if", "exists", "(", "dpath", ")", ":", "return", "dpath", "else", ":", "tried_fpaths", ".", "append", "(", "dpath", ")", "_new_root", "=", "dirname", "(", "root_dir", ")", "if", "_new_root", "==", "root_dir", ":", "root_dir", "=", "None", "break", "else", ":", "root_dir", "=", "_new_root", "if", "not", "recurse_down", ":", "break", "msg", "=", "'Cannot locate dname=%r'", "%", "(", "dname", ",", ")", "msg", "=", "(", "'\\n[sysreq!] Checked: '", ".", "join", "(", "tried_fpaths", ")", ")", "print", "(", "msg", ")", "raise", "ImportError", "(", "msg", ")" ]
Search for a path
[ "Search", "for", "a", "path" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_sysreq.py#L116-L137
train
Erotemic/utool
utool/util_sysreq.py
total_purge_developed_repo
def total_purge_developed_repo(repodir): r""" Outputs commands to help purge a repo Args: repodir (str): path to developed repository CommandLine: python -m utool.util_sysreq total_purge_installed_repo --show Ignore: repodir = ut.truepath('~/code/Lasagne') Example: >>> # DISABLE_DOCTEST >>> from utool.util_sysreq import * # NOQA >>> import utool as ut >>> repodir = ut.get_argval('--repodir', default=None) >>> result = total_purge_installed_repo(repodir) """ assert repodir is not None import utool as ut import os repo = ut.util_git.Repo(dpath=repodir) user = os.environ['USER'] fmtdict = dict( user=user, modname=repo.modname, reponame=repo.reponame, dpath=repo.dpath, global_site_pkgs=ut.get_global_dist_packages_dir(), local_site_pkgs=ut.get_local_dist_packages_dir(), venv_site_pkgs=ut.get_site_packages_dir(), ) commands = [_.format(**fmtdict) for _ in [ 'pip uninstall {modname}', 'sudo -H pip uninstall {modname}', 'sudo pip uninstall {modname}', 'easy_install -m {modname}', 'cd {dpath} && python setup.py develop --uninstall', # If they still exist try chowning to current user 'sudo chown -R {user}:{user} {dpath}', ]] print('Normal uninstall commands') print('\n'.join(commands)) possible_link_paths = [_.format(**fmtdict) for _ in [ '{dpath}/{modname}.egg-info', '{dpath}/build', '{venv_site_pkgs}/{reponame}.egg-info', '{local_site_pkgs}/{reponame}.egg-info', '{venv_site_pkgs}/{reponame}.egg-info', ]] from os.path import exists, basename existing_link_paths = [path for path in possible_link_paths] print('# Delete paths and eggs') for path in existing_link_paths: if exists(path): if ut.get_file_info(path)['owner'] != user: print('sudo /bin/rm -rf {path}'.format(path=path)) else: print('/bin/rm -rf {path}'.format(path=path)) #ut.delete(path) print('# Make sure nothing is in the easy install paths') easyinstall_paths = [_.format(**fmtdict) for _ in [ '{venv_site_pkgs}/easy-install.pth', '{local_site_pkgs}/easy-install.pth', '{venv_site_pkgs}/easy-install.pth', ]] for path in easyinstall_paths: if exists(path): easy_install_list = ut.readfrom(path, verbose=False).strip().split('\n') easy_install_list_ = [basename(p) for p in easy_install_list] index1 = ut.listfind(easy_install_list_, repo.reponame) index2 = ut.listfind(easy_install_list_, repo.modname) if index1 is not None or index2 is not None: print('Found at index1=%r, index=%r' % (index1, index2)) if ut.get_file_info(path)['owner'] != user: print('sudo gvim {path}'.format(path=path)) else: print('gvim {path}'.format(path=path)) checkcmds = [_.format(**fmtdict) for _ in [ 'python -c "import {modname}; print({modname}.__file__)"' ]] import sys assert repo.modname not in sys.modules print("# CHECK STATUS") for cmd in checkcmds: print(cmd)
python
def total_purge_developed_repo(repodir): r""" Outputs commands to help purge a repo Args: repodir (str): path to developed repository CommandLine: python -m utool.util_sysreq total_purge_installed_repo --show Ignore: repodir = ut.truepath('~/code/Lasagne') Example: >>> # DISABLE_DOCTEST >>> from utool.util_sysreq import * # NOQA >>> import utool as ut >>> repodir = ut.get_argval('--repodir', default=None) >>> result = total_purge_installed_repo(repodir) """ assert repodir is not None import utool as ut import os repo = ut.util_git.Repo(dpath=repodir) user = os.environ['USER'] fmtdict = dict( user=user, modname=repo.modname, reponame=repo.reponame, dpath=repo.dpath, global_site_pkgs=ut.get_global_dist_packages_dir(), local_site_pkgs=ut.get_local_dist_packages_dir(), venv_site_pkgs=ut.get_site_packages_dir(), ) commands = [_.format(**fmtdict) for _ in [ 'pip uninstall {modname}', 'sudo -H pip uninstall {modname}', 'sudo pip uninstall {modname}', 'easy_install -m {modname}', 'cd {dpath} && python setup.py develop --uninstall', # If they still exist try chowning to current user 'sudo chown -R {user}:{user} {dpath}', ]] print('Normal uninstall commands') print('\n'.join(commands)) possible_link_paths = [_.format(**fmtdict) for _ in [ '{dpath}/{modname}.egg-info', '{dpath}/build', '{venv_site_pkgs}/{reponame}.egg-info', '{local_site_pkgs}/{reponame}.egg-info', '{venv_site_pkgs}/{reponame}.egg-info', ]] from os.path import exists, basename existing_link_paths = [path for path in possible_link_paths] print('# Delete paths and eggs') for path in existing_link_paths: if exists(path): if ut.get_file_info(path)['owner'] != user: print('sudo /bin/rm -rf {path}'.format(path=path)) else: print('/bin/rm -rf {path}'.format(path=path)) #ut.delete(path) print('# Make sure nothing is in the easy install paths') easyinstall_paths = [_.format(**fmtdict) for _ in [ '{venv_site_pkgs}/easy-install.pth', '{local_site_pkgs}/easy-install.pth', '{venv_site_pkgs}/easy-install.pth', ]] for path in easyinstall_paths: if exists(path): easy_install_list = ut.readfrom(path, verbose=False).strip().split('\n') easy_install_list_ = [basename(p) for p in easy_install_list] index1 = ut.listfind(easy_install_list_, repo.reponame) index2 = ut.listfind(easy_install_list_, repo.modname) if index1 is not None or index2 is not None: print('Found at index1=%r, index=%r' % (index1, index2)) if ut.get_file_info(path)['owner'] != user: print('sudo gvim {path}'.format(path=path)) else: print('gvim {path}'.format(path=path)) checkcmds = [_.format(**fmtdict) for _ in [ 'python -c "import {modname}; print({modname}.__file__)"' ]] import sys assert repo.modname not in sys.modules print("# CHECK STATUS") for cmd in checkcmds: print(cmd)
[ "def", "total_purge_developed_repo", "(", "repodir", ")", ":", "assert", "repodir", "is", "not", "None", "import", "utool", "as", "ut", "import", "os", "repo", "=", "ut", ".", "util_git", ".", "Repo", "(", "dpath", "=", "repodir", ")", "user", "=", "os", ".", "environ", "[", "'USER'", "]", "fmtdict", "=", "dict", "(", "user", "=", "user", ",", "modname", "=", "repo", ".", "modname", ",", "reponame", "=", "repo", ".", "reponame", ",", "dpath", "=", "repo", ".", "dpath", ",", "global_site_pkgs", "=", "ut", ".", "get_global_dist_packages_dir", "(", ")", ",", "local_site_pkgs", "=", "ut", ".", "get_local_dist_packages_dir", "(", ")", ",", "venv_site_pkgs", "=", "ut", ".", "get_site_packages_dir", "(", ")", ",", ")", "commands", "=", "[", "_", ".", "format", "(", "*", "*", "fmtdict", ")", "for", "_", "in", "[", "'pip uninstall {modname}'", ",", "'sudo -H pip uninstall {modname}'", ",", "'sudo pip uninstall {modname}'", ",", "'easy_install -m {modname}'", ",", "'cd {dpath} && python setup.py develop --uninstall'", ",", "# If they still exist try chowning to current user", "'sudo chown -R {user}:{user} {dpath}'", ",", "]", "]", "print", "(", "'Normal uninstall commands'", ")", "print", "(", "'\\n'", ".", "join", "(", "commands", ")", ")", "possible_link_paths", "=", "[", "_", ".", "format", "(", "*", "*", "fmtdict", ")", "for", "_", "in", "[", "'{dpath}/{modname}.egg-info'", ",", "'{dpath}/build'", ",", "'{venv_site_pkgs}/{reponame}.egg-info'", ",", "'{local_site_pkgs}/{reponame}.egg-info'", ",", "'{venv_site_pkgs}/{reponame}.egg-info'", ",", "]", "]", "from", "os", ".", "path", "import", "exists", ",", "basename", "existing_link_paths", "=", "[", "path", "for", "path", "in", "possible_link_paths", "]", "print", "(", "'# Delete paths and eggs'", ")", "for", "path", "in", "existing_link_paths", ":", "if", "exists", "(", "path", ")", ":", "if", "ut", ".", "get_file_info", "(", "path", ")", "[", "'owner'", "]", "!=", "user", ":", "print", "(", "'sudo /bin/rm -rf {path}'", ".", "format", "(", "path", "=", "path", ")", ")", "else", ":", "print", "(", "'/bin/rm -rf {path}'", ".", "format", "(", "path", "=", "path", ")", ")", "#ut.delete(path)", "print", "(", "'# Make sure nothing is in the easy install paths'", ")", "easyinstall_paths", "=", "[", "_", ".", "format", "(", "*", "*", "fmtdict", ")", "for", "_", "in", "[", "'{venv_site_pkgs}/easy-install.pth'", ",", "'{local_site_pkgs}/easy-install.pth'", ",", "'{venv_site_pkgs}/easy-install.pth'", ",", "]", "]", "for", "path", "in", "easyinstall_paths", ":", "if", "exists", "(", "path", ")", ":", "easy_install_list", "=", "ut", ".", "readfrom", "(", "path", ",", "verbose", "=", "False", ")", ".", "strip", "(", ")", ".", "split", "(", "'\\n'", ")", "easy_install_list_", "=", "[", "basename", "(", "p", ")", "for", "p", "in", "easy_install_list", "]", "index1", "=", "ut", ".", "listfind", "(", "easy_install_list_", ",", "repo", ".", "reponame", ")", "index2", "=", "ut", ".", "listfind", "(", "easy_install_list_", ",", "repo", ".", "modname", ")", "if", "index1", "is", "not", "None", "or", "index2", "is", "not", "None", ":", "print", "(", "'Found at index1=%r, index=%r'", "%", "(", "index1", ",", "index2", ")", ")", "if", "ut", ".", "get_file_info", "(", "path", ")", "[", "'owner'", "]", "!=", "user", ":", "print", "(", "'sudo gvim {path}'", ".", "format", "(", "path", "=", "path", ")", ")", "else", ":", "print", "(", "'gvim {path}'", ".", "format", "(", "path", "=", "path", ")", ")", "checkcmds", "=", "[", "_", ".", "format", "(", "*", "*", "fmtdict", ")", 
"for", "_", "in", "[", "'python -c \"import {modname}; print({modname}.__file__)\"'", "]", "]", "import", "sys", "assert", "repo", ".", "modname", "not", "in", "sys", ".", "modules", "print", "(", "\"# CHECK STATUS\"", ")", "for", "cmd", "in", "checkcmds", ":", "print", "(", "cmd", ")" ]
r""" Outputs commands to help purge a repo Args: repodir (str): path to developed repository CommandLine: python -m utool.util_sysreq total_purge_installed_repo --show Ignore: repodir = ut.truepath('~/code/Lasagne') Example: >>> # DISABLE_DOCTEST >>> from utool.util_sysreq import * # NOQA >>> import utool as ut >>> repodir = ut.get_argval('--repodir', default=None) >>> result = total_purge_installed_repo(repodir)
[ "r", "Outputs", "commands", "to", "help", "purge", "a", "repo" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_sysreq.py#L151-L244
train
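A minimal usage sketch for total_purge_developed_repo above. The repo path is hypothetical and the utool.util_sysreq import location is an assumption; note the function only prints shell commands, it does not run them:

import utool as ut
from utool import util_sysreq

# Hypothetical checkout of a repo installed in development mode.
repodir = ut.truepath('~/code/Lasagne')

# Prints `pip uninstall` / `setup.py develop --uninstall` commands and
# lists leftover egg-info paths; nothing is deleted automatically.
util_sysreq.total_purge_developed_repo(repodir)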
Erotemic/utool
utool/DynamicStruct.py
DynStruct.add_dict
def add_dict(self, dyn_dict): 'Adds a dictionary to the prefs' if not isinstance(dyn_dict, dict): raise Exception('DynStruct.add_dict expects a dictionary. ' + 'Received: ' + six.text_type(type(dyn_dict))) for (key, val) in six.iteritems(dyn_dict): self[key] = val
python
def add_dict(self, dyn_dict): 'Adds a dictionary to the prefs' if not isinstance(dyn_dict, dict): raise Exception('DynStruct.add_dict expects a dictionary. ' + 'Received: ' + six.text_type(type(dyn_dict))) for (key, val) in six.iteritems(dyn_dict): self[key] = val
[ "def", "add_dict", "(", "self", ",", "dyn_dict", ")", ":", "if", "not", "isinstance", "(", "dyn_dict", ",", "dict", ")", ":", "raise", "Exception", "(", "'DynStruct.add_dict expects a dictionary.'", "+", "'Recieved: '", "+", "six", ".", "text_type", "(", "type", "(", "dyn_dict", ")", ")", ")", "for", "(", "key", ",", "val", ")", "in", "six", ".", "iteritems", "(", "dyn_dict", ")", ":", "self", "[", "key", "]", "=", "val" ]
Adds a dictionary to the prefs
[ "Adds", "a", "dictionary", "to", "the", "prefs" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/DynamicStruct.py#L52-L58
train
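A short usage sketch for add_dict, assuming DynStruct is importable from utool.DynamicStruct as the path above suggests:

from utool.DynamicStruct import DynStruct  # import path assumed from this entry

dyn = DynStruct()
dyn.add_dict({'alpha': 1, 'beta': 2})  # each key becomes an attribute
print(dyn.alpha, dyn.beta)             # 1 2

# Non-dict input raises the Exception built above:
# dyn.add_dict([('alpha', 1)])  # 'DynStruct.add_dict expects a dictionary. Received: ...'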
Erotemic/utool
utool/DynamicStruct.py
DynStruct.to_dict
def to_dict(self): """Converts dynstruct to a dictionary. """ dyn_dict = {} for (key, val) in six.iteritems(self.__dict__): if key not in self._printable_exclude: dyn_dict[key] = val return dyn_dict
python
def to_dict(self): """Converts dynstruct to a dictionary. """ dyn_dict = {} for (key, val) in six.iteritems(self.__dict__): if key not in self._printable_exclude: dyn_dict[key] = val return dyn_dict
[ "def", "to_dict", "(", "self", ")", ":", "dyn_dict", "=", "{", "}", "for", "(", "key", ",", "val", ")", "in", "six", ".", "iteritems", "(", "self", ".", "__dict__", ")", ":", "if", "key", "not", "in", "self", ".", "_printable_exclude", ":", "dyn_dict", "[", "key", "]", "=", "val", "return", "dyn_dict" ]
Converts dynstruct to a dictionary.
[ "Converts", "dynstruct", "to", "a", "dictionary", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/DynamicStruct.py#L60-L66
train
Erotemic/utool
utool/DynamicStruct.py
DynStruct.execstr
def execstr(self, local_name): """returns a string which when evaluated will add the stored variables to the current namespace. local_name is the name of the variable in the current scope. * use locals().update(dyn.to_dict()) instead """ execstr = '' for (key, val) in six.iteritems(self.__dict__): if key not in self._printable_exclude: execstr += key + ' = ' + local_name + '.' + key + '\n' return execstr
python
def execstr(self, local_name): """returns a string which when evaluated will add the stored variables to the current namespace. local_name is the name of the variable in the current scope. * use locals().update(dyn.to_dict()) instead """ execstr = '' for (key, val) in six.iteritems(self.__dict__): if key not in self._printable_exclude: execstr += key + ' = ' + local_name + '.' + key + '\n' return execstr
[ "def", "execstr", "(", "self", ",", "local_name", ")", ":", "execstr", "=", "''", "for", "(", "key", ",", "val", ")", "in", "six", ".", "iteritems", "(", "self", ".", "__dict__", ")", ":", "if", "key", "not", "in", "self", ".", "_printable_exclude", ":", "execstr", "+=", "key", "+", "' = '", "+", "local_name", "+", "'.'", "+", "key", "+", "'\\n'", "return", "execstr" ]
returns a string which when evaluated will add the stored variables to the current namespace. local_name is the name of the variable in the current scope. * use locals().update(dyn.to_dict()) instead
[ "returns", "a", "string", "which", "when", "evaluated", "will", "add", "the", "stored", "variables", "to", "the", "current", "namespace" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/DynamicStruct.py#L88-L99
train
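execstr builds assignment source meant for exec(); a minimal sketch of both it and the locals().update alternative the docstring recommends (import path assumed as above, attribute order may vary):

from utool.DynamicStruct import DynStruct  # import path assumed from this entry

dyn = DynStruct()
dyn.add_dict({'x': 1, 'y': 2})

src = dyn.execstr('dyn')  # e.g. 'x = dyn.x\ny = dyn.y\n'
exec(src)                 # binds x and y in the current (module) namespace
print(x + y)              # 3

# The recommended alternative at module scope, where locals() is globals():
locals().update(dyn.to_dict())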
glormph/msstitch
src/app/lookups/sqlite/proteingroups.py
ProteinGroupDB.get_proteins_for_peptide
def get_proteins_for_peptide(self, psm_id): """Returns list of proteins for a passed psm_id""" protsql = self.get_sql_select(['protein_acc'], 'protein_psm') protsql = '{0} WHERE psm_id=?'.format(protsql) cursor = self.get_cursor() proteins = cursor.execute(protsql, psm_id).fetchall() return [x[0] for x in proteins]
python
def get_proteins_for_peptide(self, psm_id): """Returns list of proteins for a passed psm_id""" protsql = self.get_sql_select(['protein_acc'], 'protein_psm') protsql = '{0} WHERE psm_id=?'.format(protsql) cursor = self.get_cursor() proteins = cursor.execute(protsql, psm_id).fetchall() return [x[0] for x in proteins]
[ "def", "get_proteins_for_peptide", "(", "self", ",", "psm_id", ")", ":", "protsql", "=", "self", ".", "get_sql_select", "(", "[", "'protein_acc'", "]", ",", "'protein_psm'", ")", "protsql", "=", "'{0} WHERE psm_id=?'", ".", "format", "(", "protsql", ")", "cursor", "=", "self", ".", "get_cursor", "(", ")", "proteins", "=", "cursor", ".", "execute", "(", "protsql", ",", "psm_id", ")", ".", "fetchall", "(", ")", "return", "[", "x", "[", "0", "]", "for", "x", "in", "proteins", "]" ]
Returns list of proteins for a passed psm_id
[ "Returns", "list", "of", "proteins", "for", "a", "passed", "psm_id" ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/proteingroups.py#L79-L85
train
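The same parameterized-select pattern in plain sqlite3, for reference. Note that sqlite3 binds parameters from a sequence, so a single psm_id is conventionally wrapped in a 1-tuple (the method above passes it bare, which assumes the caller already supplies a sequence):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE protein_psm (protein_acc TEXT, psm_id TEXT)')
conn.executemany('INSERT INTO protein_psm VALUES (?, ?)',
                 [('P001', 'psm1'), ('P002', 'psm1'), ('P003', 'psm2')])

psm_id = 'psm1'
protsql = 'SELECT protein_acc FROM protein_psm WHERE psm_id=?'
proteins = conn.execute(protsql, (psm_id,)).fetchall()
print([x[0] for x in proteins])  # ['P001', 'P002']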
flyte/xbee-helper
xbee_helper/device.py
raise_if_error
def raise_if_error(frame): """ Checks a frame and raises the relevant exception if required. """ if "status" not in frame or frame["status"] == b"\x00": return codes_and_exceptions = { b"\x01": exceptions.ZigBeeUnknownError, b"\x02": exceptions.ZigBeeInvalidCommand, b"\x03": exceptions.ZigBeeInvalidParameter, b"\x04": exceptions.ZigBeeTxFailure } if frame["status"] in codes_and_exceptions: raise codes_and_exceptions[frame["status"]]() raise exceptions.ZigBeeUnknownStatus()
python
def raise_if_error(frame): """ Checks a frame and raises the relevant exception if required. """ if "status" not in frame or frame["status"] == b"\x00": return codes_and_exceptions = { b"\x01": exceptions.ZigBeeUnknownError, b"\x02": exceptions.ZigBeeInvalidCommand, b"\x03": exceptions.ZigBeeInvalidParameter, b"\x04": exceptions.ZigBeeTxFailure } if frame["status"] in codes_and_exceptions: raise codes_and_exceptions[frame["status"]]() raise exceptions.ZigBeeUnknownStatus()
[ "def", "raise_if_error", "(", "frame", ")", ":", "if", "\"status\"", "not", "in", "frame", "or", "frame", "[", "\"status\"", "]", "==", "b\"\\x00\"", ":", "return", "codes_and_exceptions", "=", "{", "b\"\\x01\"", ":", "exceptions", ".", "ZigBeeUnknownError", ",", "b\"\\x02\"", ":", "exceptions", ".", "ZigBeeInvalidCommand", ",", "b\"\\x03\"", ":", "exceptions", ".", "ZigBeeInvalidParameter", ",", "b\"\\x04\"", ":", "exceptions", ".", "ZigBeeTxFailure", "}", "if", "frame", "[", "\"status\"", "]", "in", "codes_and_exceptions", ":", "raise", "codes_and_exceptions", "[", "frame", "[", "\"status\"", "]", "]", "(", ")", "raise", "exceptions", ".", "ZigBeeUnknownStatus", "(", ")" ]
Checks a frame and raises the relevant exception if required.
[ "Checks", "a", "frame", "and", "raises", "the", "relevant", "exception", "if", "required", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L25-L39
train
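A behavior sketch for raise_if_error, assuming the xbee_helper package is installed (the exception names come straight from the entry above):

from xbee_helper import device, exceptions

device.raise_if_error({})                   # no 'status' key: treated as success
device.raise_if_error({'status': b'\x00'})  # explicit success: returns silently

try:
    device.raise_if_error({'status': b'\x04'})
except exceptions.ZigBeeTxFailure:
    print('radio reported a TX failure')

try:
    device.raise_if_error({'status': b'\x7f'})  # any unmapped code
except exceptions.ZigBeeUnknownStatus:
    print('unrecognized status byte')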
flyte/xbee-helper
xbee_helper/device.py
hex_to_int
def hex_to_int(value): """ Convert hex string like "\x0A\xE3" to 2787. """ if version_info.major >= 3: return int.from_bytes(value, "big") return int(value.encode("hex"), 16)
python
def hex_to_int(value): """ Convert hex string like "\x0A\xE3" to 2787. """ if version_info.major >= 3: return int.from_bytes(value, "big") return int(value.encode("hex"), 16)
[ "def", "hex_to_int", "(", "value", ")", ":", "if", "version_info", ".", "major", ">=", "3", ":", "return", "int", ".", "from_bytes", "(", "value", ",", "\"big\"", ")", "return", "int", "(", "value", ".", "encode", "(", "\"hex\"", ")", ",", "16", ")" ]
Convert hex string like "\x0A\xE3" to 2787.
[ "Convert", "hex", "string", "like", "\\", "x0A", "\\", "xE3", "to", "2787", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L42-L48
train
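Working the docstring's example by hand on Python 3 (the version_info branch exists because Python 2 bytes lack from_bytes):

value = b"\x0a\xe3"
# big-endian: 0x0A * 256 + 0xE3 = 10 * 256 + 227 = 2787
print(int.from_bytes(value, "big"))  # 2787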
flyte/xbee-helper
xbee_helper/device.py
adc_to_percentage
def adc_to_percentage(value, max_volts, clamp=True): """ Convert the ADC raw value to a percentage. """ percentage = (100.0 / const.ADC_MAX_VAL) * value return max(min(100, percentage), 0) if clamp else percentage
python
def adc_to_percentage(value, max_volts, clamp=True): """ Convert the ADC raw value to a percentage. """ percentage = (100.0 / const.ADC_MAX_VAL) * value return max(min(100, percentage), 0) if clamp else percentage
[ "def", "adc_to_percentage", "(", "value", ",", "max_volts", ",", "clamp", "=", "True", ")", ":", "percentage", "=", "(", "100.0", "/", "const", ".", "ADC_MAX_VAL", ")", "*", "value", "return", "max", "(", "min", "(", "100", ",", "percentage", ")", ",", "0", ")", "if", "clamp", "else", "percentage" ]
Convert the ADC raw value to a percentage.
[ "Convert", "the", "ADC", "raw", "value", "to", "a", "percentage", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L51-L56
train
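A worked example under the assumption that const.ADC_MAX_VAL is 1023 (a 10-bit ADC). max_volts is accepted but unused here, apparently so all converters share the (value, max_volts) signature used by convert_adc below:

ADC_MAX_VAL = 1023.0  # assumed 10-bit full-scale reading

def adc_to_percentage(value, max_volts, clamp=True):
    percentage = (100.0 / ADC_MAX_VAL) * value
    return max(min(100, percentage), 0) if clamp else percentage

print(adc_to_percentage(512, 3.3))           # ~50.05
print(adc_to_percentage(2000, 3.3))          # 100 (clamped)
print(adc_to_percentage(2000, 3.3, False))   # ~195.5 (unclamped)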
flyte/xbee-helper
xbee_helper/device.py
convert_adc
def convert_adc(value, output_type, max_volts): """ Converts the output from the ADC into the desired type. """ return { const.ADC_RAW: lambda x, max_volts: x, const.ADC_PERCENTAGE: adc_to_percentage, const.ADC_VOLTS: adc_to_volts, const.ADC_MILLIVOLTS: adc_to_millivolts }[output_type](value, max_volts)
python
def convert_adc(value, output_type, max_volts): """ Converts the output from the ADC into the desired type. """ return { const.ADC_RAW: lambda x, max_volts: x, const.ADC_PERCENTAGE: adc_to_percentage, const.ADC_VOLTS: adc_to_volts, const.ADC_MILLIVOLTS: adc_to_millivolts }[output_type](value, max_volts)
[ "def", "convert_adc", "(", "value", ",", "output_type", ",", "max_volts", ")", ":", "return", "{", "const", ".", "ADC_RAW", ":", "lambda", "x", ":", "x", ",", "const", ".", "ADC_PERCENTAGE", ":", "adc_to_percentage", ",", "const", ".", "ADC_VOLTS", ":", "adc_to_volts", ",", "const", ".", "ADC_MILLIVOLTS", ":", "adc_to_millivolts", "}", "[", "output_type", "]", "(", "value", ",", "max_volts", ")" ]
Converts the output from the ADC into the desired type.
[ "Converts", "the", "output", "from", "the", "ADC", "into", "the", "desired", "type", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L73-L82
train
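The dispatch dict above calls the selected converter with (value, max_volts), so every entry must accept both arguments; that is why the raw branch's lambda takes and ignores max_volts. A self-contained sketch of the same pattern with stand-in keys and converters (not the real const names):

ADC_MAX_VAL = 1023.0  # assumed full-scale reading

def adc_to_volts(value, max_volts):
    return (max_volts / ADC_MAX_VAL) * value

def convert_adc(value, output_type, max_volts):
    # Every entry is callable with (value, max_volts), so lookup + call is uniform.
    return {
        'raw': lambda value, max_volts: value,
        'volts': adc_to_volts,
        'millivolts': lambda value, max_volts: adc_to_volts(value, max_volts) * 1000,
    }[output_type](value, max_volts)

print(convert_adc(512, 'volts', 3.3))       # ~1.652
print(convert_adc(512, 'millivolts', 3.3))  # ~1651.6
print(convert_adc(512, 'raw', 3.3))         # 512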
flyte/xbee-helper
xbee_helper/device.py
ZigBee._frame_received
def _frame_received(self, frame): """ Put the frame into the _rx_frames dict with a key of the frame_id. """ try: self._rx_frames[frame["frame_id"]] = frame except KeyError: # Has no frame_id, ignore? pass _LOGGER.debug("Frame received: %s", frame) # Give the frame to any interested functions for handler in self._rx_handlers: handler(frame)
python
def _frame_received(self, frame): """ Put the frame into the _rx_frames dict with a key of the frame_id. """ try: self._rx_frames[frame["frame_id"]] = frame except KeyError: # Has no frame_id, ignore? pass _LOGGER.debug("Frame received: %s", frame) # Give the frame to any interested functions for handler in self._rx_handlers: handler(frame)
[ "def", "_frame_received", "(", "self", ",", "frame", ")", ":", "try", ":", "self", ".", "_rx_frames", "[", "frame", "[", "\"frame_id\"", "]", "]", "=", "frame", "except", "KeyError", ":", "# Has no frame_id, ignore?", "pass", "_LOGGER", ".", "debug", "(", "\"Frame received: %s\"", ",", "frame", ")", "# Give the frame to any interested functions", "for", "handler", "in", "self", ".", "_rx_handlers", ":", "handler", "(", "frame", ")" ]
Put the frame into the _rx_frames dict with a key of the frame_id.
[ "Put", "the", "frame", "into", "the", "_rx_frames", "dict", "with", "a", "key", "of", "the", "frame_id", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L122-L134
train
flyte/xbee-helper
xbee_helper/device.py
ZigBee._send
def _send(self, **kwargs): """ Send a frame to either the local ZigBee or a remote device. """ if kwargs.get("dest_addr_long") is not None: self.zb.remote_at(**kwargs) else: self.zb.at(**kwargs)
python
def _send(self, **kwargs): """ Send a frame to either the local ZigBee or a remote device. """ if kwargs.get("dest_addr_long") is not None: self.zb.remote_at(**kwargs) else: self.zb.at(**kwargs)
[ "def", "_send", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ".", "get", "(", "\"dest_addr_long\"", ")", "is", "not", "None", ":", "self", ".", "zb", ".", "remote_at", "(", "*", "*", "kwargs", ")", "else", ":", "self", ".", "zb", ".", "at", "(", "*", "*", "kwargs", ")" ]
Send a frame to either the local ZigBee or a remote device.
[ "Send", "a", "frame", "to", "either", "the", "local", "ZigBee", "or", "a", "remote", "device", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L136-L143
train
flyte/xbee-helper
xbee_helper/device.py
ZigBee._send_and_wait
def _send_and_wait(self, **kwargs): """ Send a frame to either the local ZigBee or a remote device and wait for a pre-defined amount of time for its response. """ frame_id = self.next_frame_id kwargs.update(dict(frame_id=frame_id)) self._send(**kwargs) timeout = datetime.now() + const.RX_TIMEOUT while datetime.now() < timeout: try: frame = self._rx_frames.pop(frame_id) raise_if_error(frame) return frame except KeyError: sleep(0.1) continue _LOGGER.exception( "Did not receive response within configured timeout period.") raise exceptions.ZigBeeResponseTimeout()
python
def _send_and_wait(self, **kwargs): """ Send a frame to either the local ZigBee or a remote device and wait for a pre-defined amount of time for its response. """ frame_id = self.next_frame_id kwargs.update(dict(frame_id=frame_id)) self._send(**kwargs) timeout = datetime.now() + const.RX_TIMEOUT while datetime.now() < timeout: try: frame = self._rx_frames.pop(frame_id) raise_if_error(frame) return frame except KeyError: sleep(0.1) continue _LOGGER.exception( "Did not receive response within configured timeout period.") raise exceptions.ZigBeeResponseTimeout()
[ "def", "_send_and_wait", "(", "self", ",", "*", "*", "kwargs", ")", ":", "frame_id", "=", "self", ".", "next_frame_id", "kwargs", ".", "update", "(", "dict", "(", "frame_id", "=", "frame_id", ")", ")", "self", ".", "_send", "(", "*", "*", "kwargs", ")", "timeout", "=", "datetime", ".", "now", "(", ")", "+", "const", ".", "RX_TIMEOUT", "while", "datetime", ".", "now", "(", ")", "<", "timeout", ":", "try", ":", "frame", "=", "self", ".", "_rx_frames", ".", "pop", "(", "frame_id", ")", "raise_if_error", "(", "frame", ")", "return", "frame", "except", "KeyError", ":", "sleep", "(", "0.1", ")", "continue", "_LOGGER", ".", "exception", "(", "\"Did not receive response within configured timeout period.\"", ")", "raise", "exceptions", ".", "ZigBeeResponseTimeout", "(", ")" ]
Send a frame to either the local ZigBee or a remote device and wait for a pre-defined amount of time for its response.
[ "Send", "a", "frame", "to", "either", "the", "local", "ZigBee", "or", "a", "remote", "device", "and", "wait", "for", "a", "pre", "-", "defined", "amount", "of", "time", "for", "its", "response", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L145-L164
train
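The waiting half of _send_and_wait is a generic poll-until-deadline loop over a dict that the receive callback fills in from another thread; a standalone sketch, with RX_TIMEOUT standing in for const.RX_TIMEOUT:

from datetime import datetime, timedelta
from time import sleep

RX_TIMEOUT = timedelta(seconds=2)  # stand-in for const.RX_TIMEOUT

def wait_for_response(rx_frames, frame_id):
    # Poll until the matching frame shows up or the deadline passes.
    deadline = datetime.now() + RX_TIMEOUT
    while datetime.now() < deadline:
        try:
            return rx_frames.pop(frame_id)  # arrived: hand it to the caller
        except KeyError:
            sleep(0.1)  # not yet: brief backoff, then retry
    raise TimeoutError('no response for frame %r' % frame_id)

# A receive callback would normally populate this from another thread.
rx_frames = {1: {'status': b'\x00'}}
print(wait_for_response(rx_frames, 1))  # {'status': b'\x00'}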
flyte/xbee-helper
xbee_helper/device.py
ZigBee._get_parameter
def _get_parameter(self, parameter, dest_addr_long=None): """ Fetches and returns the value of the specified parameter. """ frame = self._send_and_wait( command=parameter, dest_addr_long=dest_addr_long) return frame["parameter"]
python
def _get_parameter(self, parameter, dest_addr_long=None): """ Fetches and returns the value of the specified parameter. """ frame = self._send_and_wait( command=parameter, dest_addr_long=dest_addr_long) return frame["parameter"]
[ "def", "_get_parameter", "(", "self", ",", "parameter", ",", "dest_addr_long", "=", "None", ")", ":", "frame", "=", "self", ".", "_send_and_wait", "(", "command", "=", "parameter", ",", "dest_addr_long", "=", "dest_addr_long", ")", "return", "frame", "[", "\"parameter\"", "]" ]
Fetches and returns the value of the specified parameter.
[ "Fetches", "and", "returns", "the", "value", "of", "the", "specified", "parameter", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L166-L172
train
flyte/xbee-helper
xbee_helper/device.py
ZigBee.get_sample
def get_sample(self, dest_addr_long=None): """ Initiate a sample and return its data. """ frame = self._send_and_wait( command=b"IS", dest_addr_long=dest_addr_long) if "parameter" in frame: # @TODO: Is there always one value? Is it always a list? return frame["parameter"][0] return {}
python
def get_sample(self, dest_addr_long=None): """ Initiate a sample and return its data. """ frame = self._send_and_wait( command=b"IS", dest_addr_long=dest_addr_long) if "parameter" in frame: # @TODO: Is there always one value? Is it always a list? return frame["parameter"][0] return {}
[ "def", "get_sample", "(", "self", ",", "dest_addr_long", "=", "None", ")", ":", "frame", "=", "self", ".", "_send_and_wait", "(", "command", "=", "b\"IS\"", ",", "dest_addr_long", "=", "dest_addr_long", ")", "if", "\"parameter\"", "in", "frame", ":", "# @TODO: Is there always one value? Is it always a list?", "return", "frame", "[", "\"parameter\"", "]", "[", "0", "]", "return", "{", "}" ]
Initiate a sample and return its data.
[ "Initiate", "a", "sample", "and", "return", "its", "data", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L188-L197
train
flyte/xbee-helper
xbee_helper/device.py
ZigBee.read_digital_pin
def read_digital_pin(self, pin_number, dest_addr_long=None): """ Fetches a sample and returns the boolean value of the requested digital pin. """ sample = self.get_sample(dest_addr_long=dest_addr_long) try: return sample[const.DIGITAL_PINS[pin_number]] except KeyError: raise exceptions.ZigBeePinNotConfigured( "Pin %s (%s) is not configured as a digital input or output." % (pin_number, const.IO_PIN_COMMANDS[pin_number]))
python
def read_digital_pin(self, pin_number, dest_addr_long=None): """ Fetches a sample and returns the boolean value of the requested digital pin. """ sample = self.get_sample(dest_addr_long=dest_addr_long) try: return sample[const.DIGITAL_PINS[pin_number]] except KeyError: raise exceptions.ZigBeePinNotConfigured( "Pin %s (%s) is not configured as a digital input or output." % (pin_number, const.IO_PIN_COMMANDS[pin_number]))
[ "def", "read_digital_pin", "(", "self", ",", "pin_number", ",", "dest_addr_long", "=", "None", ")", ":", "sample", "=", "self", ".", "get_sample", "(", "dest_addr_long", "=", "dest_addr_long", ")", "try", ":", "return", "sample", "[", "const", ".", "DIGITAL_PINS", "[", "pin_number", "]", "]", "except", "KeyError", ":", "raise", "exceptions", ".", "ZigBeePinNotConfigured", "(", "\"Pin %s (%s) is not configured as a digital input or output.\"", "%", "(", "pin_number", ",", "const", ".", "IO_PIN_COMMANDS", "[", "pin_number", "]", ")", ")" ]
Fetches a sample and returns the boolean value of the requested digital pin.
[ "Fetches", "a", "sample", "and", "returns", "the", "boolean", "value", "of", "the", "requested", "digital", "pin", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L199-L210
train
flyte/xbee-helper
xbee_helper/device.py
ZigBee.set_gpio_pin
def set_gpio_pin(self, pin_number, setting, dest_addr_long=None): """ Set a gpio pin setting. """ assert setting in const.GPIO_SETTINGS.values() self._send_and_wait( command=const.IO_PIN_COMMANDS[pin_number], parameter=setting.value, dest_addr_long=dest_addr_long)
python
def set_gpio_pin(self, pin_number, setting, dest_addr_long=None): """ Set a gpio pin setting. """ assert setting in const.GPIO_SETTINGS.values() self._send_and_wait( command=const.IO_PIN_COMMANDS[pin_number], parameter=setting.value, dest_addr_long=dest_addr_long)
[ "def", "set_gpio_pin", "(", "self", ",", "pin_number", ",", "setting", ",", "dest_addr_long", "=", "None", ")", ":", "assert", "setting", "in", "const", ".", "GPIO_SETTINGS", ".", "values", "(", ")", "self", ".", "_send_and_wait", "(", "command", "=", "const", ".", "IO_PIN_COMMANDS", "[", "pin_number", "]", ",", "parameter", "=", "setting", ".", "value", ",", "dest_addr_long", "=", "dest_addr_long", ")" ]
Set a gpio pin setting.
[ "Set", "a", "gpio", "pin", "setting", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L237-L245
train
flyte/xbee-helper
xbee_helper/device.py
ZigBee.get_gpio_pin
def get_gpio_pin(self, pin_number, dest_addr_long=None): """ Get a gpio pin setting. """ frame = self._send_and_wait( command=const.IO_PIN_COMMANDS[pin_number], dest_addr_long=dest_addr_long ) value = frame["parameter"] return const.GPIO_SETTINGS[value]
python
def get_gpio_pin(self, pin_number, dest_addr_long=None): """ Get a gpio pin setting. """ frame = self._send_and_wait( command=const.IO_PIN_COMMANDS[pin_number], dest_addr_long=dest_addr_long ) value = frame["parameter"] return const.GPIO_SETTINGS[value]
[ "def", "get_gpio_pin", "(", "self", ",", "pin_number", ",", "dest_addr_long", "=", "None", ")", ":", "frame", "=", "self", ".", "_send_and_wait", "(", "command", "=", "const", ".", "IO_PIN_COMMANDS", "[", "pin_number", "]", ",", "dest_addr_long", "=", "dest_addr_long", ")", "value", "=", "frame", "[", "\"parameter\"", "]", "return", "const", ".", "GPIO_SETTINGS", "[", "value", "]" ]
Get a gpio pin setting.
[ "Get", "a", "gpio", "pin", "setting", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L247-L256
train
flyte/xbee-helper
xbee_helper/device.py
ZigBee.get_supply_voltage
def get_supply_voltage(self, dest_addr_long=None): """ Fetches the value of %V and returns it as volts. """ value = self._get_parameter(b"%V", dest_addr_long=dest_addr_long) return (hex_to_int(value) * (1200/1024.0)) / 1000
python
def get_supply_voltage(self, dest_addr_long=None): """ Fetches the value of %V and returns it as volts. """ value = self._get_parameter(b"%V", dest_addr_long=dest_addr_long) return (hex_to_int(value) * (1200/1024.0)) / 1000
[ "def", "get_supply_voltage", "(", "self", ",", "dest_addr_long", "=", "None", ")", ":", "value", "=", "self", ".", "_get_parameter", "(", "b\"%V\"", ",", "dest_addr_long", "=", "dest_addr_long", ")", "return", "(", "hex_to_int", "(", "value", ")", "*", "(", "1200", "/", "1024.0", ")", ")", "/", "1000" ]
Fetches the value of %V and returns it as volts.
[ "Fetches", "the", "value", "of", "%V", "and", "returns", "it", "as", "volts", "." ]
8b47675ad44d8a57defea459682d129379af348d
https://github.com/flyte/xbee-helper/blob/8b47675ad44d8a57defea459682d129379af348d/xbee_helper/device.py#L258-L263
train
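Working the %V scaling by hand: the raw register counts are multiplied by 1200/1024 to get millivolts, then divided by 1000 for volts:

raw = b"\x0a\xe3"                      # example register value
counts = int.from_bytes(raw, "big")    # 2787
millivolts = counts * (1200 / 1024.0)  # ~3266.0 mV
volts = millivolts / 1000              # ~3.266 V
print(round(volts, 3))                 # 3.266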
Erotemic/utool
utool/util_set.py
OrderedSet.add
def add(self, key): """ Store new key in a new link at the end of the linked list """ if key not in self._map: self._map[key] = link = _Link() root = self._root last = root.prev link.prev, link.next, link.key = last, root, key last.next = root.prev = weakref.proxy(link)
python
def add(self, key): """ Store new key in a new link at the end of the linked list """ if key not in self._map: self._map[key] = link = _Link() root = self._root last = root.prev link.prev, link.next, link.key = last, root, key last.next = root.prev = weakref.proxy(link)
[ "def", "add", "(", "self", ",", "key", ")", ":", "if", "key", "not", "in", "self", ".", "_map", ":", "self", ".", "_map", "[", "key", "]", "=", "link", "=", "_Link", "(", ")", "root", "=", "self", ".", "_root", "last", "=", "root", ".", "prev", "link", ".", "prev", ",", "link", ".", "next", ",", "link", ".", "key", "=", "last", ",", "root", ",", "key", "last", ".", "next", "=", "root", ".", "prev", "=", "weakref", ".", "proxy", "(", "link", ")" ]
Store new key in a new link at the end of the linked list
[ "Store", "new", "key", "in", "a", "new", "link", "at", "the", "end", "of", "the", "linked", "list" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_set.py#L43-L50
train
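add above is the classic OrderedDict construction: a circular doubly linked list threaded through a hash map, with weakref.proxy breaking the reference cycle between links. A reduced, self-contained sketch of the same structure:

import weakref

class _Link(object):
    __slots__ = ('prev', 'next', 'key', '__weakref__')

class TinyOrderedSet(object):
    def __init__(self):
        self._map = {}                 # key -> link
        self._root = root = _Link()
        root.prev = root.next = root   # empty circular list

    def add(self, key):
        if key not in self._map:
            self._map[key] = link = _Link()
            root = self._root
            last = root.prev
            link.prev, link.next, link.key = last, root, key
            # proxies avoid a strong reference cycle among links
            last.next = root.prev = weakref.proxy(link)

    def __iter__(self):
        curr = self._root.next
        while curr is not self._root:
            yield curr.key
            curr = curr.next

s = TinyOrderedSet()
for k in [3, 1, 3, 2]:
    s.add(k)
print(list(s))  # [3, 1, 2] -- insertion order, no duplicates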
Erotemic/utool
utool/util_set.py
OrderedSet.index
def index(self, item): """ Find the index of `item` in the OrderedSet Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> self = ut.oset([1, 2, 3]) >>> assert self.index(1) == 0 >>> assert self.index(2) == 1 >>> assert self.index(3) == 2 >>> ut.assert_raises(ValueError, self.index, 4) """ for count, other in enumerate(self): if item == other: return count raise ValueError('%r is not in OrderedSet' % (item,))
python
def index(self, item): """ Find the index of `item` in the OrderedSet Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> self = ut.oset([1, 2, 3]) >>> assert self.index(1) == 0 >>> assert self.index(2) == 1 >>> assert self.index(3) == 2 >>> ut.assert_raises(ValueError, self.index, 4) """ for count, other in enumerate(self): if item == other: return count raise ValueError('%r is not in OrderedSet' % (item,))
[ "def", "index", "(", "self", ",", "item", ")", ":", "for", "count", ",", "other", "in", "enumerate", "(", "self", ")", ":", "if", "item", "==", "other", ":", "return", "count", "raise", "ValueError", "(", "'%r is not in OrderedSet'", "%", "(", "item", ",", ")", ")" ]
Find the index of `item` in the OrderedSet Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> self = ut.oset([1, 2, 3]) >>> assert self.index(1) == 0 >>> assert self.index(2) == 1 >>> assert self.index(3) == 2 >>> ut.assert_raises(ValueError, self.index, 4)
[ "Find", "the", "index", "of", "item", "in", "the", "OrderedSet" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_set.py#L138-L154
train
chriso/gauged
gauged/gauged.py
Gauged.value
def value(self, key, timestamp=None, namespace=None): """Get the value of a gauge at the specified time""" return self.make_context(key=key, end=timestamp, namespace=namespace).value()
python
def value(self, key, timestamp=None, namespace=None): """Get the value of a gauge at the specified time""" return self.make_context(key=key, end=timestamp, namespace=namespace).value()
[ "def", "value", "(", "self", ",", "key", ",", "timestamp", "=", "None", ",", "namespace", "=", "None", ")", ":", "return", "self", ".", "make_context", "(", "key", "=", "key", ",", "end", "=", "timestamp", ",", "namespace", "=", "namespace", ")", ".", "value", "(", ")" ]
Get the value of a gauge at the specified time
[ "Get", "the", "value", "of", "a", "gauge", "at", "the", "specified", "time" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L68-L71
train
chriso/gauged
gauged/gauged.py
Gauged.aggregate
def aggregate(self, key, aggregate, start=None, end=None, namespace=None, percentile=None): """Get an aggregate of all gauge data stored in the specified date range""" return self.make_context(key=key, aggregate=aggregate, start=start, end=end, namespace=namespace, percentile=percentile).aggregate()
python
def aggregate(self, key, aggregate, start=None, end=None, namespace=None, percentile=None): """Get an aggregate of all gauge data stored in the specified date range""" return self.make_context(key=key, aggregate=aggregate, start=start, end=end, namespace=namespace, percentile=percentile).aggregate()
[ "def", "aggregate", "(", "self", ",", "key", ",", "aggregate", ",", "start", "=", "None", ",", "end", "=", "None", ",", "namespace", "=", "None", ",", "percentile", "=", "None", ")", ":", "return", "self", ".", "make_context", "(", "key", "=", "key", ",", "aggregate", "=", "aggregate", ",", "start", "=", "start", ",", "end", "=", "end", ",", "namespace", "=", "namespace", ",", "percentile", "=", "percentile", ")", ".", "aggregate", "(", ")" ]
Get an aggregate of all gauge data stored in the specified date range
[ "Get", "an", "aggregate", "of", "all", "gauge", "data", "stored", "in", "the", "specified", "date", "range" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L73-L79
train
chriso/gauged
gauged/gauged.py
Gauged.value_series
def value_series(self, key, start=None, end=None, interval=None, namespace=None, cache=None): """Get a time series of gauge values""" return self.make_context(key=key, start=start, end=end, interval=interval, namespace=namespace, cache=cache).value_series()
python
def value_series(self, key, start=None, end=None, interval=None, namespace=None, cache=None): """Get a time series of gauge values""" return self.make_context(key=key, start=start, end=end, interval=interval, namespace=namespace, cache=cache).value_series()
[ "def", "value_series", "(", "self", ",", "key", ",", "start", "=", "None", ",", "end", "=", "None", ",", "interval", "=", "None", ",", "namespace", "=", "None", ",", "cache", "=", "None", ")", ":", "return", "self", ".", "make_context", "(", "key", "=", "key", ",", "start", "=", "start", ",", "end", "=", "end", ",", "interval", "=", "interval", ",", "namespace", "=", "namespace", ",", "cache", "=", "cache", ")", ".", "value_series", "(", ")" ]
Get a time series of gauge values
[ "Get", "a", "time", "series", "of", "gauge", "values" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L81-L86
train
chriso/gauged
gauged/gauged.py
Gauged.aggregate_series
def aggregate_series(self, key, aggregate, start=None, end=None, interval=None, namespace=None, cache=None, percentile=None): """Get a time series of gauge aggregates""" return self.make_context(key=key, aggregate=aggregate, start=start, end=end, interval=interval, namespace=namespace, cache=cache, percentile=percentile).aggregate_series()
python
def aggregate_series(self, key, aggregate, start=None, end=None, interval=None, namespace=None, cache=None, percentile=None): """Get a time series of gauge aggregates""" return self.make_context(key=key, aggregate=aggregate, start=start, end=end, interval=interval, namespace=namespace, cache=cache, percentile=percentile).aggregate_series()
[ "def", "aggregate_series", "(", "self", ",", "key", ",", "aggregate", ",", "start", "=", "None", ",", "end", "=", "None", ",", "interval", "=", "None", ",", "namespace", "=", "None", ",", "cache", "=", "None", ",", "percentile", "=", "None", ")", ":", "return", "self", ".", "make_context", "(", "key", "=", "key", ",", "aggregate", "=", "aggregate", ",", "start", "=", "start", ",", "end", "=", "end", ",", "interval", "=", "interval", ",", "namespace", "=", "namespace", ",", "cache", "=", "cache", ",", "percentile", "=", "percentile", ")", ".", "aggregate_series", "(", ")" ]
Get a time series of gauge aggregates
[ "Get", "a", "time", "series", "of", "gauge", "aggregates" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L88-L95
train
chriso/gauged
gauged/gauged.py
Gauged.keys
def keys(self, prefix=None, limit=None, offset=None, namespace=None): """Get gauge keys""" return self.make_context(prefix=prefix, limit=limit, offset=offset, namespace=namespace).keys()
python
def keys(self, prefix=None, limit=None, offset=None, namespace=None): """Get gauge keys""" return self.make_context(prefix=prefix, limit=limit, offset=offset, namespace=namespace).keys()
[ "def", "keys", "(", "self", ",", "prefix", "=", "None", ",", "limit", "=", "None", ",", "offset", "=", "None", ",", "namespace", "=", "None", ")", ":", "return", "self", ".", "make_context", "(", "prefix", "=", "prefix", ",", "limit", "=", "limit", ",", "offset", "=", "offset", ",", "namespace", "=", "namespace", ")", ".", "keys", "(", ")" ]
Get gauge keys
[ "Get", "gauge", "keys" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L97-L100
train
chriso/gauged
gauged/gauged.py
Gauged.statistics
def statistics(self, start=None, end=None, namespace=None): """Get write statistics for the specified namespace and date range""" return self.make_context(start=start, end=end, namespace=namespace).statistics()
python
def statistics(self, start=None, end=None, namespace=None): """Get write statistics for the specified namespace and date range""" return self.make_context(start=start, end=end, namespace=namespace).statistics()
[ "def", "statistics", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "namespace", "=", "None", ")", ":", "return", "self", ".", "make_context", "(", "start", "=", "start", ",", "end", "=", "end", ",", "namespace", "=", "namespace", ")", ".", "statistics", "(", ")" ]
Get write statistics for the specified namespace and date range
[ "Get", "write", "statistics", "for", "the", "specified", "namespace", "and", "date", "range" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L106-L109
train
chriso/gauged
gauged/gauged.py
Gauged.sync
def sync(self): """Create the necessary schema""" self.driver.create_schema() self.driver.set_metadata({ 'current_version': Gauged.VERSION, 'initial_version': Gauged.VERSION, 'block_size': self.config.block_size, 'resolution': self.config.resolution, 'created_at': long(time() * 1000) }, replace=False)
python
def sync(self): """Create the necessary schema""" self.driver.create_schema() self.driver.set_metadata({ 'current_version': Gauged.VERSION, 'initial_version': Gauged.VERSION, 'block_size': self.config.block_size, 'resolution': self.config.resolution, 'created_at': long(time() * 1000) }, replace=False)
[ "def", "sync", "(", "self", ")", ":", "self", ".", "driver", ".", "create_schema", "(", ")", "self", ".", "driver", ".", "set_metadata", "(", "{", "'current_version'", ":", "Gauged", ".", "VERSION", ",", "'initial_version'", ":", "Gauged", ".", "VERSION", ",", "'block_size'", ":", "self", ".", "config", ".", "block_size", ",", "'resolution'", ":", "self", ".", "config", ".", "resolution", ",", "'created_at'", ":", "long", "(", "time", "(", ")", "*", "1000", ")", "}", ",", "replace", "=", "False", ")" ]
Create the necessary schema
[ "Create", "the", "necessary", "schema" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L111-L120
train
chriso/gauged
gauged/gauged.py
Gauged.make_context
def make_context(self, **kwargs): """Create a new context for reading data""" self.check_schema() return Context(self.driver, self.config, **kwargs)
python
def make_context(self, **kwargs): """Create a new context for reading data""" self.check_schema() return Context(self.driver, self.config, **kwargs)
[ "def", "make_context", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "check_schema", "(", ")", "return", "Context", "(", "self", ".", "driver", ",", "self", ".", "config", ",", "*", "*", "kwargs", ")" ]
Create a new context for reading data
[ "Create", "a", "new", "context", "for", "reading", "data" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L130-L133
train
chriso/gauged
gauged/gauged.py
Gauged.check_schema
def check_schema(self): """Check the schema exists and matches configuration""" if self.valid_schema: return config = self.config metadata = self.metadata() if 'current_version' not in metadata: raise GaugedSchemaError('Gauged schema not found, ' 'try a gauged.sync()') if metadata['current_version'] != Gauged.VERSION: msg = 'The schema is version %s while this Gauged is version %s. ' msg += 'Try upgrading Gauged and/or running gauged_migrate.py' msg = msg % (metadata['current_version'], Gauged.VERSION) raise GaugedVersionMismatchError(msg) expected_block_size = '%s/%s' % (config.block_size, config.resolution) block_size = '%s/%s' % (metadata['block_size'], metadata['resolution']) if block_size != expected_block_size: msg = 'Expected %s and got %s' % (expected_block_size, block_size) warn(msg, GaugedBlockSizeMismatch) self.valid_schema = True
python
def check_schema(self): """Check the schema exists and matches configuration""" if self.valid_schema: return config = self.config metadata = self.metadata() if 'current_version' not in metadata: raise GaugedSchemaError('Gauged schema not found, ' 'try a gauged.sync()') if metadata['current_version'] != Gauged.VERSION: msg = 'The schema is version %s while this Gauged is version %s. ' msg += 'Try upgrading Gauged and/or running gauged_migrate.py' msg = msg % (metadata['current_version'], Gauged.VERSION) raise GaugedVersionMismatchError(msg) expected_block_size = '%s/%s' % (config.block_size, config.resolution) block_size = '%s/%s' % (metadata['block_size'], metadata['resolution']) if block_size != expected_block_size: msg = 'Expected %s and got %s' % (expected_block_size, block_size) warn(msg, GaugedBlockSizeMismatch) self.valid_schema = True
[ "def", "check_schema", "(", "self", ")", ":", "if", "self", ".", "valid_schema", ":", "return", "config", "=", "self", ".", "config", "metadata", "=", "self", ".", "metadata", "(", ")", "if", "'current_version'", "not", "in", "metadata", ":", "raise", "GaugedSchemaError", "(", "'Gauged schema not found, '", "'try a gauged.sync()'", ")", "if", "metadata", "[", "'current_version'", "]", "!=", "Gauged", ".", "VERSION", ":", "msg", "=", "'The schema is version %s while this Gauged is version %s. '", "msg", "+=", "'Try upgrading Gauged and/or running gauged_migrate.py'", "msg", "=", "msg", "%", "(", "metadata", "[", "'current_version'", "]", ",", "Gauged", ".", "VERSION", ")", "raise", "GaugedVersionMismatchError", "(", "msg", ")", "expected_block_size", "=", "'%s/%s'", "%", "(", "config", ".", "block_size", ",", "config", ".", "resolution", ")", "block_size", "=", "'%s/%s'", "%", "(", "metadata", "[", "'block_size'", "]", ",", "metadata", "[", "'resolution'", "]", ")", "if", "block_size", "!=", "expected_block_size", ":", "msg", "=", "'Expected %s and got %s'", "%", "(", "expected_block_size", ",", "block_size", ")", "warn", "(", "msg", ",", "GaugedBlockSizeMismatch", ")", "self", ".", "valid_schema", "=", "True" ]
Check the schema exists and matches configuration
[ "Check", "the", "schema", "exists", "and", "matches", "configuration" ]
cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/gauged.py#L135-L154
train
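A reduced sketch of the same gate logic with stand-in names (not the gauged API): a missing schema or a version mismatch is a hard failure, while a block-size mismatch only warns:

from warnings import warn

VERSION = '1.0.0'
EXPECTED_BLOCK = '86400000/1000'  # assumed block_size/resolution pair

def check_schema(metadata):
    if 'current_version' not in metadata:
        raise RuntimeError('schema not found, run sync() first')
    if metadata['current_version'] != VERSION:
        raise RuntimeError('schema is version %s while this code is version %s'
                           % (metadata['current_version'], VERSION))
    stored = '%s/%s' % (metadata['block_size'], metadata['resolution'])
    if stored != EXPECTED_BLOCK:
        warn('Expected %s and got %s' % (EXPECTED_BLOCK, stored))

check_schema({'current_version': '1.0.0',
              'block_size': 86400000, 'resolution': 1000})  # passes quietly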
Erotemic/utool
utool/util_graph.py
nx_dag_node_rank
def nx_dag_node_rank(graph, nodes=None): """ Returns rank of nodes that define the "level" each node is on in a topological sort. This is the same as the Graphviz dot rank. Ignore: simple_graph = ut.simplify_graph(exi_graph) adj_dict = ut.nx_to_adj_dict(simple_graph) import plottool as pt pt.qt4ensure() pt.show_nx(graph) Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]} >>> nodes = [2, 1, 5] >>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph) >>> graph = f_graph.reverse() >>> #ranks = ut.nx_dag_node_rank(graph, nodes) >>> ranks = ut.nx_dag_node_rank(graph, nodes) >>> result = ('ranks = %r' % (ranks,)) >>> print(result) ranks = [3, 2, 1] """ import utool as ut source = list(ut.nx_source_nodes(graph))[0] longest_paths = dict([(target, dag_longest_path(graph, source, target)) for target in graph.nodes()]) node_to_rank = ut.map_dict_vals(len, longest_paths) if nodes is None: return node_to_rank else: ranks = ut.dict_take(node_to_rank, nodes) return ranks
python
def nx_dag_node_rank(graph, nodes=None): """ Returns rank of nodes that define the "level" each node is on in a topological sort. This is the same as the Graphviz dot rank. Ignore: simple_graph = ut.simplify_graph(exi_graph) adj_dict = ut.nx_to_adj_dict(simple_graph) import plottool as pt pt.qt4ensure() pt.show_nx(graph) Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]} >>> nodes = [2, 1, 5] >>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph) >>> graph = f_graph.reverse() >>> #ranks = ut.nx_dag_node_rank(graph, nodes) >>> ranks = ut.nx_dag_node_rank(graph, nodes) >>> result = ('ranks = %r' % (ranks,)) >>> print(result) ranks = [3, 2, 1] """ import utool as ut source = list(ut.nx_source_nodes(graph))[0] longest_paths = dict([(target, dag_longest_path(graph, source, target)) for target in graph.nodes()]) node_to_rank = ut.map_dict_vals(len, longest_paths) if nodes is None: return node_to_rank else: ranks = ut.dict_take(node_to_rank, nodes) return ranks
[ "def", "nx_dag_node_rank", "(", "graph", ",", "nodes", "=", "None", ")", ":", "import", "utool", "as", "ut", "source", "=", "list", "(", "ut", ".", "nx_source_nodes", "(", "graph", ")", ")", "[", "0", "]", "longest_paths", "=", "dict", "(", "[", "(", "target", ",", "dag_longest_path", "(", "graph", ",", "source", ",", "target", ")", ")", "for", "target", "in", "graph", ".", "nodes", "(", ")", "]", ")", "node_to_rank", "=", "ut", ".", "map_dict_vals", "(", "len", ",", "longest_paths", ")", "if", "nodes", "is", "None", ":", "return", "node_to_rank", "else", ":", "ranks", "=", "ut", ".", "dict_take", "(", "node_to_rank", ",", "nodes", ")", "return", "ranks" ]
Returns rank of nodes that define the "level" each node is on in a topological sort. This is the same as the Graphviz dot rank. Ignore: simple_graph = ut.simplify_graph(exi_graph) adj_dict = ut.nx_to_adj_dict(simple_graph) import plottool as pt pt.qt4ensure() pt.show_nx(graph) Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> adj_dict = {0: [5], 1: [5], 2: [1], 3: [4], 4: [0], 5: [], 6: [4], 7: [9], 8: [6], 9: [1]} >>> nodes = [2, 1, 5] >>> f_graph = ut.nx_from_adj_dict(adj_dict, nx.DiGraph) >>> graph = f_graph.reverse() >>> #ranks = ut.nx_dag_node_rank(graph, nodes) >>> ranks = ut.nx_dag_node_rank(graph, nodes) >>> result = ('ranks = %r' % (ranks,)) >>> print(result) ranks = [3, 2, 1]
[ "Returns", "rank", "of", "nodes", "that", "define", "the", "level", "each", "node", "is", "on", "in", "a", "topological", "sort", ".", "This", "is", "the", "same", "as", "the", "Graphviz", "dot", "rank", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L243-L278
train
Erotemic/utool
utool/util_graph.py
nx_all_nodes_between
def nx_all_nodes_between(graph, source, target, data=False): """ Find all nodes on paths between source and target. """ import utool as ut if source is None: # assume there is a single source sources = list(ut.nx_source_nodes(graph)) assert len(sources) == 1, ( 'specify source if there is not only one') source = sources[0] if target is None: # assume there is a single sink sinks = list(ut.nx_sink_nodes(graph)) assert len(sinks) == 1, ( 'specify sink if there is not only one') target = sinks[0] all_simple_paths = list(nx.all_simple_paths(graph, source, target)) nodes = sorted(set.union(*map(set, all_simple_paths))) return nodes
python
def nx_all_nodes_between(graph, source, target, data=False): """ Find all nodes on paths between source and target. """ import utool as ut if source is None: # assume there is a single source sources = list(ut.nx_source_nodes(graph)) assert len(sources) == 1, ( 'specify source if there is not only one') source = sources[0] if target is None: # assume there is a single sink sinks = list(ut.nx_sink_nodes(graph)) assert len(sinks) == 1, ( 'specify sink if there is not only one') target = sinks[0] all_simple_paths = list(nx.all_simple_paths(graph, source, target)) nodes = sorted(set.union(*map(set, all_simple_paths))) return nodes
[ "def", "nx_all_nodes_between", "(", "graph", ",", "source", ",", "target", ",", "data", "=", "False", ")", ":", "import", "utool", "as", "ut", "if", "source", "is", "None", ":", "# assume there is a single source", "sources", "=", "list", "(", "ut", ".", "nx_source_nodes", "(", "graph", ")", ")", "assert", "len", "(", "sources", ")", "==", "1", ",", "(", "'specify source if there is not only one'", ")", "source", "=", "sources", "[", "0", "]", "if", "target", "is", "None", ":", "# assume there is a single source", "sinks", "=", "list", "(", "ut", ".", "nx_sink_nodes", "(", "graph", ")", ")", "assert", "len", "(", "sinks", ")", "==", "1", ",", "(", "'specify sink if there is not only one'", ")", "target", "=", "sinks", "[", "0", "]", "all_simple_paths", "=", "list", "(", "nx", ".", "all_simple_paths", "(", "graph", ",", "source", ",", "target", ")", ")", "nodes", "=", "sorted", "(", "set", ".", "union", "(", "*", "map", "(", "set", ",", "all_simple_paths", ")", ")", ")", "return", "nodes" ]
Find all nodes on paths between source and target.
[ "Find", "all", "nodes", "with", "on", "paths", "between", "source", "and", "target", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L281-L300
train
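A small check of the path-union behavior, assuming utool exposes the function at the top level as it does its other util_graph helpers. A diamond graph where every node lies on some simple path from 0 to 3, plus a trailing node 4:

import networkx as nx
import utool as ut  # assuming utool is installed

G = nx.DiGraph([(0, 1), (0, 2), (1, 3), (2, 3), (3, 4)])
print(ut.nx_all_nodes_between(G, 0, 3))  # [0, 1, 2, 3]

# With source/target None, the single source (0) and sink (4) are inferred:
print(ut.nx_all_nodes_between(G, None, None))  # [0, 1, 2, 3, 4]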
Erotemic/utool
utool/util_graph.py
nx_all_simple_edge_paths
def nx_all_simple_edge_paths(G, source, target, cutoff=None, keys=False, data=False): """ Returns each path from source to target as a list of edges. This function is meant to be used with MultiGraphs or MultiDiGraphs. When ``keys`` is True each edge in the path is returned with its unique key identifier. In this case it is possible to distinguish between different paths along different edges between the same two nodes. Derived from simple_paths.py in networkx """ if cutoff is None: cutoff = len(G) - 1 if cutoff < 1: return import utool as ut import six visited_nodes = [source] visited_edges = [] if G.is_multigraph(): get_neighbs = ut.partial(G.edges, keys=keys, data=data) else: get_neighbs = ut.partial(G.edges, data=data) edge_stack = [iter(get_neighbs(source))] while edge_stack: children_edges = edge_stack[-1] child_edge = six.next(children_edges, None) if child_edge is None: edge_stack.pop() visited_nodes.pop() if len(visited_edges) > 0: visited_edges.pop() elif len(visited_nodes) < cutoff: child_node = child_edge[1] if child_node == target: yield visited_edges + [child_edge] elif child_node not in visited_nodes: visited_nodes.append(child_node) visited_edges.append(child_edge) edge_stack.append(iter(get_neighbs(child_node))) else: for edge in [child_edge] + list(children_edges): if edge[1] == target: yield visited_edges + [edge] edge_stack.pop() visited_nodes.pop() if len(visited_edges) > 0: visited_edges.pop()
python
def nx_all_simple_edge_paths(G, source, target, cutoff=None, keys=False, data=False): """ Returns each path from source to target as a list of edges. This function is meant to be used with MultiGraphs or MultiDiGraphs. When ``keys`` is True each edge in the path is returned with its unique key identifier. In this case it is possible to distinguish between different paths along different edges between the same two nodes. Derived from simple_paths.py in networkx """ if cutoff is None: cutoff = len(G) - 1 if cutoff < 1: return import utool as ut import six visited_nodes = [source] visited_edges = [] if G.is_multigraph(): get_neighbs = ut.partial(G.edges, keys=keys, data=data) else: get_neighbs = ut.partial(G.edges, data=data) edge_stack = [iter(get_neighbs(source))] while edge_stack: children_edges = edge_stack[-1] child_edge = six.next(children_edges, None) if child_edge is None: edge_stack.pop() visited_nodes.pop() if len(visited_edges) > 0: visited_edges.pop() elif len(visited_nodes) < cutoff: child_node = child_edge[1] if child_node == target: yield visited_edges + [child_edge] elif child_node not in visited_nodes: visited_nodes.append(child_node) visited_edges.append(child_edge) edge_stack.append(iter(get_neighbs(child_node))) else: for edge in [child_edge] + list(children_edges): if edge[1] == target: yield visited_edges + [edge] edge_stack.pop() visited_nodes.pop() if len(visited_edges) > 0: visited_edges.pop()
[ "def", "nx_all_simple_edge_paths", "(", "G", ",", "source", ",", "target", ",", "cutoff", "=", "None", ",", "keys", "=", "False", ",", "data", "=", "False", ")", ":", "if", "cutoff", "is", "None", ":", "cutoff", "=", "len", "(", "G", ")", "-", "1", "if", "cutoff", "<", "1", ":", "return", "import", "utool", "as", "ut", "import", "six", "visited_nodes", "=", "[", "source", "]", "visited_edges", "=", "[", "]", "if", "G", ".", "is_multigraph", "(", ")", ":", "get_neighbs", "=", "ut", ".", "partial", "(", "G", ".", "edges", ",", "keys", "=", "keys", ",", "data", "=", "data", ")", "else", ":", "get_neighbs", "=", "ut", ".", "partial", "(", "G", ".", "edges", ",", "data", "=", "data", ")", "edge_stack", "=", "[", "iter", "(", "get_neighbs", "(", "source", ")", ")", "]", "while", "edge_stack", ":", "children_edges", "=", "edge_stack", "[", "-", "1", "]", "child_edge", "=", "six", ".", "next", "(", "children_edges", ",", "None", ")", "if", "child_edge", "is", "None", ":", "edge_stack", ".", "pop", "(", ")", "visited_nodes", ".", "pop", "(", ")", "if", "len", "(", "visited_edges", ")", ">", "0", ":", "visited_edges", ".", "pop", "(", ")", "elif", "len", "(", "visited_nodes", ")", "<", "cutoff", ":", "child_node", "=", "child_edge", "[", "1", "]", "if", "child_node", "==", "target", ":", "yield", "visited_edges", "+", "[", "child_edge", "]", "elif", "child_node", "not", "in", "visited_nodes", ":", "visited_nodes", ".", "append", "(", "child_node", ")", "visited_edges", ".", "append", "(", "child_edge", ")", "edge_stack", ".", "append", "(", "iter", "(", "get_neighbs", "(", "child_node", ")", ")", ")", "else", ":", "for", "edge", "in", "[", "child_edge", "]", "+", "list", "(", "children_edges", ")", ":", "if", "edge", "[", "1", "]", "==", "target", ":", "yield", "visited_edges", "+", "[", "edge", "]", "edge_stack", ".", "pop", "(", ")", "visited_nodes", ".", "pop", "(", ")", "if", "len", "(", "visited_edges", ")", ">", "0", ":", "visited_edges", ".", "pop", "(", ")" ]
Returns each path from source to target as a list of edges. This function is meant to be used with MultiGraphs or MultiDiGraphs. When ``keys`` is True each edge in the path is returned with its unique key identifier. In this case it is possible to distinguish between different paths along different edges between the same two nodes. Derived from simple_paths.py in networkx
[ "Returns", "each", "path", "from", "source", "to", "target", "as", "a", "list", "of", "edges", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L303-L351
train
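An illustration with a MultiGraph, where keys=True is what lets the generator distinguish the two parallel edges between 1 and 2 (assuming utool exposes the function as ut.nx_all_simple_edge_paths):

import networkx as nx
import utool as ut  # assuming utool is installed

G = nx.MultiGraph()
G.add_edges_from([(1, 2), (1, 2), (2, 3)])  # two parallel 1-2 edges

for path in ut.nx_all_simple_edge_paths(G, 1, 3, keys=True):
    print(path)
# [(1, 2, 0), (2, 3, 0)]
# [(1, 2, 1), (2, 3, 0)]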
Erotemic/utool
utool/util_graph.py
nx_delete_node_attr
def nx_delete_node_attr(graph, name, nodes=None): """ Removes node attributes Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.karate_club_graph() >>> nx.set_node_attributes(G, name='foo', values='bar') >>> datas = nx.get_node_attributes(G, 'club') >>> assert len(nx.get_node_attributes(G, 'club')) == 34 >>> assert len(nx.get_node_attributes(G, 'foo')) == 34 >>> ut.nx_delete_node_attr(G, ['club', 'foo'], nodes=[1, 2]) >>> assert len(nx.get_node_attributes(G, 'club')) == 32 >>> assert len(nx.get_node_attributes(G, 'foo')) == 32 >>> ut.nx_delete_node_attr(G, ['club']) >>> assert len(nx.get_node_attributes(G, 'club')) == 0 >>> assert len(nx.get_node_attributes(G, 'foo')) == 32 """ if nodes is None: nodes = list(graph.nodes()) removed = 0 # names = [name] if not isinstance(name, list) else name node_dict = nx_node_dict(graph) if isinstance(name, list): for node in nodes: for name_ in name: try: del node_dict[node][name_] removed += 1 except KeyError: pass else: for node in nodes: try: del node_dict[node][name] removed += 1 except KeyError: pass return removed
python
def nx_delete_node_attr(graph, name, nodes=None): """ Removes node attributes Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.karate_club_graph() >>> nx.set_node_attributes(G, name='foo', values='bar') >>> datas = nx.get_node_attributes(G, 'club') >>> assert len(nx.get_node_attributes(G, 'club')) == 34 >>> assert len(nx.get_node_attributes(G, 'foo')) == 34 >>> ut.nx_delete_node_attr(G, ['club', 'foo'], nodes=[1, 2]) >>> assert len(nx.get_node_attributes(G, 'club')) == 32 >>> assert len(nx.get_node_attributes(G, 'foo')) == 32 >>> ut.nx_delete_node_attr(G, ['club']) >>> assert len(nx.get_node_attributes(G, 'club')) == 0 >>> assert len(nx.get_node_attributes(G, 'foo')) == 32 """ if nodes is None: nodes = list(graph.nodes()) removed = 0 # names = [name] if not isinstance(name, list) else name node_dict = nx_node_dict(graph) if isinstance(name, list): for node in nodes: for name_ in name: try: del node_dict[node][name_] removed += 1 except KeyError: pass else: for node in nodes: try: del node_dict[node][name] removed += 1 except KeyError: pass return removed
[ "def", "nx_delete_node_attr", "(", "graph", ",", "name", ",", "nodes", "=", "None", ")", ":", "if", "nodes", "is", "None", ":", "nodes", "=", "list", "(", "graph", ".", "nodes", "(", ")", ")", "removed", "=", "0", "# names = [name] if not isinstance(name, list) else name", "node_dict", "=", "nx_node_dict", "(", "graph", ")", "if", "isinstance", "(", "name", ",", "list", ")", ":", "for", "node", "in", "nodes", ":", "for", "name_", "in", "name", ":", "try", ":", "del", "node_dict", "[", "node", "]", "[", "name_", "]", "removed", "+=", "1", "except", "KeyError", ":", "pass", "else", ":", "for", "node", "in", "nodes", ":", "try", ":", "del", "node_dict", "[", "node", "]", "[", "name", "]", "removed", "+=", "1", "except", "KeyError", ":", "pass", "return", "removed" ]
Removes node attributes Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.karate_club_graph() >>> nx.set_node_attributes(G, name='foo', values='bar') >>> datas = nx.get_node_attributes(G, 'club') >>> assert len(nx.get_node_attributes(G, 'club')) == 34 >>> assert len(nx.get_node_attributes(G, 'foo')) == 34 >>> ut.nx_delete_node_attr(G, ['club', 'foo'], nodes=[1, 2]) >>> assert len(nx.get_node_attributes(G, 'club')) == 32 >>> assert len(nx.get_node_attributes(G, 'foo')) == 32 >>> ut.nx_delete_node_attr(G, ['club']) >>> assert len(nx.get_node_attributes(G, 'club')) == 0 >>> assert len(nx.get_node_attributes(G, 'foo')) == 32
[ "Removes", "node", "attributes" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L561-L601
train
Erotemic/utool
utool/util_graph.py
nx_delete_edge_attr
def nx_delete_edge_attr(graph, name, edges=None): """ Removes attributes from specific edges in the graph Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.karate_club_graph() >>> nx.set_edge_attributes(G, name='spam', values='eggs') >>> nx.set_edge_attributes(G, name='foo', values='bar') >>> assert len(nx.get_edge_attributes(G, 'spam')) == 78 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 78 >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2)]) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 77 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77 >>> ut.nx_delete_edge_attr(G, ['spam']) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77 Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.MultiGraph() >>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5), (4, 5), (1, 2)]) >>> nx.set_edge_attributes(G, name='spam', values='eggs') >>> nx.set_edge_attributes(G, name='foo', values='bar') >>> assert len(nx.get_edge_attributes(G, 'spam')) == 6 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 6 >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2, 0)]) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 5 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5 >>> ut.nx_delete_edge_attr(G, ['spam']) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5 """ removed = 0 keys = [name] if not isinstance(name, (list, tuple)) else name if edges is None: if graph.is_multigraph(): edges = graph.edges(keys=True) else: edges = graph.edges() if graph.is_multigraph(): for u, v, k in edges: for key_ in keys: try: del graph[u][v][k][key_] removed += 1 except KeyError: pass else: for u, v in edges: for key_ in keys: try: del graph[u][v][key_] removed += 1 except KeyError: pass return removed
python
def nx_delete_edge_attr(graph, name, edges=None): """ Removes an attributes from specific edges in the graph Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.karate_club_graph() >>> nx.set_edge_attributes(G, name='spam', values='eggs') >>> nx.set_edge_attributes(G, name='foo', values='bar') >>> assert len(nx.get_edge_attributes(G, 'spam')) == 78 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 78 >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2)]) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 77 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77 >>> ut.nx_delete_edge_attr(G, ['spam']) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77 Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.MultiGraph() >>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5), (4, 5), (1, 2)]) >>> nx.set_edge_attributes(G, name='spam', values='eggs') >>> nx.set_edge_attributes(G, name='foo', values='bar') >>> assert len(nx.get_edge_attributes(G, 'spam')) == 6 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 6 >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2, 0)]) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 5 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5 >>> ut.nx_delete_edge_attr(G, ['spam']) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5 """ removed = 0 keys = [name] if not isinstance(name, (list, tuple)) else name if edges is None: if graph.is_multigraph(): edges = graph.edges(keys=True) else: edges = graph.edges() if graph.is_multigraph(): for u, v, k in edges: for key_ in keys: try: del graph[u][v][k][key_] removed += 1 except KeyError: pass else: for u, v in edges: for key_ in keys: try: del graph[u][v][key_] removed += 1 except KeyError: pass return removed
[ "def", "nx_delete_edge_attr", "(", "graph", ",", "name", ",", "edges", "=", "None", ")", ":", "removed", "=", "0", "keys", "=", "[", "name", "]", "if", "not", "isinstance", "(", "name", ",", "(", "list", ",", "tuple", ")", ")", "else", "name", "if", "edges", "is", "None", ":", "if", "graph", ".", "is_multigraph", "(", ")", ":", "edges", "=", "graph", ".", "edges", "(", "keys", "=", "True", ")", "else", ":", "edges", "=", "graph", ".", "edges", "(", ")", "if", "graph", ".", "is_multigraph", "(", ")", ":", "for", "u", ",", "v", ",", "k", "in", "edges", ":", "for", "key_", "in", "keys", ":", "try", ":", "del", "graph", "[", "u", "]", "[", "v", "]", "[", "k", "]", "[", "key_", "]", "removed", "+=", "1", "except", "KeyError", ":", "pass", "else", ":", "for", "u", ",", "v", "in", "edges", ":", "for", "key_", "in", "keys", ":", "try", ":", "del", "graph", "[", "u", "]", "[", "v", "]", "[", "key_", "]", "removed", "+=", "1", "except", "KeyError", ":", "pass", "return", "removed" ]
Removes an attribute from specific edges in the graph Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.karate_club_graph() >>> nx.set_edge_attributes(G, name='spam', values='eggs') >>> nx.set_edge_attributes(G, name='foo', values='bar') >>> assert len(nx.get_edge_attributes(G, 'spam')) == 78 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 78 >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2)]) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 77 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77 >>> ut.nx_delete_edge_attr(G, ['spam']) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 77 Doctest: >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.MultiGraph() >>> G.add_edges_from([(1, 2), (2, 3), (3, 4), (4, 5), (4, 5), (1, 2)]) >>> nx.set_edge_attributes(G, name='spam', values='eggs') >>> nx.set_edge_attributes(G, name='foo', values='bar') >>> assert len(nx.get_edge_attributes(G, 'spam')) == 6 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 6 >>> ut.nx_delete_edge_attr(G, ['spam', 'foo'], edges=[(1, 2, 0)]) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 5 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5 >>> ut.nx_delete_edge_attr(G, ['spam']) >>> assert len(nx.get_edge_attributes(G, 'spam')) == 0 >>> assert len(nx.get_edge_attributes(G, 'foo')) == 5
[ "Removes", "an", "attribute", "from", "specific", "edges", "in", "the", "graph" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L605-L663
train
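The same pattern works for edges; a short sketch with an invented 'weight' attribute (assumptions as above):

import networkx as nx
import utool as ut
G = nx.Graph([(1, 2), (2, 3)])
nx.set_edge_attributes(G, name='weight', values=1.0)
# remove 'weight' from one edge, then sweep the rest of the graph
assert ut.nx_delete_edge_attr(G, 'weight', edges=[(1, 2)]) == 1
assert ut.nx_delete_edge_attr(G, 'weight') == 1
assert len(nx.get_edge_attributes(G, 'weight')) == 0

For multigraphs, edges must be given as (u, v, key) triples, as in the second doctest above.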
Erotemic/utool
utool/util_graph.py
nx_gen_node_values
def nx_gen_node_values(G, key, nodes, default=util_const.NoParam): """ Generates attributes values of specific nodes """ node_dict = nx_node_dict(G) if default is util_const.NoParam: return (node_dict[n][key] for n in nodes) else: return (node_dict[n].get(key, default) for n in nodes)
python
def nx_gen_node_values(G, key, nodes, default=util_const.NoParam): """ Generates attributes values of specific nodes """ node_dict = nx_node_dict(G) if default is util_const.NoParam: return (node_dict[n][key] for n in nodes) else: return (node_dict[n].get(key, default) for n in nodes)
[ "def", "nx_gen_node_values", "(", "G", ",", "key", ",", "nodes", ",", "default", "=", "util_const", ".", "NoParam", ")", ":", "node_dict", "=", "nx_node_dict", "(", "G", ")", "if", "default", "is", "util_const", ".", "NoParam", ":", "return", "(", "node_dict", "[", "n", "]", "[", "key", "]", "for", "n", "in", "nodes", ")", "else", ":", "return", "(", "node_dict", "[", "n", "]", ".", "get", "(", "key", ",", "default", ")", "for", "n", "in", "nodes", ")" ]
Generates attribute values of specific nodes
[ "Generates", "attribute", "values", "of", "specific", "nodes" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L748-L756
train
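nx_gen_node_values returns a lazy generator; passing a default switches missing keys from KeyError to a fallback value. A small sketch (the 'rank' attribute is invented for illustration):

import networkx as nx
import utool as ut
G = nx.Graph([(1, 2), (2, 3)])
nx.set_node_attributes(G, name='rank', values={1: 'a', 3: 'c'})
# node 2 has no 'rank'; the default fills the gap instead of raising
vals = list(ut.nx_gen_node_values(G, 'rank', [1, 2, 3], default=None))
assert vals == ['a', None, 'c']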
Erotemic/utool
utool/util_graph.py
nx_gen_node_attrs
def nx_gen_node_attrs(G, key, nodes=None, default=util_const.NoParam, on_missing='error', on_keyerr='default'): """ Improved generator version of nx.get_node_attributes Args: on_missing (str): Strategy for handling nodes missing from G. Can be {'error', 'default', 'filter'}. defaults to 'error'. on_keyerr (str): Strategy for handling keys missing from node dicts. Can be {'error', 'default', 'filter'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. Notes: strategies are: error - raises an error if key or node does not exist default - returns node, but uses value specified by default filter - skips the node Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3)]) >>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'}) >>> nodes = [1, 2, 3, 4] >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='default'))) == 3 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', on_missing='error', on_keyerr='error')) >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='default'))) == 3 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='filter', on_keyerr='error')) >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='default'))) == 4 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='default', on_keyerr='error')) Example: >>> # DISABLE_DOCTEST >>> # ALL CASES >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3)]) >>> nx.set_node_attributes(G, name='full', values={1: 'A', 2: 'B', 3: 'C'}) >>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'}) >>> nodes = [1, 2, 3, 4] >>> attrs = dict(ut.nx_gen_node_attrs(G, 'full')) >>> input_grid = { >>> 'nodes': [None, (1, 2, 3, 4)], >>> 'key': ['part', 'full'], >>> 'default': [util_const.NoParam, None], >>> } >>> inputs = ut.all_dict_combinations(input_grid) >>> kw_grid = { >>> 'on_missing': ['error', 'default', 'filter'], >>> 'on_keyerr': ['error', 'default', 'filter'], >>> } >>> kws = ut.all_dict_combinations(kw_grid) >>> for in_ in inputs: >>> for kw in kws: >>> kw2 = ut.dict_union(kw, in_) >>> #print(kw2) >>> on_missing = kw['on_missing'] >>> on_keyerr = kw['on_keyerr'] >>> if on_keyerr == 'default' and in_['default'] is util_const.NoParam: >>> on_keyerr = 'error' >>> will_miss = False >>> will_keyerr = False >>> if on_missing == 'error': >>> if in_['key'] == 'part' and in_['nodes'] is not None: >>> will_miss = True >>> if in_['key'] == 'full' and in_['nodes'] is not None: >>> will_miss = True >>> if on_keyerr == 'error': >>> if in_['key'] == 'part': >>> will_keyerr = True >>> if on_missing == 'default': >>> if in_['key'] == 'full' and in_['nodes'] is not None: >>> will_keyerr = True >>> want_error = will_miss or will_keyerr >>> gen = ut.nx_gen_node_attrs(G, **kw2) >>> try: >>> attrs = list(gen) >>> except KeyError: >>> if not want_error: >>> raise AssertionError('should not have errored') >>> else: >>> if want_error: >>> raise AssertionError('should have errored') """ if on_missing is None: on_missing = 'error' if default is util_const.NoParam and on_keyerr == 'default': on_keyerr = 'error' if nodes is None: nodes = G.nodes() # Generate `node_data` nodes and data dictionary node_dict = nx_node_dict(G) if on_missing == 'error': node_data = ((n, node_dict[n]) for n in nodes) elif on_missing == 'filter': node_data = ((n, node_dict[n]) for n in nodes if n in G) elif on_missing == 'default': node_data = ((n, node_dict.get(n, {})) for n in nodes) else: raise KeyError('on_missing={} must be error, filter or default'.format( on_missing)) # Get `node_attrs` desired value out of dictionary if on_keyerr == 'error': node_attrs = ((n, d[key]) for n, d in node_data) elif on_keyerr == 'filter': node_attrs = ((n, d[key]) for n, d in node_data if key in d) elif on_keyerr == 'default': node_attrs = ((n, d.get(key, default)) for n, d in node_data) else: raise KeyError('on_keyerr={} must be error filter or default'.format(on_keyerr)) return node_attrs
python
def nx_gen_node_attrs(G, key, nodes=None, default=util_const.NoParam, on_missing='error', on_keyerr='default'): """ Improved generator version of nx.get_node_attributes Args: on_missing (str): Strategy for handling nodes missing from G. Can be {'error', 'default', 'filter'}. defaults to 'error'. on_keyerr (str): Strategy for handling keys missing from node dicts. Can be {'error', 'default', 'filter'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. Notes: strategies are: error - raises an error if key or node does not exist default - returns node, but uses value specified by default filter - skips the node Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3)]) >>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'}) >>> nodes = [1, 2, 3, 4] >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='default'))) == 3 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', on_missing='error', on_keyerr='error')) >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='default'))) == 3 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='filter', on_keyerr='error')) >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='default'))) == 4 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='default', on_keyerr='error')) Example: >>> # DISABLE_DOCTEST >>> # ALL CASES >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3)]) >>> nx.set_node_attributes(G, name='full', values={1: 'A', 2: 'B', 3: 'C'}) >>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'}) >>> nodes = [1, 2, 3, 4] >>> attrs = dict(ut.nx_gen_node_attrs(G, 'full')) >>> input_grid = { >>> 'nodes': [None, (1, 2, 3, 4)], >>> 'key': ['part', 'full'], >>> 'default': [util_const.NoParam, None], >>> } >>> inputs = ut.all_dict_combinations(input_grid) >>> kw_grid = { >>> 'on_missing': ['error', 'default', 'filter'], >>> 'on_keyerr': ['error', 'default', 'filter'], >>> } >>> kws = ut.all_dict_combinations(kw_grid) >>> for in_ in inputs: >>> for kw in kws: >>> kw2 = ut.dict_union(kw, in_) >>> #print(kw2) >>> on_missing = kw['on_missing'] >>> on_keyerr = kw['on_keyerr'] >>> if on_keyerr == 'default' and in_['default'] is util_const.NoParam: >>> on_keyerr = 'error' >>> will_miss = False >>> will_keyerr = False >>> if on_missing == 'error': >>> if in_['key'] == 'part' and in_['nodes'] is not None: >>> will_miss = True >>> if in_['key'] == 'full' and in_['nodes'] is not None: >>> will_miss = True >>> if on_keyerr == 'error': >>> if in_['key'] == 'part': >>> will_keyerr = True >>> if on_missing == 'default': >>> if in_['key'] == 'full' and in_['nodes'] is not None: >>> will_keyerr = True >>> want_error = will_miss or will_keyerr >>> gen = ut.nx_gen_node_attrs(G, **kw2) >>> try: >>> attrs = list(gen) >>> except KeyError: >>> if not want_error: >>> raise AssertionError('should not have errored') >>> else: >>> if want_error: >>> raise AssertionError('should have errored') """ if on_missing is None: on_missing = 'error' if default is util_const.NoParam and on_keyerr == 'default': on_keyerr = 'error' if nodes is None: nodes = G.nodes() # Generate `node_data` nodes and data dictionary node_dict = nx_node_dict(G) if on_missing == 'error': node_data = ((n, node_dict[n]) for n in nodes) elif on_missing == 'filter': node_data = ((n, node_dict[n]) for n in nodes if n in G) elif on_missing == 'default': node_data = ((n, node_dict.get(n, {})) for n in nodes) else: raise KeyError('on_missing={} must be error, filter or default'.format( on_missing)) # Get `node_attrs` desired value out of dictionary if on_keyerr == 'error': node_attrs = ((n, d[key]) for n, d in node_data) elif on_keyerr == 'filter': node_attrs = ((n, d[key]) for n, d in node_data if key in d) elif on_keyerr == 'default': node_attrs = ((n, d.get(key, default)) for n, d in node_data) else: raise KeyError('on_keyerr={} must be error filter or default'.format(on_keyerr)) return node_attrs
[ "def", "nx_gen_node_attrs", "(", "G", ",", "key", ",", "nodes", "=", "None", ",", "default", "=", "util_const", ".", "NoParam", ",", "on_missing", "=", "'error'", ",", "on_keyerr", "=", "'default'", ")", ":", "if", "on_missing", "is", "None", ":", "on_missing", "=", "'error'", "if", "default", "is", "util_const", ".", "NoParam", "and", "on_keyerr", "==", "'default'", ":", "on_keyerr", "=", "'error'", "if", "nodes", "is", "None", ":", "nodes", "=", "G", ".", "nodes", "(", ")", "# Generate `node_data` nodes and data dictionary", "node_dict", "=", "nx_node_dict", "(", "G", ")", "if", "on_missing", "==", "'error'", ":", "node_data", "=", "(", "(", "n", ",", "node_dict", "[", "n", "]", ")", "for", "n", "in", "nodes", ")", "elif", "on_missing", "==", "'filter'", ":", "node_data", "=", "(", "(", "n", ",", "node_dict", "[", "n", "]", ")", "for", "n", "in", "nodes", "if", "n", "in", "G", ")", "elif", "on_missing", "==", "'default'", ":", "node_data", "=", "(", "(", "n", ",", "node_dict", ".", "get", "(", "n", ",", "{", "}", ")", ")", "for", "n", "in", "nodes", ")", "else", ":", "raise", "KeyError", "(", "'on_missing={} must be error, filter or default'", ".", "format", "(", "on_missing", ")", ")", "# Get `node_attrs` desired value out of dictionary", "if", "on_keyerr", "==", "'error'", ":", "node_attrs", "=", "(", "(", "n", ",", "d", "[", "key", "]", ")", "for", "n", ",", "d", "in", "node_data", ")", "elif", "on_keyerr", "==", "'filter'", ":", "node_attrs", "=", "(", "(", "n", ",", "d", "[", "key", "]", ")", "for", "n", ",", "d", "in", "node_data", "if", "key", "in", "d", ")", "elif", "on_keyerr", "==", "'default'", ":", "node_attrs", "=", "(", "(", "n", ",", "d", ".", "get", "(", "key", ",", "default", ")", ")", "for", "n", ",", "d", "in", "node_data", ")", "else", ":", "raise", "KeyError", "(", "'on_keyerr={} must be error filter or default'", ".", "format", "(", "on_keyerr", ")", ")", "return", "node_attrs" ]
Improved generator version of nx.get_node_attributes Args: on_missing (str): Strategy for handling nodes missing from G. Can be {'error', 'default', 'filter'}. defaults to 'error'. on_keyerr (str): Strategy for handling keys missing from node dicts. Can be {'error', 'default', 'filter'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. Notes: strategies are: error - raises an error if key or node does not exist default - returns node, but uses value specified by default filter - skips the node Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3)]) >>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'}) >>> nodes = [1, 2, 3, 4] >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='default'))) == 3 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', default=None, on_missing='error', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', on_missing='error', on_keyerr='error')) >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='default'))) == 3 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='filter', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='filter', on_keyerr='error')) >>> # >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='default'))) == 4 >>> assert len(list(ut.nx_gen_node_attrs(G, 'part', nodes, default=None, on_missing='default', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, ut.nx_gen_node_attrs(G, 'part', nodes, on_missing='default', on_keyerr='error')) Example: >>> # DISABLE_DOCTEST >>> # ALL CASES >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3)]) >>> nx.set_node_attributes(G, name='full', values={1: 'A', 2: 'B', 3: 'C'}) >>> nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'}) >>> nodes = [1, 2, 3, 4] >>> attrs = dict(ut.nx_gen_node_attrs(G, 'full')) >>> input_grid = { >>> 'nodes': [None, (1, 2, 3, 4)], >>> 'key': ['part', 'full'], >>> 'default': [util_const.NoParam, None], >>> } >>> inputs = ut.all_dict_combinations(input_grid) >>> kw_grid = { >>> 'on_missing': ['error', 'default', 'filter'], >>> 'on_keyerr': ['error', 'default', 'filter'], >>> } >>> kws = ut.all_dict_combinations(kw_grid) >>> for in_ in inputs: >>> for kw in kws: >>> kw2 = ut.dict_union(kw, in_) >>> #print(kw2) >>> on_missing = kw['on_missing'] >>> on_keyerr = kw['on_keyerr'] >>> if on_keyerr == 'default' and in_['default'] is util_const.NoParam: >>> on_keyerr = 'error' >>> will_miss = False >>> will_keyerr = False >>> if on_missing == 'error': >>> if in_['key'] == 'part' and in_['nodes'] is not None: >>> will_miss = True >>> if in_['key'] == 'full' and in_['nodes'] is not None: >>> will_miss = True >>> if on_keyerr == 'error': >>> if in_['key'] == 'part': >>> will_keyerr = True >>> if on_missing == 'default': >>> if in_['key'] == 'full' and in_['nodes'] is not None: >>> will_keyerr = True >>> want_error = will_miss or will_keyerr >>> gen = ut.nx_gen_node_attrs(G, **kw2) >>> try: >>> attrs = list(gen) >>> except KeyError: >>> if not want_error: >>> raise AssertionError('should not have errored') >>> else: >>> if want_error: >>> raise AssertionError('should have errored')
[ "Improved", "generator", "version", "of", "nx", ".", "get_node_attributes" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L759-L877
train
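A sketch of the two strategy knobs working together (attribute values invented for illustration):

import networkx as nx
import utool as ut
G = nx.Graph([(1, 2), (2, 3)])
nx.set_node_attributes(G, name='part', values={1: 'bar', 3: 'baz'})
# node 4 is not in G and node 2 lacks 'part'; both are silently skipped
attrs = dict(ut.nx_gen_node_attrs(G, 'part', nodes=[1, 2, 3, 4],
                                  on_missing='filter', on_keyerr='filter'))
assert attrs == {1: 'bar', 3: 'baz'}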
Erotemic/utool
utool/util_graph.py
nx_gen_edge_values
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam, on_missing='error', on_keyerr='default'): """ Generates attributes values of specific edges Args: on_missing (str): Strategy for handling nodes missing from G. Can be {'error', 'default'}. defaults to 'error'. on_keyerr (str): Strategy for handling keys missing from node dicts. Can be {'error', 'default'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. """ if edges is None: edges = G.edges() if on_missing is None: on_missing = 'error' if on_keyerr is None: on_keyerr = 'default' if default is util_const.NoParam and on_keyerr == 'default': on_keyerr = 'error' # Generate `data_iter` edges and data dictionary if on_missing == 'error': data_iter = (G.adj[u][v] for u, v in edges) elif on_missing == 'default': data_iter = (G.adj[u][v] if G.has_edge(u, v) else {} for u, v in edges) else: raise KeyError('on_missing={} must be error, filter or default'.format( on_missing)) # Get `value_iter` desired value out of dictionary if on_keyerr == 'error': value_iter = (d[key] for d in data_iter) elif on_keyerr == 'default': value_iter = (d.get(key, default) for d in data_iter) else: raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr)) return value_iter
python
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam, on_missing='error', on_keyerr='default'): """ Generates attributes values of specific edges Args: on_missing (str): Strategy for handling nodes missing from G. Can be {'error', 'default'}. defaults to 'error'. on_keyerr (str): Strategy for handling keys missing from node dicts. Can be {'error', 'default'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. """ if edges is None: edges = G.edges() if on_missing is None: on_missing = 'error' if on_keyerr is None: on_keyerr = 'default' if default is util_const.NoParam and on_keyerr == 'default': on_keyerr = 'error' # Generate `data_iter` edges and data dictionary if on_missing == 'error': data_iter = (G.adj[u][v] for u, v in edges) elif on_missing == 'default': data_iter = (G.adj[u][v] if G.has_edge(u, v) else {} for u, v in edges) else: raise KeyError('on_missing={} must be error, filter or default'.format( on_missing)) # Get `value_iter` desired value out of dictionary if on_keyerr == 'error': value_iter = (d[key] for d in data_iter) elif on_keyerr == 'default': value_iter = (d.get(key, default) for d in data_iter) else: raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr)) return value_iter
[ "def", "nx_gen_edge_values", "(", "G", ",", "key", ",", "edges", "=", "None", ",", "default", "=", "util_const", ".", "NoParam", ",", "on_missing", "=", "'error'", ",", "on_keyerr", "=", "'default'", ")", ":", "if", "edges", "is", "None", ":", "edges", "=", "G", ".", "edges", "(", ")", "if", "on_missing", "is", "None", ":", "on_missing", "=", "'error'", "if", "on_keyerr", "is", "None", ":", "on_keyerr", "=", "'default'", "if", "default", "is", "util_const", ".", "NoParam", "and", "on_keyerr", "==", "'default'", ":", "on_keyerr", "=", "'error'", "# Generate `data_iter` edges and data dictionary", "if", "on_missing", "==", "'error'", ":", "data_iter", "=", "(", "G", ".", "adj", "[", "u", "]", "[", "v", "]", "for", "u", ",", "v", "in", "edges", ")", "elif", "on_missing", "==", "'default'", ":", "data_iter", "=", "(", "G", ".", "adj", "[", "u", "]", "[", "v", "]", "if", "G", ".", "has_edge", "(", "u", ",", "v", ")", "else", "{", "}", "for", "u", ",", "v", "in", "edges", ")", "else", ":", "raise", "KeyError", "(", "'on_missing={} must be error, filter or default'", ".", "format", "(", "on_missing", ")", ")", "# Get `value_iter` desired value out of dictionary", "if", "on_keyerr", "==", "'error'", ":", "value_iter", "=", "(", "d", "[", "key", "]", "for", "d", "in", "data_iter", ")", "elif", "on_keyerr", "==", "'default'", ":", "value_iter", "=", "(", "d", ".", "get", "(", "key", ",", "default", ")", "for", "d", "in", "data_iter", ")", "else", ":", "raise", "KeyError", "(", "'on_keyerr={} must be error or default'", ".", "format", "(", "on_keyerr", ")", ")", "return", "value_iter" ]
Generates attribute values of specific edges Args: on_missing (str): Strategy for handling edges missing from G. Can be {'error', 'default'}. defaults to 'error'. on_keyerr (str): Strategy for handling keys missing from edge dicts. Can be {'error', 'default'}. defaults to 'default' if default is specified, otherwise defaults to 'error'.
[ "Generates", "attribute", "values", "of", "specific", "edges" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L880-L916
train
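A sketch of the edge-value analogue; note that passing a default implicitly selects on_keyerr='default' (the 'w' attribute is invented):

import networkx as nx
import utool as ut
G = nx.Graph([(1, 2), (2, 3)])
nx.set_edge_attributes(G, name='w', values={(1, 2): 5})
# edge (2, 3) carries no 'w', so the default of 0 is substituted
vals = list(ut.nx_gen_edge_values(G, 'w', edges=[(1, 2), (2, 3)], default=0))
assert vals == [5, 0]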
Erotemic/utool
utool/util_graph.py
nx_gen_edge_attrs
def nx_gen_edge_attrs(G, key, edges=None, default=util_const.NoParam, on_missing='error', on_keyerr='default'): """ Improved generator version of nx.get_edge_attributes Args: on_missing (str): Strategy for handling nodes missing from G. Can be {'error', 'default', 'filter'}. defaults to 'error'. is on_missing is not error, then we allow any edge even if the endpoints are not in the graph. on_keyerr (str): Strategy for handling keys missing from node dicts. Can be {'error', 'default', 'filter'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3), (3, 4)]) >>> nx.set_edge_attributes(G, name='part', values={(1, 2): 'bar', (2, 3): 'baz'}) >>> edges = [(1, 2), (2, 3), (3, 4), (4, 5)] >>> func = ut.partial(ut.nx_gen_edge_attrs, G, 'part', default=None) >>> # >>> assert len(list(func(on_missing='error', on_keyerr='default'))) == 3 >>> assert len(list(func(on_missing='error', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(on_missing='error', on_keyerr='error')) >>> # >>> assert len(list(func(edges, on_missing='filter', on_keyerr='default'))) == 3 >>> assert len(list(func(edges, on_missing='filter', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(edges, on_missing='filter', on_keyerr='error')) >>> # >>> assert len(list(func(edges, on_missing='default', on_keyerr='default'))) == 4 >>> assert len(list(func(edges, on_missing='default', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(edges, on_missing='default', on_keyerr='error')) """ if on_missing is None: on_missing = 'error' if default is util_const.NoParam and on_keyerr == 'default': on_keyerr = 'error' if edges is None: if G.is_multigraph(): raise NotImplementedError('') # uvk_iter = G.edges(keys=True) else: edges = G.edges() # Generate `edge_data` edges and data dictionary if on_missing == 'error': edge_data = (((u, v), G.adj[u][v]) for u, v in edges) elif on_missing == 'filter': edge_data = (((u, v), G.adj[u][v]) for u, v in edges if G.has_edge(u, v)) elif on_missing == 'default': edge_data = (((u, v), G.adj[u][v]) if G.has_edge(u, v) else ((u, v), {}) for u, v in edges) else: raise KeyError('on_missing={}'.format(on_missing)) # Get `edge_attrs` desired value out of dictionary if on_keyerr == 'error': edge_attrs = ((e, d[key]) for e, d in edge_data) elif on_keyerr == 'filter': edge_attrs = ((e, d[key]) for e, d in edge_data if key in d) elif on_keyerr == 'default': edge_attrs = ((e, d.get(key, default)) for e, d in edge_data) else: raise KeyError('on_keyerr={}'.format(on_keyerr)) return edge_attrs
python
def nx_gen_edge_attrs(G, key, edges=None, default=util_const.NoParam, on_missing='error', on_keyerr='default'): """ Improved generator version of nx.get_edge_attributes Args: on_missing (str): Strategy for handling nodes missing from G. Can be {'error', 'default', 'filter'}. defaults to 'error'. is on_missing is not error, then we allow any edge even if the endpoints are not in the graph. on_keyerr (str): Strategy for handling keys missing from node dicts. Can be {'error', 'default', 'filter'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3), (3, 4)]) >>> nx.set_edge_attributes(G, name='part', values={(1, 2): 'bar', (2, 3): 'baz'}) >>> edges = [(1, 2), (2, 3), (3, 4), (4, 5)] >>> func = ut.partial(ut.nx_gen_edge_attrs, G, 'part', default=None) >>> # >>> assert len(list(func(on_missing='error', on_keyerr='default'))) == 3 >>> assert len(list(func(on_missing='error', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(on_missing='error', on_keyerr='error')) >>> # >>> assert len(list(func(edges, on_missing='filter', on_keyerr='default'))) == 3 >>> assert len(list(func(edges, on_missing='filter', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(edges, on_missing='filter', on_keyerr='error')) >>> # >>> assert len(list(func(edges, on_missing='default', on_keyerr='default'))) == 4 >>> assert len(list(func(edges, on_missing='default', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(edges, on_missing='default', on_keyerr='error')) """ if on_missing is None: on_missing = 'error' if default is util_const.NoParam and on_keyerr == 'default': on_keyerr = 'error' if edges is None: if G.is_multigraph(): raise NotImplementedError('') # uvk_iter = G.edges(keys=True) else: edges = G.edges() # Generate `edge_data` edges and data dictionary if on_missing == 'error': edge_data = (((u, v), G.adj[u][v]) for u, v in edges) elif on_missing == 'filter': edge_data = (((u, v), G.adj[u][v]) for u, v in edges if G.has_edge(u, v)) elif on_missing == 'default': edge_data = (((u, v), G.adj[u][v]) if G.has_edge(u, v) else ((u, v), {}) for u, v in edges) else: raise KeyError('on_missing={}'.format(on_missing)) # Get `edge_attrs` desired value out of dictionary if on_keyerr == 'error': edge_attrs = ((e, d[key]) for e, d in edge_data) elif on_keyerr == 'filter': edge_attrs = ((e, d[key]) for e, d in edge_data if key in d) elif on_keyerr == 'default': edge_attrs = ((e, d.get(key, default)) for e, d in edge_data) else: raise KeyError('on_keyerr={}'.format(on_keyerr)) return edge_attrs
[ "def", "nx_gen_edge_attrs", "(", "G", ",", "key", ",", "edges", "=", "None", ",", "default", "=", "util_const", ".", "NoParam", ",", "on_missing", "=", "'error'", ",", "on_keyerr", "=", "'default'", ")", ":", "if", "on_missing", "is", "None", ":", "on_missing", "=", "'error'", "if", "default", "is", "util_const", ".", "NoParam", "and", "on_keyerr", "==", "'default'", ":", "on_keyerr", "=", "'error'", "if", "edges", "is", "None", ":", "if", "G", ".", "is_multigraph", "(", ")", ":", "raise", "NotImplementedError", "(", "''", ")", "# uvk_iter = G.edges(keys=True)", "else", ":", "edges", "=", "G", ".", "edges", "(", ")", "# Generate `edge_data` edges and data dictionary", "if", "on_missing", "==", "'error'", ":", "edge_data", "=", "(", "(", "(", "u", ",", "v", ")", ",", "G", ".", "adj", "[", "u", "]", "[", "v", "]", ")", "for", "u", ",", "v", "in", "edges", ")", "elif", "on_missing", "==", "'filter'", ":", "edge_data", "=", "(", "(", "(", "u", ",", "v", ")", ",", "G", ".", "adj", "[", "u", "]", "[", "v", "]", ")", "for", "u", ",", "v", "in", "edges", "if", "G", ".", "has_edge", "(", "u", ",", "v", ")", ")", "elif", "on_missing", "==", "'default'", ":", "edge_data", "=", "(", "(", "(", "u", ",", "v", ")", ",", "G", ".", "adj", "[", "u", "]", "[", "v", "]", ")", "if", "G", ".", "has_edge", "(", "u", ",", "v", ")", "else", "(", "(", "u", ",", "v", ")", ",", "{", "}", ")", "for", "u", ",", "v", "in", "edges", ")", "else", ":", "raise", "KeyError", "(", "'on_missing={}'", ".", "format", "(", "on_missing", ")", ")", "# Get `edge_attrs` desired value out of dictionary", "if", "on_keyerr", "==", "'error'", ":", "edge_attrs", "=", "(", "(", "e", ",", "d", "[", "key", "]", ")", "for", "e", ",", "d", "in", "edge_data", ")", "elif", "on_keyerr", "==", "'filter'", ":", "edge_attrs", "=", "(", "(", "e", ",", "d", "[", "key", "]", ")", "for", "e", ",", "d", "in", "edge_data", "if", "key", "in", "d", ")", "elif", "on_keyerr", "==", "'default'", ":", "edge_attrs", "=", "(", "(", "e", ",", "d", ".", "get", "(", "key", ",", "default", ")", ")", "for", "e", ",", "d", "in", "edge_data", ")", "else", ":", "raise", "KeyError", "(", "'on_keyerr={}'", ".", "format", "(", "on_keyerr", ")", ")", "return", "edge_attrs" ]
Improved generator version of nx.get_edge_attributes Args: on_missing (str): Strategy for handling edges missing from G. Can be {'error', 'default', 'filter'}. defaults to 'error'. If on_missing is not 'error', then we allow any edge even if the endpoints are not in the graph. on_keyerr (str): Strategy for handling keys missing from edge dicts. Can be {'error', 'default', 'filter'}. defaults to 'default' if default is specified, otherwise defaults to 'error'. Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> G = nx.Graph([(1, 2), (2, 3), (3, 4)]) >>> nx.set_edge_attributes(G, name='part', values={(1, 2): 'bar', (2, 3): 'baz'}) >>> edges = [(1, 2), (2, 3), (3, 4), (4, 5)] >>> func = ut.partial(ut.nx_gen_edge_attrs, G, 'part', default=None) >>> # >>> assert len(list(func(on_missing='error', on_keyerr='default'))) == 3 >>> assert len(list(func(on_missing='error', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(on_missing='error', on_keyerr='error')) >>> # >>> assert len(list(func(edges, on_missing='filter', on_keyerr='default'))) == 3 >>> assert len(list(func(edges, on_missing='filter', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(edges, on_missing='filter', on_keyerr='error')) >>> # >>> assert len(list(func(edges, on_missing='default', on_keyerr='default'))) == 4 >>> assert len(list(func(edges, on_missing='default', on_keyerr='filter'))) == 2 >>> ut.assert_raises(KeyError, list, func(edges, on_missing='default', on_keyerr='error'))
[ "Improved", "generator", "version", "of", "nx", ".", "get_edge_attributes" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L923-L989
train
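A sketch of the on_missing='default' path, which tolerates edges that are absent from the graph entirely:

import networkx as nx
import utool as ut
G = nx.Graph([(1, 2), (2, 3), (3, 4)])
nx.set_edge_attributes(G, name='part', values={(1, 2): 'bar'})
# (4, 5) is not an edge of G; it is still reported, with the default value
attrs = dict(ut.nx_gen_edge_attrs(G, 'part', edges=[(1, 2), (4, 5)],
                                  on_missing='default', on_keyerr='default',
                                  default=None))
assert attrs == {(1, 2): 'bar', (4, 5): None}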
Erotemic/utool
utool/util_graph.py
nx_minimum_weight_component
def nx_minimum_weight_component(graph, weight='weight'): """ A minimum weight component is an MST + all negative edges """ mwc = nx.minimum_spanning_tree(graph, weight=weight) # negative edges only reduce the total weight neg_edges = (e for e, w in nx_gen_edge_attrs(graph, weight) if w < 0) mwc.add_edges_from(neg_edges) return mwc
python
def nx_minimum_weight_component(graph, weight='weight'): """ A minimum weight component is an MST + all negative edges """ mwc = nx.minimum_spanning_tree(graph, weight=weight) # negative edges only reduce the total weight neg_edges = (e for e, w in nx_gen_edge_attrs(graph, weight) if w < 0) mwc.add_edges_from(neg_edges) return mwc
[ "def", "nx_minimum_weight_component", "(", "graph", ",", "weight", "=", "'weight'", ")", ":", "mwc", "=", "nx", ".", "minimum_spanning_tree", "(", "graph", ",", "weight", "=", "weight", ")", "# negative edges only reduce the total weight", "neg_edges", "=", "(", "e", "for", "e", ",", "w", "in", "nx_gen_edge_attrs", "(", "graph", ",", "weight", ")", "if", "w", "<", "0", ")", "mwc", ".", "add_edges_from", "(", "neg_edges", ")", "return", "mwc" ]
A minimum weight component is an MST + all negative edges
[ "A", "minimum", "weight", "component", "is", "an", "MST", "+", "all", "negative", "edges" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1037-L1043
train
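A sketch on a weighted triangle (weights invented): the result is the minimum spanning tree plus every negative edge, since adding a negative edge can only lower the total weight.

import networkx as nx
import utool as ut
G = nx.Graph()
G.add_edge(1, 2, weight=-1)
G.add_edge(2, 3, weight=2)
G.add_edge(1, 3, weight=-3)
mwc = ut.nx_minimum_weight_component(G)
# the positive edge (2, 3) is excluded; both negative edges remain
assert set(map(frozenset, mwc.edges())) == {frozenset({1, 2}), frozenset({1, 3})}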
Erotemic/utool
utool/util_graph.py
nx_ensure_agraph_color
def nx_ensure_agraph_color(graph): """ changes colors to hex strings on graph attrs """ from plottool import color_funcs import plottool as pt #import six def _fix_agraph_color(data): try: orig_color = data.get('color', None) alpha = data.get('alpha', None) color = orig_color if color is None and alpha is not None: color = [0, 0, 0] if color is not None: color = pt.ensure_nonhex_color(color) #if isinstance(color, np.ndarray): # color = color.tolist() color = list(color_funcs.ensure_base255(color)) if alpha is not None: if len(color) == 3: color += [int(alpha * 255)] else: color[3] = int(alpha * 255) color = tuple(color) if len(color) == 3: data['color'] = '#%02x%02x%02x' % color else: data['color'] = '#%02x%02x%02x%02x' % color except Exception as ex: import utool as ut ut.printex(ex, keys=['color', 'orig_color', 'data']) raise for node, node_data in graph.nodes(data=True): data = node_data _fix_agraph_color(data) for u, v, edge_data in graph.edges(data=True): data = edge_data _fix_agraph_color(data)
python
def nx_ensure_agraph_color(graph): """ changes colors to hex strings on graph attrs """ from plottool import color_funcs import plottool as pt #import six def _fix_agraph_color(data): try: orig_color = data.get('color', None) alpha = data.get('alpha', None) color = orig_color if color is None and alpha is not None: color = [0, 0, 0] if color is not None: color = pt.ensure_nonhex_color(color) #if isinstance(color, np.ndarray): # color = color.tolist() color = list(color_funcs.ensure_base255(color)) if alpha is not None: if len(color) == 3: color += [int(alpha * 255)] else: color[3] = int(alpha * 255) color = tuple(color) if len(color) == 3: data['color'] = '#%02x%02x%02x' % color else: data['color'] = '#%02x%02x%02x%02x' % color except Exception as ex: import utool as ut ut.printex(ex, keys=['color', 'orig_color', 'data']) raise for node, node_data in graph.nodes(data=True): data = node_data _fix_agraph_color(data) for u, v, edge_data in graph.edges(data=True): data = edge_data _fix_agraph_color(data)
[ "def", "nx_ensure_agraph_color", "(", "graph", ")", ":", "from", "plottool", "import", "color_funcs", "import", "plottool", "as", "pt", "#import six", "def", "_fix_agraph_color", "(", "data", ")", ":", "try", ":", "orig_color", "=", "data", ".", "get", "(", "'color'", ",", "None", ")", "alpha", "=", "data", ".", "get", "(", "'alpha'", ",", "None", ")", "color", "=", "orig_color", "if", "color", "is", "None", "and", "alpha", "is", "not", "None", ":", "color", "=", "[", "0", ",", "0", ",", "0", "]", "if", "color", "is", "not", "None", ":", "color", "=", "pt", ".", "ensure_nonhex_color", "(", "color", ")", "#if isinstance(color, np.ndarray):", "# color = color.tolist()", "color", "=", "list", "(", "color_funcs", ".", "ensure_base255", "(", "color", ")", ")", "if", "alpha", "is", "not", "None", ":", "if", "len", "(", "color", ")", "==", "3", ":", "color", "+=", "[", "int", "(", "alpha", "*", "255", ")", "]", "else", ":", "color", "[", "3", "]", "=", "int", "(", "alpha", "*", "255", ")", "color", "=", "tuple", "(", "color", ")", "if", "len", "(", "color", ")", "==", "3", ":", "data", "[", "'color'", "]", "=", "'#%02x%02x%02x'", "%", "color", "else", ":", "data", "[", "'color'", "]", "=", "'#%02x%02x%02x%02x'", "%", "color", "except", "Exception", "as", "ex", ":", "import", "utool", "as", "ut", "ut", ".", "printex", "(", "ex", ",", "keys", "=", "[", "'color'", ",", "'orig_color'", ",", "'data'", "]", ")", "raise", "for", "node", ",", "node_data", "in", "graph", ".", "nodes", "(", "data", "=", "True", ")", ":", "data", "=", "node_data", "_fix_agraph_color", "(", "data", ")", "for", "u", ",", "v", ",", "edge_data", "in", "graph", ".", "edges", "(", "data", "=", "True", ")", ":", "data", "=", "edge_data", "_fix_agraph_color", "(", "data", ")" ]
changes colors to hex strings on graph attrs
[ "changes", "colors", "to", "hex", "strings", "on", "graph", "attrs" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1075-L1113
train
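A sketch of the conversion (requires the plottool package, which the function imports internally; the exact hex output depends on plottool's color normalization, so it is only indicated in a comment):

import networkx as nx
import utool as ut
G = nx.Graph([(1, 2)])
# colors given as float RGB tuples; an 'alpha' attr would add a fourth hex byte
nx.set_node_attributes(G, name='color', values={1: (1.0, 0.0, 0.0)})
nx.set_edge_attributes(G, name='color', values={(1, 2): (0.0, 0.0, 1.0)})
ut.nx_ensure_agraph_color(G)
print(G.nodes[1]['color'])  # expected to be a hex string such as '#ff0000'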
Erotemic/utool
utool/util_graph.py
dag_longest_path
def dag_longest_path(graph, source, target): """ Finds the longest path in a dag between two nodes """ if source == target: return [source] allpaths = nx.all_simple_paths(graph, source, target) longest_path = [] for l in allpaths: if len(l) > len(longest_path): longest_path = l return longest_path
python
def dag_longest_path(graph, source, target): """ Finds the longest path in a dag between two nodes """ if source == target: return [source] allpaths = nx.all_simple_paths(graph, source, target) longest_path = [] for l in allpaths: if len(l) > len(longest_path): longest_path = l return longest_path
[ "def", "dag_longest_path", "(", "graph", ",", "source", ",", "target", ")", ":", "if", "source", "==", "target", ":", "return", "[", "source", "]", "allpaths", "=", "nx", ".", "all_simple_paths", "(", "graph", ",", "source", ",", "target", ")", "longest_path", "=", "[", "]", "for", "l", "in", "allpaths", ":", "if", "len", "(", "l", ")", ">", "len", "(", "longest_path", ")", ":", "longest_path", "=", "l", "return", "longest_path" ]
Finds the longest path in a dag between two nodes
[ "Finds", "the", "longest", "path", "in", "a", "dag", "between", "two", "nodes" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1126-L1137
train
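A sketch; note that the implementation enumerates every simple path, so it is exponential in the worst case and only suitable for small DAGs:

import networkx as nx
import utool as ut
G = nx.DiGraph([('a', 'b'), ('b', 'd'), ('a', 'c'), ('c', 'e'), ('e', 'd')])
# two simple paths exist from 'a' to 'd'; the longer one wins
assert ut.dag_longest_path(G, 'a', 'd') == ['a', 'c', 'e', 'd']
assert ut.dag_longest_path(G, 'a', 'a') == ['a']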
Erotemic/utool
utool/util_graph.py
simplify_graph
def simplify_graph(graph): """ strips out everything but connectivity Args: graph (nx.Graph): Returns: nx.Graph: new_graph CommandLine: python3 -m utool.util_graph simplify_graph --show python2 -m utool.util_graph simplify_graph --show python2 -c "import networkx as nx; print(nx.__version__)" python3 -c "import networkx as nx; print(nx.__version__)" Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> graph = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'), >>> ('a', 'd'), ('b', 'd'), ('c', 'e'), >>> ('d', 'e'), ('c', 'e'), ('c', 'd')]) >>> new_graph = simplify_graph(graph) >>> result = ut.repr2(list(new_graph.edges())) >>> #adj_list = sorted(list(nx.generate_adjlist(new_graph))) >>> #result = ut.repr2(adj_list) >>> print(result) [(0, 1), (0, 2), (0, 3), (0, 4), (1, 3), (2, 3), (2, 4), (3, 4)] ['0 1 2 3 4', '1 3 4', '2 4', '3', '4 3'] """ import utool as ut nodes = sorted(list(graph.nodes())) node_lookup = ut.make_index_lookup(nodes) if graph.is_multigraph(): edges = list(graph.edges(keys=True)) else: edges = list(graph.edges()) new_nodes = ut.take(node_lookup, nodes) if graph.is_multigraph(): new_edges = [(node_lookup[e[0]], node_lookup[e[1]], e[2], {}) for e in edges] else: new_edges = [(node_lookup[e[0]], node_lookup[e[1]]) for e in edges] cls = graph.__class__ new_graph = cls() new_graph.add_nodes_from(new_nodes) new_graph.add_edges_from(new_edges) return new_graph
python
def simplify_graph(graph): """ strips out everything but connectivity Args: graph (nx.Graph): Returns: nx.Graph: new_graph CommandLine: python3 -m utool.util_graph simplify_graph --show python2 -m utool.util_graph simplify_graph --show python2 -c "import networkx as nx; print(nx.__version__)" python3 -c "import networkx as nx; print(nx.__version__)" Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> graph = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'), >>> ('a', 'd'), ('b', 'd'), ('c', 'e'), >>> ('d', 'e'), ('c', 'e'), ('c', 'd')]) >>> new_graph = simplify_graph(graph) >>> result = ut.repr2(list(new_graph.edges())) >>> #adj_list = sorted(list(nx.generate_adjlist(new_graph))) >>> #result = ut.repr2(adj_list) >>> print(result) [(0, 1), (0, 2), (0, 3), (0, 4), (1, 3), (2, 3), (2, 4), (3, 4)] ['0 1 2 3 4', '1 3 4', '2 4', '3', '4 3'] """ import utool as ut nodes = sorted(list(graph.nodes())) node_lookup = ut.make_index_lookup(nodes) if graph.is_multigraph(): edges = list(graph.edges(keys=True)) else: edges = list(graph.edges()) new_nodes = ut.take(node_lookup, nodes) if graph.is_multigraph(): new_edges = [(node_lookup[e[0]], node_lookup[e[1]], e[2], {}) for e in edges] else: new_edges = [(node_lookup[e[0]], node_lookup[e[1]]) for e in edges] cls = graph.__class__ new_graph = cls() new_graph.add_nodes_from(new_nodes) new_graph.add_edges_from(new_edges) return new_graph
[ "def", "simplify_graph", "(", "graph", ")", ":", "import", "utool", "as", "ut", "nodes", "=", "sorted", "(", "list", "(", "graph", ".", "nodes", "(", ")", ")", ")", "node_lookup", "=", "ut", ".", "make_index_lookup", "(", "nodes", ")", "if", "graph", ".", "is_multigraph", "(", ")", ":", "edges", "=", "list", "(", "graph", ".", "edges", "(", "keys", "=", "True", ")", ")", "else", ":", "edges", "=", "list", "(", "graph", ".", "edges", "(", ")", ")", "new_nodes", "=", "ut", ".", "take", "(", "node_lookup", ",", "nodes", ")", "if", "graph", ".", "is_multigraph", "(", ")", ":", "new_edges", "=", "[", "(", "node_lookup", "[", "e", "[", "0", "]", "]", ",", "node_lookup", "[", "e", "[", "1", "]", "]", ",", "e", "[", "2", "]", ",", "{", "}", ")", "for", "e", "in", "edges", "]", "else", ":", "new_edges", "=", "[", "(", "node_lookup", "[", "e", "[", "0", "]", "]", ",", "node_lookup", "[", "e", "[", "1", "]", "]", ")", "for", "e", "in", "edges", "]", "cls", "=", "graph", ".", "__class__", "new_graph", "=", "cls", "(", ")", "new_graph", ".", "add_nodes_from", "(", "new_nodes", ")", "new_graph", ".", "add_edges_from", "(", "new_edges", ")", "return", "new_graph" ]
strips out everything but connectivity Args: graph (nx.Graph): Returns: nx.Graph: new_graph CommandLine: python3 -m utool.util_graph simplify_graph --show python2 -m utool.util_graph simplify_graph --show python2 -c "import networkx as nx; print(nx.__version__)" python3 -c "import networkx as nx; print(nx.__version__)" Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> graph = nx.DiGraph([('a', 'b'), ('a', 'c'), ('a', 'e'), >>> ('a', 'd'), ('b', 'd'), ('c', 'e'), >>> ('d', 'e'), ('c', 'e'), ('c', 'd')]) >>> new_graph = simplify_graph(graph) >>> result = ut.repr2(list(new_graph.edges())) >>> #adj_list = sorted(list(nx.generate_adjlist(new_graph))) >>> #result = ut.repr2(adj_list) >>> print(result) [(0, 1), (0, 2), (0, 3), (0, 4), (1, 3), (2, 3), (2, 4), (3, 4)] ['0 1 2 3 4', '1 3 4', '2 4', '3', '4 3']
[ "strips", "out", "everything", "but", "connectivity" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1515-L1565
train
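A sketch showing that node labels are replaced by their sorted index and all attributes are discarded:

import networkx as nx
import utool as ut
G = nx.DiGraph()
G.add_edge('b', 'a', weight=3)
G.add_node('c', color='red')
H = ut.simplify_graph(G)
# sorted nodes ['a', 'b', 'c'] map to 0, 1, 2; the edge keeps only its shape
assert sorted(H.nodes()) == [0, 1, 2]
assert list(H.edges()) == [(1, 0)]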
Erotemic/utool
utool/util_graph.py
subgraph_from_edges
def subgraph_from_edges(G, edge_list, ref_back=True): """ Creates a networkx graph that is a subgraph of G defined by the list of edges in edge_list. Requires G to be a networkx MultiGraph or MultiDiGraph edge_list is a list of edges in either (u,v) or (u,v,d) form where u and v are nodes comprising an edge, and d would be a dictionary of edge attributes ref_back determines whether the created subgraph refers to back to the original graph and therefore changes to the subgraph's attributes also affect the original graph, or if it is to create a new copy of the original graph. References: http://stackoverflow.com/questions/16150557/nx-subgraph-from-edges """ # TODO: support multi-di-graph sub_nodes = list({y for x in edge_list for y in x[0:2]}) #edge_list_no_data = [edge[0:2] for edge in edge_list] multi_edge_list = [edge[0:3] for edge in edge_list] if ref_back: G_sub = G.subgraph(sub_nodes) for edge in G_sub.edges(keys=True): if edge not in multi_edge_list: G_sub.remove_edge(*edge) else: G_sub = G.subgraph(sub_nodes).copy() for edge in G_sub.edges(keys=True): if edge not in multi_edge_list: G_sub.remove_edge(*edge) return G_sub
python
def subgraph_from_edges(G, edge_list, ref_back=True): """ Creates a networkx graph that is a subgraph of G defined by the list of edges in edge_list. Requires G to be a networkx MultiGraph or MultiDiGraph edge_list is a list of edges in either (u,v) or (u,v,d) form where u and v are nodes comprising an edge, and d would be a dictionary of edge attributes ref_back determines whether the created subgraph refers to back to the original graph and therefore changes to the subgraph's attributes also affect the original graph, or if it is to create a new copy of the original graph. References: http://stackoverflow.com/questions/16150557/nx-subgraph-from-edges """ # TODO: support multi-di-graph sub_nodes = list({y for x in edge_list for y in x[0:2]}) #edge_list_no_data = [edge[0:2] for edge in edge_list] multi_edge_list = [edge[0:3] for edge in edge_list] if ref_back: G_sub = G.subgraph(sub_nodes) for edge in G_sub.edges(keys=True): if edge not in multi_edge_list: G_sub.remove_edge(*edge) else: G_sub = G.subgraph(sub_nodes).copy() for edge in G_sub.edges(keys=True): if edge not in multi_edge_list: G_sub.remove_edge(*edge) return G_sub
[ "def", "subgraph_from_edges", "(", "G", ",", "edge_list", ",", "ref_back", "=", "True", ")", ":", "# TODO: support multi-di-graph", "sub_nodes", "=", "list", "(", "{", "y", "for", "x", "in", "edge_list", "for", "y", "in", "x", "[", "0", ":", "2", "]", "}", ")", "#edge_list_no_data = [edge[0:2] for edge in edge_list]", "multi_edge_list", "=", "[", "edge", "[", "0", ":", "3", "]", "for", "edge", "in", "edge_list", "]", "if", "ref_back", ":", "G_sub", "=", "G", ".", "subgraph", "(", "sub_nodes", ")", "for", "edge", "in", "G_sub", ".", "edges", "(", "keys", "=", "True", ")", ":", "if", "edge", "not", "in", "multi_edge_list", ":", "G_sub", ".", "remove_edge", "(", "*", "edge", ")", "else", ":", "G_sub", "=", "G", ".", "subgraph", "(", "sub_nodes", ")", ".", "copy", "(", ")", "for", "edge", "in", "G_sub", ".", "edges", "(", "keys", "=", "True", ")", ":", "if", "edge", "not", "in", "multi_edge_list", ":", "G_sub", ".", "remove_edge", "(", "*", "edge", ")", "return", "G_sub" ]
Creates a networkx graph that is a subgraph of G defined by the list of edges in edge_list. Requires G to be a networkx MultiGraph or MultiDiGraph edge_list is a list of edges in either (u,v) or (u,v,d) form where u and v are nodes comprising an edge, and d would be a dictionary of edge attributes ref_back determines whether the created subgraph refers back to the original graph, so that changes to the subgraph's attributes also affect the original graph, or whether it creates a new copy of the original graph. References: http://stackoverflow.com/questions/16150557/nx-subgraph-from-edges
[ "Creates", "a", "networkx", "graph", "that", "is", "a", "subgraph", "of", "G", "defined", "by", "the", "list", "of", "edges", "in", "edge_list", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1568-L1603
train
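A sketch on a MultiGraph (the function calls edges(keys=True), so G must be a multigraph); ref_back=False is used here because NetworkX 2.x subgraph views are read-only, which makes the ref_back=True branch fragile on newer versions:

import networkx as nx
import utool as ut
G = nx.MultiGraph()
G.add_edges_from([(1, 2), (2, 3), (3, 4)])
# keep only two keyed edges; node 4 drops out with its edge
H = ut.subgraph_from_edges(G, [(1, 2, 0), (2, 3, 0)], ref_back=False)
assert sorted(H.nodes()) == [1, 2, 3]
assert sorted(H.edges(keys=True)) == [(1, 2, 0), (2, 3, 0)]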
Erotemic/utool
utool/util_graph.py
all_multi_paths
def all_multi_paths(graph, source, target, data=False): r""" Returns specific paths along multi-edges from the source to this table. Multipaths are identified by edge keys. Returns all paths from source to target. This function treats multi-edges as distinct and returns the key value in each edge tuple that defines a path. Example: >>> # DISABLE_DOCTEST >>> from dtool.depcache_control import * # NOQA >>> from utool.util_graph import * # NOQA >>> from dtool.example_depcache import testdata_depc >>> depc = testdata_depc() >>> graph = depc.graph >>> source = depc.root >>> target = 'notchpair' >>> path_list1 = ut.all_multi_paths(graph, depc.root, 'notchpair') >>> path_list2 = ut.all_multi_paths(graph, depc.root, 'spam') >>> result1 = ('path_list1 = %s' % ut.repr3(path_list1, nl=1)) >>> result2 = ('path_list2 = %s' % ut.repr3(path_list2, nl=2)) >>> result = '\n'.join([result1, result2]) >>> print(result) path_list1 = [ [('dummy_annot', 'notch', 0), ('notch', 'notchpair', 0)], [('dummy_annot', 'notch', 0), ('notch', 'notchpair', 1)], ] path_list2 = [ [ ('dummy_annot', 'chip', 0), ('chip', 'keypoint', 0), ('keypoint', 'fgweight', 0), ('fgweight', 'spam', 0), ], [ ('dummy_annot', 'chip', 0), ('chip', 'keypoint', 0), ('keypoint', 'spam', 0), ], [ ('dummy_annot', 'chip', 0), ('chip', 'spam', 0), ], [ ('dummy_annot', 'probchip', 0), ('probchip', 'fgweight', 0), ('fgweight', 'spam', 0), ], ] """ path_multiedges = list(nx_all_simple_edge_paths(graph, source, target, keys=True, data=data)) return path_multiedges
python
def all_multi_paths(graph, source, target, data=False): r""" Returns specific paths along multi-edges from the source to this table. Multipaths are identified by edge keys. Returns all paths from source to target. This function treats multi-edges as distinct and returns the key value in each edge tuple that defines a path. Example: >>> # DISABLE_DOCTEST >>> from dtool.depcache_control import * # NOQA >>> from utool.util_graph import * # NOQA >>> from dtool.example_depcache import testdata_depc >>> depc = testdata_depc() >>> graph = depc.graph >>> source = depc.root >>> target = 'notchpair' >>> path_list1 = ut.all_multi_paths(graph, depc.root, 'notchpair') >>> path_list2 = ut.all_multi_paths(graph, depc.root, 'spam') >>> result1 = ('path_list1 = %s' % ut.repr3(path_list1, nl=1)) >>> result2 = ('path_list2 = %s' % ut.repr3(path_list2, nl=2)) >>> result = '\n'.join([result1, result2]) >>> print(result) path_list1 = [ [('dummy_annot', 'notch', 0), ('notch', 'notchpair', 0)], [('dummy_annot', 'notch', 0), ('notch', 'notchpair', 1)], ] path_list2 = [ [ ('dummy_annot', 'chip', 0), ('chip', 'keypoint', 0), ('keypoint', 'fgweight', 0), ('fgweight', 'spam', 0), ], [ ('dummy_annot', 'chip', 0), ('chip', 'keypoint', 0), ('keypoint', 'spam', 0), ], [ ('dummy_annot', 'chip', 0), ('chip', 'spam', 0), ], [ ('dummy_annot', 'probchip', 0), ('probchip', 'fgweight', 0), ('fgweight', 'spam', 0), ], ] """ path_multiedges = list(nx_all_simple_edge_paths(graph, source, target, keys=True, data=data)) return path_multiedges
[ "def", "all_multi_paths", "(", "graph", ",", "source", ",", "target", ",", "data", "=", "False", ")", ":", "path_multiedges", "=", "list", "(", "nx_all_simple_edge_paths", "(", "graph", ",", "source", ",", "target", ",", "keys", "=", "True", ",", "data", "=", "data", ")", ")", "return", "path_multiedges" ]
r""" Returns specific paths along multi-edges from the source to this table. Multipaths are identified by edge keys. Returns all paths from source to target. This function treats multi-edges as distinct and returns the key value in each edge tuple that defines a path. Example: >>> # DISABLE_DOCTEST >>> from dtool.depcache_control import * # NOQA >>> from utool.util_graph import * # NOQA >>> from dtool.example_depcache import testdata_depc >>> depc = testdata_depc() >>> graph = depc.graph >>> source = depc.root >>> target = 'notchpair' >>> path_list1 = ut.all_multi_paths(graph, depc.root, 'notchpair') >>> path_list2 = ut.all_multi_paths(graph, depc.root, 'spam') >>> result1 = ('path_list1 = %s' % ut.repr3(path_list1, nl=1)) >>> result2 = ('path_list2 = %s' % ut.repr3(path_list2, nl=2)) >>> result = '\n'.join([result1, result2]) >>> print(result) path_list1 = [ [('dummy_annot', 'notch', 0), ('notch', 'notchpair', 0)], [('dummy_annot', 'notch', 0), ('notch', 'notchpair', 1)], ] path_list2 = [ [ ('dummy_annot', 'chip', 0), ('chip', 'keypoint', 0), ('keypoint', 'fgweight', 0), ('fgweight', 'spam', 0), ], [ ('dummy_annot', 'chip', 0), ('chip', 'keypoint', 0), ('keypoint', 'spam', 0), ], [ ('dummy_annot', 'chip', 0), ('chip', 'spam', 0), ], [ ('dummy_annot', 'probchip', 0), ('probchip', 'fgweight', 0), ('fgweight', 'spam', 0), ], ]
[ "r", "Returns", "specific", "paths", "along", "multi", "-", "edges", "from", "the", "source", "to", "this", "table", ".", "Multipaths", "are", "identified", "by", "edge", "keys", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1613-L1666
train
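A lighter sketch than the depcache example above, using a parallel edge to show that each multi-edge key yields its own path:

import networkx as nx
import utool as ut
G = nx.MultiDiGraph()
G.add_edge('a', 'b')  # key 0
G.add_edge('a', 'b')  # key 1, parallel to the first
G.add_edge('b', 'c')  # key 0
paths = ut.all_multi_paths(G, 'a', 'c')
# two paths that differ only in the key of the ('a', 'b') leg
assert sorted(paths) == [[('a', 'b', 0), ('b', 'c', 0)],
                         [('a', 'b', 1), ('b', 'c', 0)]]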
Erotemic/utool
utool/util_graph.py
bfs_conditional
def bfs_conditional(G, source, reverse=False, keys=True, data=False, yield_nodes=True, yield_if=None, continue_if=None, visited_nodes=None, yield_source=False): """ Produce edges in a breadth-first-search starting at source, but only return nodes that satisfiy a condition, and only iterate past a node if it satisfies a different condition. conditions are callables that take (G, child, edge) and return true or false CommandLine: python -m utool.util_graph bfs_conditional Example: >>> # DISABLE_DOCTEST >>> import networkx as nx >>> import utool as ut >>> G = nx.Graph() >>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)]) >>> continue_if = lambda G, child, edge: True >>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False)) >>> print(result) [(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)] Example: >>> # ENABLE_DOCTEST >>> import networkx as nx >>> import utool as ut >>> G = nx.Graph() >>> continue_if = lambda G, child, edge: (child % 2 == 0) >>> yield_if = lambda G, child, edge: (child % 2 == 1) >>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10), >>> (4, 3), (3, 6), >>> (0, 2), (2, 4), (4, 6), (6, 10)]) >>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if, >>> yield_if=yield_if)) >>> print(result) [1, 3, 5] """ if reverse and hasattr(G, 'reverse'): G = G.reverse() if isinstance(G, nx.Graph): neighbors = functools.partial(G.edges, data=data) else: neighbors = functools.partial(G.edges, keys=keys, data=data) queue = collections.deque([]) if visited_nodes is None: visited_nodes = set([]) else: visited_nodes = set(visited_nodes) if source not in visited_nodes: if yield_nodes and yield_source: yield source visited_nodes.add(source) new_edges = neighbors(source) if isinstance(new_edges, list): new_edges = iter(new_edges) queue.append((source, new_edges)) while queue: parent, edges = queue[0] for edge in edges: child = edge[1] if yield_nodes: if child not in visited_nodes: if yield_if is None or yield_if(G, child, edge): yield child else: if yield_if is None or yield_if(G, child, edge): yield edge if child not in visited_nodes: visited_nodes.add(child) # Add new children to queue if the condition is satisfied if continue_if is None or continue_if(G, child, edge): new_edges = neighbors(child) if isinstance(new_edges, list): new_edges = iter(new_edges) queue.append((child, new_edges)) queue.popleft()
python
def bfs_conditional(G, source, reverse=False, keys=True, data=False, yield_nodes=True, yield_if=None, continue_if=None, visited_nodes=None, yield_source=False): """ Produce edges in a breadth-first-search starting at source, but only return nodes that satisfy a condition, and only iterate past a node if it satisfies a different condition. conditions are callables that take (G, child, edge) and return true or false CommandLine: python -m utool.util_graph bfs_conditional Example: >>> # DISABLE_DOCTEST >>> import networkx as nx >>> import utool as ut >>> G = nx.Graph() >>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)]) >>> continue_if = lambda G, child, edge: True >>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False)) >>> print(result) [(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)] Example: >>> # ENABLE_DOCTEST >>> import networkx as nx >>> import utool as ut >>> G = nx.Graph() >>> continue_if = lambda G, child, edge: (child % 2 == 0) >>> yield_if = lambda G, child, edge: (child % 2 == 1) >>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10), >>> (4, 3), (3, 6), >>> (0, 2), (2, 4), (4, 6), (6, 10)]) >>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if, >>> yield_if=yield_if)) >>> print(result) [1, 3, 5] """ if reverse and hasattr(G, 'reverse'): G = G.reverse() if isinstance(G, nx.Graph): neighbors = functools.partial(G.edges, data=data) else: neighbors = functools.partial(G.edges, keys=keys, data=data) queue = collections.deque([]) if visited_nodes is None: visited_nodes = set([]) else: visited_nodes = set(visited_nodes) if source not in visited_nodes: if yield_nodes and yield_source: yield source visited_nodes.add(source) new_edges = neighbors(source) if isinstance(new_edges, list): new_edges = iter(new_edges) queue.append((source, new_edges)) while queue: parent, edges = queue[0] for edge in edges: child = edge[1] if yield_nodes: if child not in visited_nodes: if yield_if is None or yield_if(G, child, edge): yield child else: if yield_if is None or yield_if(G, child, edge): yield edge if child not in visited_nodes: visited_nodes.add(child) # Add new children to queue if the condition is satisfied if continue_if is None or continue_if(G, child, edge): new_edges = neighbors(child) if isinstance(new_edges, list): new_edges = iter(new_edges) queue.append((child, new_edges)) queue.popleft()
[ "def", "bfs_conditional", "(", "G", ",", "source", ",", "reverse", "=", "False", ",", "keys", "=", "True", ",", "data", "=", "False", ",", "yield_nodes", "=", "True", ",", "yield_if", "=", "None", ",", "continue_if", "=", "None", ",", "visited_nodes", "=", "None", ",", "yield_source", "=", "False", ")", ":", "if", "reverse", "and", "hasattr", "(", "G", ",", "'reverse'", ")", ":", "G", "=", "G", ".", "reverse", "(", ")", "if", "isinstance", "(", "G", ",", "nx", ".", "Graph", ")", ":", "neighbors", "=", "functools", ".", "partial", "(", "G", ".", "edges", ",", "data", "=", "data", ")", "else", ":", "neighbors", "=", "functools", ".", "partial", "(", "G", ".", "edges", ",", "keys", "=", "keys", ",", "data", "=", "data", ")", "queue", "=", "collections", ".", "deque", "(", "[", "]", ")", "if", "visited_nodes", "is", "None", ":", "visited_nodes", "=", "set", "(", "[", "]", ")", "else", ":", "visited_nodes", "=", "set", "(", "visited_nodes", ")", "if", "source", "not", "in", "visited_nodes", ":", "if", "yield_nodes", "and", "yield_source", ":", "yield", "source", "visited_nodes", ".", "add", "(", "source", ")", "new_edges", "=", "neighbors", "(", "source", ")", "if", "isinstance", "(", "new_edges", ",", "list", ")", ":", "new_edges", "=", "iter", "(", "new_edges", ")", "queue", ".", "append", "(", "(", "source", ",", "new_edges", ")", ")", "while", "queue", ":", "parent", ",", "edges", "=", "queue", "[", "0", "]", "for", "edge", "in", "edges", ":", "child", "=", "edge", "[", "1", "]", "if", "yield_nodes", ":", "if", "child", "not", "in", "visited_nodes", ":", "if", "yield_if", "is", "None", "or", "yield_if", "(", "G", ",", "child", ",", "edge", ")", ":", "yield", "child", "else", ":", "if", "yield_if", "is", "None", "or", "yield_if", "(", "G", ",", "child", ",", "edge", ")", ":", "yield", "edge", "if", "child", "not", "in", "visited_nodes", ":", "visited_nodes", ".", "add", "(", "child", ")", "# Add new children to queue if the condition is satisfied", "if", "continue_if", "is", "None", "or", "continue_if", "(", "G", ",", "child", ",", "edge", ")", ":", "new_edges", "=", "neighbors", "(", "child", ")", "if", "isinstance", "(", "new_edges", ",", "list", ")", ":", "new_edges", "=", "iter", "(", "new_edges", ")", "queue", ".", "append", "(", "(", "child", ",", "new_edges", ")", ")", "queue", ".", "popleft", "(", ")" ]
Produce edges in a breadth-first-search starting at source, but only return nodes that satisfy a condition, and only iterate past a node if it satisfies a different condition. conditions are callables that take (G, child, edge) and return true or false CommandLine: python -m utool.util_graph bfs_conditional Example: >>> # DISABLE_DOCTEST >>> import networkx as nx >>> import utool as ut >>> G = nx.Graph() >>> G.add_edges_from([(1, 2), (1, 3), (2, 3), (2, 4)]) >>> continue_if = lambda G, child, edge: True >>> result = list(ut.bfs_conditional(G, 1, yield_nodes=False)) >>> print(result) [(1, 2), (1, 3), (2, 1), (2, 3), (2, 4), (3, 1), (3, 2), (4, 2)] Example: >>> # ENABLE_DOCTEST >>> import networkx as nx >>> import utool as ut >>> G = nx.Graph() >>> continue_if = lambda G, child, edge: (child % 2 == 0) >>> yield_if = lambda G, child, edge: (child % 2 == 1) >>> G.add_edges_from([(0, 1), (1, 3), (3, 5), (5, 10), >>> (4, 3), (3, 6), >>> (0, 2), (2, 4), (4, 6), (6, 10)]) >>> result = list(ut.bfs_conditional(G, 0, continue_if=continue_if, >>> yield_if=yield_if)) >>> print(result) [1, 3, 5]
[ "Produce", "edges", "in", "a", "breadth", "-", "first", "-", "search", "starting", "at", "source", "but", "only", "return", "nodes", "that", "satisfiy", "a", "condition", "and", "only", "iterate", "past", "a", "node", "if", "it", "satisfies", "a", "different", "condition", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1744-L1826
train
Erotemic/utool
utool/util_graph.py
color_nodes
def color_nodes(graph, labelattr='label', brightness=.878, outof=None, sat_adjust=None): """ Colors edges and nodes by nid """ import plottool as pt import utool as ut node_to_lbl = nx.get_node_attributes(graph, labelattr) unique_lbls = sorted(set(node_to_lbl.values())) ncolors = len(unique_lbls) if outof is None: if (ncolors) == 1: unique_colors = [pt.LIGHT_BLUE] elif (ncolors) == 2: # https://matplotlib.org/examples/color/named_colors.html unique_colors = ['royalblue', 'orange'] unique_colors = list(map(pt.color_funcs.ensure_base01, unique_colors)) else: unique_colors = pt.distinct_colors(ncolors, brightness=brightness) else: unique_colors = pt.distinct_colors(outof, brightness=brightness) if sat_adjust: unique_colors = [ pt.color_funcs.adjust_hsv_of_rgb(c, sat_adjust=sat_adjust) for c in unique_colors ] # Find edges and aids strictly between two nids if outof is None: lbl_to_color = ut.dzip(unique_lbls, unique_colors) else: gray = pt.color_funcs.ensure_base01('lightgray') unique_colors = [gray] + unique_colors offset = max(1, min(unique_lbls)) - 1 node_to_lbl = ut.map_vals(lambda nid: max(0, nid - offset), node_to_lbl) lbl_to_color = ut.dzip(range(outof + 1), unique_colors) node_to_color = ut.map_vals(lbl_to_color, node_to_lbl) nx.set_node_attributes(graph, name='color', values=node_to_color) ut.nx_ensure_agraph_color(graph)
python
def color_nodes(graph, labelattr='label', brightness=.878, outof=None, sat_adjust=None): """ Colors edges and nodes by nid """ import plottool as pt import utool as ut node_to_lbl = nx.get_node_attributes(graph, labelattr) unique_lbls = sorted(set(node_to_lbl.values())) ncolors = len(unique_lbls) if outof is None: if (ncolors) == 1: unique_colors = [pt.LIGHT_BLUE] elif (ncolors) == 2: # https://matplotlib.org/examples/color/named_colors.html unique_colors = ['royalblue', 'orange'] unique_colors = list(map(pt.color_funcs.ensure_base01, unique_colors)) else: unique_colors = pt.distinct_colors(ncolors, brightness=brightness) else: unique_colors = pt.distinct_colors(outof, brightness=brightness) if sat_adjust: unique_colors = [ pt.color_funcs.adjust_hsv_of_rgb(c, sat_adjust=sat_adjust) for c in unique_colors ] # Find edges and aids strictly between two nids if outof is None: lbl_to_color = ut.dzip(unique_lbls, unique_colors) else: gray = pt.color_funcs.ensure_base01('lightgray') unique_colors = [gray] + unique_colors offset = max(1, min(unique_lbls)) - 1 node_to_lbl = ut.map_vals(lambda nid: max(0, nid - offset), node_to_lbl) lbl_to_color = ut.dzip(range(outof + 1), unique_colors) node_to_color = ut.map_vals(lbl_to_color, node_to_lbl) nx.set_node_attributes(graph, name='color', values=node_to_color) ut.nx_ensure_agraph_color(graph)
[ "def", "color_nodes", "(", "graph", ",", "labelattr", "=", "'label'", ",", "brightness", "=", ".878", ",", "outof", "=", "None", ",", "sat_adjust", "=", "None", ")", ":", "import", "plottool", "as", "pt", "import", "utool", "as", "ut", "node_to_lbl", "=", "nx", ".", "get_node_attributes", "(", "graph", ",", "labelattr", ")", "unique_lbls", "=", "sorted", "(", "set", "(", "node_to_lbl", ".", "values", "(", ")", ")", ")", "ncolors", "=", "len", "(", "unique_lbls", ")", "if", "outof", "is", "None", ":", "if", "(", "ncolors", ")", "==", "1", ":", "unique_colors", "=", "[", "pt", ".", "LIGHT_BLUE", "]", "elif", "(", "ncolors", ")", "==", "2", ":", "# https://matplotlib.org/examples/color/named_colors.html", "unique_colors", "=", "[", "'royalblue'", ",", "'orange'", "]", "unique_colors", "=", "list", "(", "map", "(", "pt", ".", "color_funcs", ".", "ensure_base01", ",", "unique_colors", ")", ")", "else", ":", "unique_colors", "=", "pt", ".", "distinct_colors", "(", "ncolors", ",", "brightness", "=", "brightness", ")", "else", ":", "unique_colors", "=", "pt", ".", "distinct_colors", "(", "outof", ",", "brightness", "=", "brightness", ")", "if", "sat_adjust", ":", "unique_colors", "=", "[", "pt", ".", "color_funcs", ".", "adjust_hsv_of_rgb", "(", "c", ",", "sat_adjust", "=", "sat_adjust", ")", "for", "c", "in", "unique_colors", "]", "# Find edges and aids strictly between two nids", "if", "outof", "is", "None", ":", "lbl_to_color", "=", "ut", ".", "dzip", "(", "unique_lbls", ",", "unique_colors", ")", "else", ":", "gray", "=", "pt", ".", "color_funcs", ".", "ensure_base01", "(", "'lightgray'", ")", "unique_colors", "=", "[", "gray", "]", "+", "unique_colors", "offset", "=", "max", "(", "1", ",", "min", "(", "unique_lbls", ")", ")", "-", "1", "node_to_lbl", "=", "ut", ".", "map_vals", "(", "lambda", "nid", ":", "max", "(", "0", ",", "nid", "-", "offset", ")", ",", "node_to_lbl", ")", "lbl_to_color", "=", "ut", ".", "dzip", "(", "range", "(", "outof", "+", "1", ")", ",", "unique_colors", ")", "node_to_color", "=", "ut", ".", "map_vals", "(", "lbl_to_color", ",", "node_to_lbl", ")", "nx", ".", "set_node_attributes", "(", "graph", ",", "name", "=", "'color'", ",", "values", "=", "node_to_color", ")", "ut", ".", "nx_ensure_agraph_color", "(", "graph", ")" ]
Colors edges and nodes by nid
[ "Colors", "edges", "and", "nodes", "by", "nid" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L1829-L1865
train
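A minimal usage sketch for the color_nodes record above. It assumes plottool is installed (the function imports it internally) and that the function is reachable from the utool top level like the other utilities in this dump; the graph and label values are invented for illustration:

import networkx as nx
import utool as ut

# Each node carries an integer name id under the default 'label' attribute
# that color_nodes reads via nx.get_node_attributes.
graph = nx.Graph([(1, 2), (2, 3), (3, 4)])
nx.set_node_attributes(graph, name='label', values={1: 7, 2: 7, 3: 8, 4: 8})

# With exactly two unique labels this picks the royalblue/orange pair
# hard-coded in the function, then writes a 'color' attribute per node.
ut.color_nodes(graph, labelattr='label')
print(nx.get_node_attributes(graph, 'color'))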
Erotemic/utool
utool/util_graph.py
approx_min_num_components
def approx_min_num_components(nodes, negative_edges): """ Find approximate minimum number of connected components possible Each edge represents that two nodes must be separated This code doesn't solve the problem. The problem is NP-complete and reduces to minimum clique cover (MCC). This is only an approximate solution. Not sure what the approximation ratio is. CommandLine: python -m utool.util_graph approx_min_num_components Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> edges = [(1, 2), (2, 3), (3, 1), >>> (4, 5), (5, 6), (6, 4), >>> (7, 8), (8, 9), (9, 7), >>> (1, 4), (4, 7), (7, 1), >>> ] >>> g_pos = nx.Graph() >>> g_pos.add_edges_from(edges) >>> g_neg = nx.complement(g_pos) >>> #import plottool as pt >>> #pt.qt4ensure() >>> #pt.show_nx(g_pos) >>> #pt.show_nx(g_neg) >>> negative_edges = g_neg.edges() >>> nodes = [1, 2, 3, 4, 5, 6, 7] >>> negative_edges = [(1, 2), (2, 3), (4, 5)] >>> result = approx_min_num_components(nodes, negative_edges) >>> print(result) 2 """ import utool as ut num = 0 g_neg = nx.Graph() g_neg.add_nodes_from(nodes) g_neg.add_edges_from(negative_edges) # Collapse all nodes with degree 0 if nx.__version__.startswith('2'): deg0_nodes = [n for n, d in g_neg.degree() if d == 0] else: deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0] for u, v in ut.itertwo(deg0_nodes): nx_contracted_nodes(g_neg, v, u, inplace=True) # g_neg = nx.contracted_nodes(g_neg, v, u, self_loops=False) # Initialize unused nodes to be everything unused = list(g_neg.nodes()) # complement of the graph contains all possible positive edges g_pos = nx.complement(g_neg) if False: from networkx.algorithms.approximation import clique maxiset, cliques = clique.clique_removal(g_pos) num = len(cliques) return num # Iterate until we have used all nodes while len(unused) > 0: # Seed a new "minimum component" num += 1 # Grab a random unused node n1 #idx1 = np.random.randint(0, len(unused)) idx1 = 0 n1 = unused[idx1] unused.remove(n1) neigbs = list(g_pos.neighbors(n1)) neigbs = ut.isect(neigbs, unused) while len(neigbs) > 0: # Find node n2, that n1 could be connected to #idx2 = np.random.randint(0, len(neigbs)) idx2 = 0 n2 = neigbs[idx2] unused.remove(n2) # Collapse negative information of n1 and n2 g_neg = nx.contracted_nodes(g_neg, n1, n2) # Compute new possible positive edges g_pos = nx.complement(g_neg) # Iterate until n1 has no more possible connections neigbs = list(g_pos.neighbors(n1)) neigbs = ut.isect(neigbs, unused) print('num = %r' % (num,)) return num
python
def approx_min_num_components(nodes, negative_edges): """ Find approximate minimum number of connected components possible Each edge represents that two nodes must be separated This code doesn't solve the problem. The problem is NP-complete and reduces to minimum clique cover (MCC). This is only an approximate solution. Not sure what the approximation ratio is. CommandLine: python -m utool.util_graph approx_min_num_components Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> edges = [(1, 2), (2, 3), (3, 1), >>> (4, 5), (5, 6), (6, 4), >>> (7, 8), (8, 9), (9, 7), >>> (1, 4), (4, 7), (7, 1), >>> ] >>> g_pos = nx.Graph() >>> g_pos.add_edges_from(edges) >>> g_neg = nx.complement(g_pos) >>> #import plottool as pt >>> #pt.qt4ensure() >>> #pt.show_nx(g_pos) >>> #pt.show_nx(g_neg) >>> negative_edges = g_neg.edges() >>> nodes = [1, 2, 3, 4, 5, 6, 7] >>> negative_edges = [(1, 2), (2, 3), (4, 5)] >>> result = approx_min_num_components(nodes, negative_edges) >>> print(result) 2 """ import utool as ut num = 0 g_neg = nx.Graph() g_neg.add_nodes_from(nodes) g_neg.add_edges_from(negative_edges) # Collapse all nodes with degree 0 if nx.__version__.startswith('2'): deg0_nodes = [n for n, d in g_neg.degree() if d == 0] else: deg0_nodes = [n for n, d in g_neg.degree_iter() if d == 0] for u, v in ut.itertwo(deg0_nodes): nx_contracted_nodes(g_neg, v, u, inplace=True) # g_neg = nx.contracted_nodes(g_neg, v, u, self_loops=False) # Initialize unused nodes to be everything unused = list(g_neg.nodes()) # complement of the graph contains all possible positive edges g_pos = nx.complement(g_neg) if False: from networkx.algorithms.approximation import clique maxiset, cliques = clique.clique_removal(g_pos) num = len(cliques) return num # Iterate until we have used all nodes while len(unused) > 0: # Seed a new "minimum component" num += 1 # Grab a random unused node n1 #idx1 = np.random.randint(0, len(unused)) idx1 = 0 n1 = unused[idx1] unused.remove(n1) neigbs = list(g_pos.neighbors(n1)) neigbs = ut.isect(neigbs, unused) while len(neigbs) > 0: # Find node n2, that n1 could be connected to #idx2 = np.random.randint(0, len(neigbs)) idx2 = 0 n2 = neigbs[idx2] unused.remove(n2) # Collapse negative information of n1 and n2 g_neg = nx.contracted_nodes(g_neg, n1, n2) # Compute new possible positive edges g_pos = nx.complement(g_neg) # Iterate until n1 has no more possible connections neigbs = list(g_pos.neighbors(n1)) neigbs = ut.isect(neigbs, unused) print('num = %r' % (num,)) return num
[ "def", "approx_min_num_components", "(", "nodes", ",", "negative_edges", ")", ":", "import", "utool", "as", "ut", "num", "=", "0", "g_neg", "=", "nx", ".", "Graph", "(", ")", "g_neg", ".", "add_nodes_from", "(", "nodes", ")", "g_neg", ".", "add_edges_from", "(", "negative_edges", ")", "# Collapse all nodes with degree 0", "if", "nx", ".", "__version__", ".", "startswith", "(", "'2'", ")", ":", "deg0_nodes", "=", "[", "n", "for", "n", ",", "d", "in", "g_neg", ".", "degree", "(", ")", "if", "d", "==", "0", "]", "else", ":", "deg0_nodes", "=", "[", "n", "for", "n", ",", "d", "in", "g_neg", ".", "degree_iter", "(", ")", "if", "d", "==", "0", "]", "for", "u", ",", "v", "in", "ut", ".", "itertwo", "(", "deg0_nodes", ")", ":", "nx_contracted_nodes", "(", "g_neg", ",", "v", ",", "u", ",", "inplace", "=", "True", ")", "# g_neg = nx.contracted_nodes(g_neg, v, u, self_loops=False)", "# Initialize unused nodes to be everything", "unused", "=", "list", "(", "g_neg", ".", "nodes", "(", ")", ")", "# complement of the graph contains all possible positive edges", "g_pos", "=", "nx", ".", "complement", "(", "g_neg", ")", "if", "False", ":", "from", "networkx", ".", "algorithms", ".", "approximation", "import", "clique", "maxiset", ",", "cliques", "=", "clique", ".", "clique_removal", "(", "g_pos", ")", "num", "=", "len", "(", "cliques", ")", "return", "num", "# Iterate until we have used all nodes", "while", "len", "(", "unused", ")", ">", "0", ":", "# Seed a new \"minimum component\"", "num", "+=", "1", "# Grab a random unused node n1", "#idx1 = np.random.randint(0, len(unused))", "idx1", "=", "0", "n1", "=", "unused", "[", "idx1", "]", "unused", ".", "remove", "(", "n1", ")", "neigbs", "=", "list", "(", "g_pos", ".", "neighbors", "(", "n1", ")", ")", "neigbs", "=", "ut", ".", "isect", "(", "neigbs", ",", "unused", ")", "while", "len", "(", "neigbs", ")", ">", "0", ":", "# Find node n2, that n1 could be connected to", "#idx2 = np.random.randint(0, len(neigbs))", "idx2", "=", "0", "n2", "=", "neigbs", "[", "idx2", "]", "unused", ".", "remove", "(", "n2", ")", "# Collapse negative information of n1 and n2", "g_neg", "=", "nx", ".", "contracted_nodes", "(", "g_neg", ",", "n1", ",", "n2", ")", "# Compute new possible positive edges", "g_pos", "=", "nx", ".", "complement", "(", "g_neg", ")", "# Iterate until n1 has no more possible connections", "neigbs", "=", "list", "(", "g_pos", ".", "neighbors", "(", "n1", ")", ")", "neigbs", "=", "ut", ".", "isect", "(", "neigbs", ",", "unused", ")", "print", "(", "'num = %r'", "%", "(", "num", ",", ")", ")", "return", "num" ]
Find approximate minimum number of connected components possible Each edge represents that two nodes must be separated This code doesn't solve the problem. The problem is NP-complete and reduces to minimum clique cover (MCC). This is only an approximate solution. Not sure what the approximation ratio is. CommandLine: python -m utool.util_graph approx_min_num_components Example: >>> # ENABLE_DOCTEST >>> from utool.util_graph import * # NOQA >>> import utool as ut >>> nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> edges = [(1, 2), (2, 3), (3, 1), >>> (4, 5), (5, 6), (6, 4), >>> (7, 8), (8, 9), (9, 7), >>> (1, 4), (4, 7), (7, 1), >>> ] >>> g_pos = nx.Graph() >>> g_pos.add_edges_from(edges) >>> g_neg = nx.complement(g_pos) >>> #import plottool as pt >>> #pt.qt4ensure() >>> #pt.show_nx(g_pos) >>> #pt.show_nx(g_neg) >>> negative_edges = g_neg.edges() >>> nodes = [1, 2, 3, 4, 5, 6, 7] >>> negative_edges = [(1, 2), (2, 3), (4, 5)] >>> result = approx_min_num_components(nodes, negative_edges) >>> print(result) 2
[ "Find", "approximate", "minimum", "number", "of", "connected", "components", "possible", "Each", "edge", "represents", "that", "two", "nodes", "must", "be", "separated" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L2034-L2122
train
walchko/pyrk
pyrk/pyrk.py
RK4.solve
def solve(self, y, h, t_end): """ Given a function, initial conditions, step size and end value, this will calculate an unforced system. The default start time is t=0.0, but this can be changed. y - initial state h - step size t_end - stop time """ ts = [] ys = [] yi = y ti = 0.0 while ti < t_end: ts.append(ti) yi = self.step(yi, None, ti, h) ys.append(yi) ti += h return ts, ys
python
def solve(self, y, h, t_end): """ Given a function, initial conditions, step size and end value, this will calculate an unforced system. The default start time is t=0.0, but this can be changed. y - initial state h - step size t_end - stop time """ ts = [] ys = [] yi = y ti = 0.0 while ti < t_end: ts.append(ti) yi = self.step(yi, None, ti, h) ys.append(yi) ti += h return ts, ys
[ "def", "solve", "(", "self", ",", "y", ",", "h", ",", "t_end", ")", ":", "ts", "=", "[", "]", "ys", "=", "[", "]", "yi", "=", "y", "ti", "=", "0.0", "while", "ti", "<", "t_end", ":", "ts", ".", "append", "(", "ti", ")", "yi", "=", "self", ".", "step", "(", "yi", ",", "None", ",", "ti", ",", "h", ")", "ys", ".", "append", "(", "yi", ")", "ti", "+=", "h", "return", "ts", ",", "ys" ]
Given a function, initial conditions, step size and end value, this will calculate an unforced system. The default start time is t=0.0, but this can be changed. y - initial state h - step size t_end - stop time
[ "Given", "a", "function", "initial", "conditions", "step", "size", "and", "end", "value", "this", "will", "calculate", "an", "unforced", "system", ".", "The", "default", "start", "time", "is", "t", "=", "0", ".", "0", "but", "this", "can", "be", "changed", "." ]
f75dce843e795343d37cfe20d780989f56f0c418
https://github.com/walchko/pyrk/blob/f75dce843e795343d37cfe20d780989f56f0c418/pyrk/pyrk.py#L23-L42
train
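A small end-to-end sketch of the solver above, integrating exponential decay. It assumes the RK4 constructor (not part of this record) stores the derivative callable as self.func, and that the class is importable from pyrk:

from pyrk import RK4  # import path assumed

def deriv(t, y, u):
    # dy/dt = -y; solve() passes u=None, so the control input is unused.
    return -y

rk = RK4(deriv)
ts, ys = rk.solve(y=1.0, h=0.01, t_end=1.0)
# The final value should be close to exp(-1) ~= 0.3679.
print(ts[-1], ys[-1])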
walchko/pyrk
pyrk/pyrk.py
RK4.step
def step(self, y, u, t, h): """ This is called by solve, but can be called by the user who wants to run through an integration with a control force. y - state at t u - control inputs at t t - time h - step size """ k1 = h * self.func(t, y, u) k2 = h * self.func(t + .5*h, y + .5*h*k1, u) k3 = h * self.func(t + .5*h, y + .5*h*k2, u) k4 = h * self.func(t + h, y + h*k3, u) return y + (k1 + 2*k2 + 2*k3 + k4) / 6.0
python
def step(self, y, u, t, h): """ This is called by solve, but can be called by the user who wants to run through an integration with a control force. y - state at t u - control inputs at t t - time h - step size """ k1 = h * self.func(t, y, u) k2 = h * self.func(t + .5*h, y + .5*h*k1, u) k3 = h * self.func(t + .5*h, y + .5*h*k2, u) k4 = h * self.func(t + h, y + h*k3, u) return y + (k1 + 2*k2 + 2*k3 + k4) / 6.0
[ "def", "step", "(", "self", ",", "y", ",", "u", ",", "t", ",", "h", ")", ":", "k1", "=", "h", "*", "self", ".", "func", "(", "t", ",", "y", ",", "u", ")", "k2", "=", "h", "*", "self", ".", "func", "(", "t", "+", ".5", "*", "h", ",", "y", "+", ".5", "*", "h", "*", "k1", ",", "u", ")", "k3", "=", "h", "*", "self", ".", "func", "(", "t", "+", ".5", "*", "h", ",", "y", "+", ".5", "*", "h", "*", "k2", ",", "u", ")", "k4", "=", "h", "*", "self", ".", "func", "(", "t", "+", "h", ",", "y", "+", "h", "*", "k3", ",", "u", ")", "return", "y", "+", "(", "k1", "+", "2", "*", "k2", "+", "2", "*", "k3", "+", "k4", ")", "/", "6.0" ]
This is called by solve, but can be called by the user who wants to run through an integration with a control force. y - state at t u - control inputs at t t - time h - step size
[ "This", "is", "called", "by", "solve", "but", "can", "be", "called", "by", "the", "user", "who", "wants", "to", "run", "through", "an", "integration", "with", "a", "control", "force", "." ]
f75dce843e795343d37cfe20d780989f56f0c418
https://github.com/walchko/pyrk/blob/f75dce843e795343d37cfe20d780989f56f0c418/pyrk/pyrk.py#L44-L58
train
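The manual-stepping path that the step docstring mentions, where the caller supplies a control input at every step; the same constructor assumption as above applies:

from pyrk import RK4  # import path assumed

def deriv(t, y, u):
    # Forced first-order system dy/dt = -y + u.
    return -y + u

rk = RK4(deriv)
y, t, h = 0.0, 0.0, 0.01
while t < 2.0:
    u = 1.0 if t < 1.0 else 0.0  # step input that switches off at t=1
    y = rk.step(y, u, t, h)
    t += h
print(y)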
glormph/msstitch
src/app/actions/prottable/bestpeptide.py
generate_proteins
def generate_proteins(pepfn, proteins, pepheader, scorecol, minlog, higherbetter=True, protcol=False): """Best peptide for each protein in a table""" protein_peptides = {} if minlog: higherbetter = False if not protcol: protcol = peptabledata.HEADER_MASTERPROTEINS for psm in reader.generate_tsv_psms(pepfn, pepheader): p_acc = psm[protcol] if ';' in p_acc: continue protein_peptides = evaluate_peptide(protein_peptides, psm, p_acc, higherbetter, scorecol, fncol=False) if minlog: try: nextbestscore = min([pep['score'] for pep in protein_peptides.values() if pep['score'] > 0]) except ValueError: import sys sys.stderr.write('Cannot find score of type {} which is above 0. ' 'Only scores above zero can have a -log value. ' 'Exiting.'.format(scorecol)) sys.exit(1) nextbestscore = -log(nextbestscore, 10) for protein in proteins: try: peptide = protein_peptides[protein[prottabledata.HEADER_PROTEIN]] except KeyError: print('WARNING - protein {} not found in peptide ' 'table'.format(protein[prottabledata.HEADER_PROTEIN])) peptide = {'score': 'NA'} if minlog and peptide['score'] != 'NA': peptide['score'] = log_score(peptide['score'], nextbestscore) protein[prottabledata.HEADER_QSCORE] = str( peptide['score']) yield protein
python
def generate_proteins(pepfn, proteins, pepheader, scorecol, minlog, higherbetter=True, protcol=False): """Best peptide for each protein in a table""" protein_peptides = {} if minlog: higherbetter = False if not protcol: protcol = peptabledata.HEADER_MASTERPROTEINS for psm in reader.generate_tsv_psms(pepfn, pepheader): p_acc = psm[protcol] if ';' in p_acc: continue protein_peptides = evaluate_peptide(protein_peptides, psm, p_acc, higherbetter, scorecol, fncol=False) if minlog: try: nextbestscore = min([pep['score'] for pep in protein_peptides.values() if pep['score'] > 0]) except ValueError: import sys sys.stderr.write('Cannot find score of type {} which is above 0. ' 'Only scores above zero can have a -log value. ' 'Exiting.'.format(scorecol)) sys.exit(1) nextbestscore = -log(nextbestscore, 10) for protein in proteins: try: peptide = protein_peptides[protein[prottabledata.HEADER_PROTEIN]] except KeyError: print('WARNING - protein {} not found in peptide ' 'table'.format(protein[prottabledata.HEADER_PROTEIN])) peptide = {'score': 'NA'} if minlog and peptide['score'] != 'NA': peptide['score'] = log_score(peptide['score'], nextbestscore) protein[prottabledata.HEADER_QSCORE] = str( peptide['score']) yield protein
[ "def", "generate_proteins", "(", "pepfn", ",", "proteins", ",", "pepheader", ",", "scorecol", ",", "minlog", ",", "higherbetter", "=", "True", ",", "protcol", "=", "False", ")", ":", "protein_peptides", "=", "{", "}", "if", "minlog", ":", "higherbetter", "=", "False", "if", "not", "protcol", ":", "protcol", "=", "peptabledata", ".", "HEADER_MASTERPROTEINS", "for", "psm", "in", "reader", ".", "generate_tsv_psms", "(", "pepfn", ",", "pepheader", ")", ":", "p_acc", "=", "psm", "[", "protcol", "]", "if", "';'", "in", "p_acc", ":", "continue", "protein_peptides", "=", "evaluate_peptide", "(", "protein_peptides", ",", "psm", ",", "p_acc", ",", "higherbetter", ",", "scorecol", ",", "fncol", "=", "False", ")", "if", "minlog", ":", "try", ":", "nextbestscore", "=", "min", "(", "[", "pep", "[", "'score'", "]", "for", "pep", "in", "protein_peptides", ".", "values", "(", ")", "if", "pep", "[", "'score'", "]", ">", "0", "]", ")", "except", "ValueError", ":", "import", "sys", "sys", ".", "stderr", ".", "write", "(", "'Cannot find score of type {} which is above 0. '", "'Only scores above zero can have a -log value. '", "'Exiting.'", ".", "format", "(", "scorecol", ")", ")", "sys", ".", "exit", "(", "1", ")", "nextbestscore", "=", "-", "log", "(", "nextbestscore", ",", "10", ")", "for", "protein", "in", "proteins", ":", "try", ":", "peptide", "=", "protein_peptides", "[", "protein", "[", "prottabledata", ".", "HEADER_PROTEIN", "]", "]", "except", "KeyError", ":", "print", "(", "'WARNING - protein {} not found in peptide '", "'table'", ".", "format", "(", "protein", "[", "prottabledata", ".", "HEADER_PROTEIN", "]", ")", ")", "peptide", "=", "{", "'score'", ":", "'NA'", "}", "if", "minlog", "and", "peptide", "[", "'score'", "]", "!=", "'NA'", ":", "peptide", "[", "'score'", "]", "=", "log_score", "(", "peptide", "[", "'score'", "]", ",", "nextbestscore", ")", "protein", "[", "prottabledata", ".", "HEADER_QSCORE", "]", "=", "str", "(", "peptide", "[", "'score'", "]", ")", "yield", "protein" ]
Best peptide for each protein in a table
[ "Best", "peptide", "for", "each", "protein", "in", "a", "table" ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/prottable/bestpeptide.py#L8-L46
train
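The record above depends on msstitch's reader and header modules, so it is not runnable in isolation; the core selection rule (keep the best-scoring peptide per protein, then -log10 the scores when minlog is set) can be sketched standalone. How the original log_score helper treats zero scores is inferred from the nextbestscore computation, not shown in this record:

from math import log

# Toy (protein, score) pairs standing in for peptide-table rows; with
# minlog=True lower scores are better, as in the function above.
rows = [('P1', 0.01), ('P1', 0.2), ('P2', 0.0), ('P2', 0.05)]

best = {}
for prot, score in rows:
    if prot not in best or score < best[prot]:
        best[prot] = score

# -log10 transform; zero scores fall back to the next-best positive score.
floor = -log(min(s for s in best.values() if s > 0), 10)
for prot, score in best.items():
    print(prot, -log(score, 10) if score > 0 else floor)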
LEMS/pylems
lems/model/simulation.py
Simulation.add
def add(self, child): """ Adds a typed child object to the simulation spec. @param child: Child object to be added. """ if isinstance(child, Run): self.add_run(child) elif isinstance(child, Record): self.add_record(child) elif isinstance(child, EventRecord): self.add_event_record(child) elif isinstance(child, DataDisplay): self.add_data_display(child) elif isinstance(child, DataWriter): self.add_data_writer(child) elif isinstance(child, EventWriter): self.add_event_writer(child) else: raise ModelError('Unsupported child element')
python
def add(self, child): """ Adds a typed child object to the simulation spec. @param child: Child object to be added. """ if isinstance(child, Run): self.add_run(child) elif isinstance(child, Record): self.add_record(child) elif isinstance(child, EventRecord): self.add_event_record(child) elif isinstance(child, DataDisplay): self.add_data_display(child) elif isinstance(child, DataWriter): self.add_data_writer(child) elif isinstance(child, EventWriter): self.add_event_writer(child) else: raise ModelError('Unsupported child element')
[ "def", "add", "(", "self", ",", "child", ")", ":", "if", "isinstance", "(", "child", ",", "Run", ")", ":", "self", ".", "add_run", "(", "child", ")", "elif", "isinstance", "(", "child", ",", "Record", ")", ":", "self", ".", "add_record", "(", "child", ")", "elif", "isinstance", "(", "child", ",", "EventRecord", ")", ":", "self", ".", "add_event_record", "(", "child", ")", "elif", "isinstance", "(", "child", ",", "DataDisplay", ")", ":", "self", ".", "add_data_display", "(", "child", ")", "elif", "isinstance", "(", "child", ",", "DataWriter", ")", ":", "self", ".", "add_data_writer", "(", "child", ")", "elif", "isinstance", "(", "child", ",", "EventWriter", ")", ":", "self", ".", "add_event_writer", "(", "child", ")", "else", ":", "raise", "ModelError", "(", "'Unsupported child element'", ")" ]
Adds a typed child object to the simulation spec. @param child: Child object to be added.
[ "Adds", "a", "typed", "child", "object", "to", "the", "simulation", "spec", "." ]
4eeb719d2f23650fe16c38626663b69b5c83818b
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/simulation.py#L345-L365
train
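A sketch of the isinstance dispatch above. The child-class constructors are not part of this record, so the keyword arguments here are hypothetical:

from lems.model.simulation import Simulation, Run, Record  # paths assumed

sim = Simulation()
# Each add() call is routed by type: Run -> add_run, Record -> add_record.
sim.add(Run(component='net1', variable='t', increment='0.1ms', total='500ms'))
sim.add(Record(quantity='v', scale='1mV'))
# Any unrecognised child type raises ModelError('Unsupported child element'):
# sim.add(object())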
steveYeah/PyBomb
pybomb/clients/game_client.py
GameClient.fetch
def fetch(self, id_, return_fields=None): """ Wrapper for fetching details of game by ID :param id_: int :param return_fields: tuple :return: pybomb.clients.Response """ game_params = {"id": id_} if return_fields is not None: self._validate_return_fields(return_fields) field_list = ",".join(return_fields) game_params["field_list"] = field_list response = self._query(game_params, direct=True) return response
python
def fetch(self, id_, return_fields=None): """ Wrapper for fetching details of game by ID :param id_: int :param return_fields: tuple :return: pybomb.clients.Response """ game_params = {"id": id_} if return_fields is not None: self._validate_return_fields(return_fields) field_list = ",".join(return_fields) game_params["field_list"] = field_list response = self._query(game_params, direct=True) return response
[ "def", "fetch", "(", "self", ",", "id_", ",", "return_fields", "=", "None", ")", ":", "game_params", "=", "{", "\"id\"", ":", "id_", "}", "if", "return_fields", "is", "not", "None", ":", "self", ".", "_validate_return_fields", "(", "return_fields", ")", "field_list", "=", "\",\"", ".", "join", "(", "return_fields", ")", "game_params", "[", "\"field_list\"", "]", "=", "field_list", "response", "=", "self", ".", "_query", "(", "game_params", ",", "direct", "=", "True", ")", "return", "response" ]
Wrapper for fetching details of game by ID :param id_: int :param return_fields: tuple :return: pybomb.clients.Response
[ "Wrapper", "for", "fetching", "details", "of", "game", "by", "ID" ]
54045d74e642f8a1c4366c24bd6a330ae3da6257
https://github.com/steveYeah/PyBomb/blob/54045d74e642f8a1c4366c24bd6a330ae3da6257/pybomb/clients/game_client.py#L57-L76
train
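Typical use of the fetch wrapper, assuming the client is constructed with a Giant Bomb API key and that the returned object exposes the parsed results; both assumptions sit outside this record:

from pybomb import GameClient  # import path assumed

client = GameClient('your-api-key')
# Restrict the payload to two fields; an unknown field name would make
# _validate_return_fields raise before any request is sent.
response = client.fetch(12345, return_fields=('id', 'name'))
print(response.results)  # attribute name assumed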
glormph/msstitch
src/app/drivers/base.py
BaseDriver.define_options
def define_options(self, names, parser_options=None): """Given a list of option names, this returns a dict of option definitions, taken from parser_options and falling back to the shared options, keyed by their command line argument. These can then be used to populate the argparser.""" def copy_option(options, name): return {k: v for k, v in options[name].items()} if parser_options is None: parser_options = {} options = {} for name in names: try: option = copy_option(parser_options, name) except KeyError: option = copy_option(shared_options, name) try: options.update({option['clarg']: option}) except TypeError: options.update({option['clarg'][0]: option}) return options
python
def define_options(self, names, parser_options=None): """Given a list of option names, this returns a dict of option definitions, taken from parser_options and falling back to the shared options, keyed by their command line argument. These can then be used to populate the argparser.""" def copy_option(options, name): return {k: v for k, v in options[name].items()} if parser_options is None: parser_options = {} options = {} for name in names: try: option = copy_option(parser_options, name) except KeyError: option = copy_option(shared_options, name) try: options.update({option['clarg']: option}) except TypeError: options.update({option['clarg'][0]: option}) return options
[ "def", "define_options", "(", "self", ",", "names", ",", "parser_options", "=", "None", ")", ":", "def", "copy_option", "(", "options", ",", "name", ")", ":", "return", "{", "k", ":", "v", "for", "k", ",", "v", "in", "options", "[", "name", "]", ".", "items", "(", ")", "}", "if", "parser_options", "is", "None", ":", "parser_options", "=", "{", "}", "options", "=", "{", "}", "for", "name", "in", "names", ":", "try", ":", "option", "=", "copy_option", "(", "parser_options", ",", "name", ")", "except", "KeyError", ":", "option", "=", "copy_option", "(", "shared_options", ",", "name", ")", "try", ":", "options", ".", "update", "(", "{", "option", "[", "'clarg'", "]", ":", "option", "}", ")", "except", "TypeError", ":", "options", ".", "update", "(", "{", "option", "[", "'clarg'", "]", "[", "0", "]", ":", "option", "}", ")", "return", "options" ]
Given a list of option names, this returns a dict of option definitions, taken from parser_options and falling back to the shared options, keyed by their command line argument. These can then be used to populate the argparser.
[ "Given", "a", "list", "of", "option", "names", "this", "returns", "a", "list", "of", "dicts", "defined", "in", "all_options", "and", "self", ".", "shared_options", ".", "These", "can", "then", "be", "used", "to", "populate", "the", "argparser", "with" ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/base.py#L30-L48
train
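The merging behaviour is easiest to see with toy definitions. Here shared_options is faked locally, since the real module-level dict is not part of this record; the loop mirrors define_options minus the driver class around it:

shared_options = {
    'fn': {'clarg': '-i', 'help': 'Input file'},
    'outdir': {'clarg': ('-d', '--outdir'), 'help': 'Output directory'},
}
parser_options = {
    'fn': {'clarg': '-i', 'help': 'Peptide table input'},  # overrides shared
}

# Parser-specific definitions win; tuple clargs are keyed by their first flag.
options = {}
for name in ('fn', 'outdir'):
    source = parser_options if name in parser_options else shared_options
    option = dict(source[name])
    clarg = option['clarg']
    options[clarg if isinstance(clarg, str) else clarg[0]] = option
print(options)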
Erotemic/utool
utool/util_resources.py
current_memory_usage
def current_memory_usage(): """ Returns this program's current memory usage in bytes """ import psutil proc = psutil.Process(os.getpid()) #meminfo = proc.get_memory_info() meminfo = proc.memory_info() rss = meminfo[0] # Resident Set Size / Mem Usage vms = meminfo[1] # Virtual Memory Size / VM Size # NOQA return rss
python
def current_memory_usage(): """ Returns this program's current memory usage in bytes """ import psutil proc = psutil.Process(os.getpid()) #meminfo = proc.get_memory_info() meminfo = proc.memory_info() rss = meminfo[0] # Resident Set Size / Mem Usage vms = meminfo[1] # Virtual Memory Size / VM Size # NOQA return rss
[ "def", "current_memory_usage", "(", ")", ":", "import", "psutil", "proc", "=", "psutil", ".", "Process", "(", "os", ".", "getpid", "(", ")", ")", "#meminfo = proc.get_memory_info()", "meminfo", "=", "proc", ".", "memory_info", "(", ")", "rss", "=", "meminfo", "[", "0", "]", "# Resident Set Size / Mem Usage", "vms", "=", "meminfo", "[", "1", "]", "# Virtual Memory Size / VM Size # NOQA", "return", "rss" ]
Returns this program's current memory usage in bytes
[ "Returns", "this", "programs", "current", "memory", "usage", "in", "bytes" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_resources.py#L120-L130
train
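psutil's memory_info is stable API, so the record above runs as-is; a quick check that converts the resident set size to megabytes:

import os
import psutil

rss = psutil.Process(os.getpid()).memory_info().rss  # bytes, as in the record
print('resident set size: %.1f MB' % (rss / (1024 * 1024)))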
Erotemic/utool
utool/util_resources.py
num_unused_cpus
def num_unused_cpus(thresh=10): """ Returns the number of cpus with utilization less than `thresh` percent """ import psutil cpu_usage = psutil.cpu_percent(percpu=True) return sum([p < thresh for p in cpu_usage])
python
def num_unused_cpus(thresh=10): """ Returns the number of cpus with utilization less than `thresh` percent """ import psutil cpu_usage = psutil.cpu_percent(percpu=True) return sum([p < thresh for p in cpu_usage])
[ "def", "num_unused_cpus", "(", "thresh", "=", "10", ")", ":", "import", "psutil", "cpu_usage", "=", "psutil", ".", "cpu_percent", "(", "percpu", "=", "True", ")", "return", "sum", "(", "[", "p", "<", "thresh", "for", "p", "in", "cpu_usage", "]", ")" ]
Returns the number of cpus with utilization less than `thresh` percent
[ "Returns", "the", "number", "of", "cpus", "with", "utilization", "less", "than", "thresh", "percent" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_resources.py#L185-L191
train
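One plausible use is sizing a worker pool to the machine's idle cores. Note that psutil.cpu_percent(percpu=True) measures utilization since the previous call; sampling over an explicit interval gives a steadier reading:

import psutil

usage = psutil.cpu_percent(interval=0.5, percpu=True)  # percent per core
idle_cores = sum(p < 10 for p in usage)  # same threshold as the record
n_workers = max(1, idle_cores)
print('launching %d workers' % n_workers)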
glormph/msstitch
src/app/actions/mslookup/proteingrouping.py
get_protein_group_content
def get_protein_group_content(pgmap, master): """For each master protein, we generate the protein group proteins complete with sequences, psm_ids and scores. Master proteins are included in this group. Returns a list of [0 (placeholder), master, protein, pep_hits, psm_hits, protein_score, coverage, evidence_level], which is ready to enter the DB table. """ # first item (0) is only a placeholder so the lookup.INDEX things get the # correct number. Would be nice with a solution, but the INDEXes were # originally made for mzidtsv protein group adding. pg_content = [[0, master, protein, len(peptides), len([psm for pgpsms in peptides.values() for psm in pgpsms]), sum([psm[1] for pgpsms in peptides.values() for psm in pgpsms]), # score next(iter(next(iter(peptides.values()))))[3], # coverage next(iter(next(iter(peptides.values()))))[2], # evid level ] for protein, peptides in pgmap.items()] return pg_content
python
def get_protein_group_content(pgmap, master): """For each master protein, we generate the protein group proteins complete with sequences, psm_ids and scores. Master proteins are included in this group. Returns a list of [0 (placeholder), master, protein, pep_hits, psm_hits, protein_score, coverage, evidence_level], which is ready to enter the DB table. """ # first item (0) is only a placeholder so the lookup.INDEX things get the # correct number. Would be nice with a solution, but the INDEXes were # originally made for mzidtsv protein group adding. pg_content = [[0, master, protein, len(peptides), len([psm for pgpsms in peptides.values() for psm in pgpsms]), sum([psm[1] for pgpsms in peptides.values() for psm in pgpsms]), # score next(iter(next(iter(peptides.values()))))[3], # coverage next(iter(next(iter(peptides.values()))))[2], # evid level ] for protein, peptides in pgmap.items()] return pg_content
[ "def", "get_protein_group_content", "(", "pgmap", ",", "master", ")", ":", "# first item (0) is only a placeholder so the lookup.INDEX things get the", "# correct number. Would be nice with a solution, but the INDEXes were", "# originally made for mzidtsv protein group adding.", "pg_content", "=", "[", "[", "0", ",", "master", ",", "protein", ",", "len", "(", "peptides", ")", ",", "len", "(", "[", "psm", "for", "pgpsms", "in", "peptides", ".", "values", "(", ")", "for", "psm", "in", "pgpsms", "]", ")", ",", "sum", "(", "[", "psm", "[", "1", "]", "for", "pgpsms", "in", "peptides", ".", "values", "(", ")", "for", "psm", "in", "pgpsms", "]", ")", ",", "# score", "next", "(", "iter", "(", "next", "(", "iter", "(", "peptides", ".", "values", "(", ")", ")", ")", ")", ")", "[", "3", "]", ",", "# coverage", "next", "(", "iter", "(", "next", "(", "iter", "(", "peptides", ".", "values", "(", ")", ")", ")", ")", ")", "[", "2", "]", ",", "# evid level", "]", "for", "protein", ",", "peptides", "in", "pgmap", ".", "items", "(", ")", "]", "return", "pg_content" ]
For each master protein, we generate the protein group proteins complete with sequences, psm_ids and scores. Master proteins are included in this group. Returns a list of [0 (placeholder), master, protein, pep_hits, psm_hits, protein_score, coverage, evidence_level], which is ready to enter the DB table.
[ "For", "each", "master", "protein", "we", "generate", "the", "protein", "group", "proteins", "complete", "with", "sequences", "psm_ids", "and", "scores", ".", "Master", "proteins", "are", "included", "in", "this", "group", "." ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/mslookup/proteingrouping.py#L180-L200
train
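The function is pure, so a toy pgmap makes the row layout concrete. The meaning of the per-PSM tuple positions (id, score, evidence level, coverage) is inferred from the indexing in the code, not documented in this record:

# {protein: {peptide: [psm tuples]}}; each PSM is (psm_id, score, evidence, coverage)
pgmap = {
    'PROT1': {'PEPTIDEK': [('psm1', 0.9, 'strong', 85.0),
                           ('psm2', 0.7, 'strong', 85.0)]},
    'PROT2': {'OTHERPEPK': [('psm3', 0.5, 'weak', 40.0)]},
}
for row in get_protein_group_content(pgmap, master='MASTER1'):
    print(row)
# first row -> [0, 'MASTER1', 'PROT1', 1, 2, 1.6 (approx.), 85.0, 'strong']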
glormph/msstitch
src/app/actions/peptable/merge.py
get_protein_data
def get_protein_data(peptide, pdata, headerfields, accfield): """These fields are currently not pool dependent so headerfields is ignored""" report = get_proteins(peptide, pdata, headerfields) return get_cov_descriptions(peptide, pdata, report)
python
def get_protein_data(peptide, pdata, headerfields, accfield): """These fields are currently not pool dependent so headerfields is ignored""" report = get_proteins(peptide, pdata, headerfields) return get_cov_descriptions(peptide, pdata, report)
[ "def", "get_protein_data", "(", "peptide", ",", "pdata", ",", "headerfields", ",", "accfield", ")", ":", "report", "=", "get_proteins", "(", "peptide", ",", "pdata", ",", "headerfields", ")", "return", "get_cov_descriptions", "(", "peptide", ",", "pdata", ",", "report", ")" ]
These fields are currently not pool dependent so headerfields is ignored
[ "These", "fields", "are", "currently", "not", "pool", "dependent", "so", "headerfields", "is", "ignored" ]
ded7e5cbd813d7797dc9d42805778266e59ff042
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/actions/peptable/merge.py#L81-L85
train
Erotemic/utool
utool/util_progress.py
get_num_chunks
def get_num_chunks(length, chunksize): r""" Returns the number of chunks that a list will be split into given a chunksize. Args: length (int): chunksize (int): Returns: int: n_chunks CommandLine: python -m utool.util_progress --exec-get_num_chunks:0 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> length = 2000 >>> chunksize = 256 >>> n_chunks = get_num_chunks(length, chunksize) >>> result = ('n_chunks = %s' % (six.text_type(n_chunks),)) >>> print(result) n_chunks = 8 """ n_chunks = int(math.ceil(length / chunksize)) return n_chunks
python
def get_num_chunks(length, chunksize): r""" Returns the number of chunks that a list will be split into given a chunksize. Args: length (int): chunksize (int): Returns: int: n_chunks CommandLine: python -m utool.util_progress --exec-get_num_chunks:0 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> length = 2000 >>> chunksize = 256 >>> n_chunks = get_num_chunks(length, chunksize) >>> result = ('n_chunks = %s' % (six.text_type(n_chunks),)) >>> print(result) n_chunks = 8 """ n_chunks = int(math.ceil(length / chunksize)) return n_chunks
[ "def", "get_num_chunks", "(", "length", ",", "chunksize", ")", ":", "n_chunks", "=", "int", "(", "math", ".", "ceil", "(", "length", "/", "chunksize", ")", ")", "return", "n_chunks" ]
r""" Returns the number of chunks that a list will be split into given a chunksize. Args: length (int): chunksize (int): Returns: int: n_chunks CommandLine: python -m utool.util_progress --exec-get_num_chunks:0 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> length = 2000 >>> chunksize = 256 >>> n_chunks = get_num_chunks(length, chunksize) >>> result = ('n_chunks = %s' % (six.text_type(n_chunks),)) >>> print(result) n_chunks = 8
[ "r", "Returns", "the", "number", "of", "chunks", "that", "a", "list", "will", "be", "split", "into", "given", "a", "chunksize", "." ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L116-L142
train
Erotemic/utool
utool/util_progress.py
ProgChunks
def ProgChunks(list_, chunksize, nInput=None, **kwargs): """ Yields the input list in chunks and computes progress Progress version of ut.ichunks Args: list_ (list): chunksize (?): nInput (None): (default = None) Kwargs: length, freq Returns: ProgressIter: progiter_ CommandLine: python -m utool.util_progress ProgChunks --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> import utool as ut >>> list_ = range(100) >>> chunksize = 10 >>> nInput = None >>> progiter_ = ProgChunks(list_, chunksize, nInput) >>> iter_ = iter(progiter_) >>> chunk = six.next(iter_) >>> assert len(chunk) == 10 >>> rest = ut.flatten(list(progiter_)) >>> assert len(rest) == 90 """ if nInput is None: nInput = len(list_) n_chunks = get_num_chunks(nInput, chunksize) kwargs['length'] = n_chunks if 'freq' not in kwargs: kwargs['freq'] = 1 chunk_iter = util_iter.ichunks(list_, chunksize) progiter_ = ProgressIter(chunk_iter, **kwargs) return progiter_
python
def ProgChunks(list_, chunksize, nInput=None, **kwargs): """ Yields the input list in chunks and computes progress Progress version of ut.ichunks Args: list_ (list): chunksize (?): nInput (None): (default = None) Kwargs: length, freq Returns: ProgressIter: progiter_ CommandLine: python -m utool.util_progress ProgChunks --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> import utool as ut >>> list_ = range(100) >>> chunksize = 10 >>> nInput = None >>> progiter_ = ProgChunks(list_, chunksize, nInput) >>> iter_ = iter(progiter_) >>> chunk = six.next(iter_) >>> assert len(chunk) == 10 >>> rest = ut.flatten(list(progiter_)) >>> assert len(rest) == 90 """ if nInput is None: nInput = len(list_) n_chunks = get_num_chunks(nInput, chunksize) kwargs['length'] = n_chunks if 'freq' not in kwargs: kwargs['freq'] = 1 chunk_iter = util_iter.ichunks(list_, chunksize) progiter_ = ProgressIter(chunk_iter, **kwargs) return progiter_
[ "def", "ProgChunks", "(", "list_", ",", "chunksize", ",", "nInput", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "nInput", "is", "None", ":", "nInput", "=", "len", "(", "list_", ")", "n_chunks", "=", "get_num_chunks", "(", "nInput", ",", "chunksize", ")", "kwargs", "[", "'length'", "]", "=", "n_chunks", "if", "'freq'", "not", "in", "kwargs", ":", "kwargs", "[", "'freq'", "]", "=", "1", "chunk_iter", "=", "util_iter", ".", "ichunks", "(", "list_", ",", "chunksize", ")", "progiter_", "=", "ProgressIter", "(", "chunk_iter", ",", "*", "*", "kwargs", ")", "return", "progiter_" ]
Yields the input list in chunks and computes progress Progress version of ut.ichunks Args: list_ (list): chunksize (?): nInput (None): (default = None) Kwargs: length, freq Returns: ProgressIter: progiter_ CommandLine: python -m utool.util_progress ProgChunks --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> import utool as ut >>> list_ = range(100) >>> chunksize = 10 >>> nInput = None >>> progiter_ = ProgChunks(list_, chunksize, nInput) >>> iter_ = iter(progiter_) >>> chunk = six.next(iter_) >>> assert len(chunk) == 10 >>> rest = ut.flatten(list(progiter_)) >>> assert len(rest) == 90
[ "Yeilds", "an", "iterator", "in", "chunks", "and", "computes", "progress", "Progress", "version", "of", "ut", ".", "ichunks" ]
3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_progress.py#L145-L186
train