Dataset schema (one record per function; string lengths are min-max over the split):

    column            type    values
    ----------------  ------  --------------------
    repo              string  lengths 7-55
    path              string  lengths 4-127
    func_name         string  lengths 1-88
    original_string   string  lengths 75-19.8k
    language          string  1 value ("python")
    code              string  lengths 75-19.8k
    code_tokens       list    -
    docstring         string  lengths 3-17.3k
    docstring_tokens  list    -
    sha               string  length 40
    url               string  lengths 87-242
    partition         string  1 value ("train")
lsst-sqre/documenteer
documenteer/sphinxext/utils.py
make_python_xref_nodes
def make_python_xref_nodes(py_typestr, state, hide_namespace=False):
    """Make docutils nodes containing a cross-reference to a Python object.

    Parameters
    ----------
    py_typestr : `str`
        Name of the Python object. For example
        ``'mypackage.mymodule.MyClass'``. If you have the object itself, or
        its type, use the `make_python_xref_nodes_for_type` function instead.
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.
    hide_namespace : `bool`, optional
        If `True`, the namespace of the object is hidden in the rendered
        cross reference. Internally, this uses ``:py:obj:`~{py_obj}``
        (note tilde).

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the cross reference.

    Examples
    --------
    If called from within a directive:

    .. code-block:: python

       make_python_xref_nodes('numpy.sin', self.state)

    See also
    --------
    `make_python_xref_nodes_for_type`
    """
    if hide_namespace:
        template = ':py:obj:`~{}`\n'
    else:
        template = ':py:obj:`{}`\n'
    xref_text = template.format(py_typestr)
    return parse_rst_content(xref_text, state)
python
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L44-L83
train
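The rST text that make_python_xref_nodes hands to parse_rst_content can be previewed without any Sphinx machinery; here is a minimal standalone sketch of just the template logic above:

    # Standalone preview of the generated cross-reference markup; the real
    # function additionally parses this text into docutils nodes.
    def xref_text(py_typestr, hide_namespace=False):
        template = ':py:obj:`~{}`\n' if hide_namespace else ':py:obj:`{}`\n'
        return template.format(py_typestr)

    print(xref_text('mypackage.mymodule.MyClass'))
    # :py:obj:`mypackage.mymodule.MyClass`
    print(xref_text('mypackage.mymodule.MyClass', hide_namespace=True))
    # :py:obj:`~mypackage.mymodule.MyClass`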
lsst-sqre/documenteer
documenteer/sphinxext/utils.py
make_python_xref_nodes_for_type
def make_python_xref_nodes_for_type(py_type, state, hide_namespace=False):
    """Make docutils nodes containing a cross-reference to a Python object,
    given the object's type.

    Parameters
    ----------
    py_type : `obj`
        Type of an object. For example ``mypackage.mymodule.MyClass``. If
        you have an instance of the type, use ``type(myinstance)``.
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.
    hide_namespace : `bool`, optional
        If `True`, the namespace of the object is hidden in the rendered
        cross reference. Internally, this uses ``:py:obj:`~{py_obj}``
        (note tilde).

    Returns
    -------
    instance from ``docutils.nodes``
        Docutils node representing the cross reference.

    Examples
    --------
    If called from within a directive:

    .. code-block:: python

       make_python_xref_nodes_for_type(numpy.sin, self.state)

    See also
    --------
    `make_python_xref_nodes`
    """
    if py_type.__module__ == 'builtins':
        typestr = py_type.__name__
    else:
        typestr = '.'.join((py_type.__module__, py_type.__name__))
    return make_python_xref_nodes(typestr, state,
                                  hide_namespace=hide_namespace)
python
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L86-L126
train
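The builtins special case above matters because, for example, ``dict.__module__`` is ``'builtins'``, which should not leak into the reference. A runnable sketch of just the type-to-string step:

    import collections

    # Builtins keep their bare name; everything else gets the fully
    # qualified dotted path used for the cross-reference.
    def typestr_for(py_type):
        if py_type.__module__ == 'builtins':
            return py_type.__name__
        return '.'.join((py_type.__module__, py_type.__name__))

    print(typestr_for(dict))                 # dict
    print(typestr_for(collections.Counter))  # collections.Counter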
lsst-sqre/documenteer
documenteer/sphinxext/utils.py
make_section
def make_section(section_id=None, contents=None):
    """Make a docutils section node.

    Parameters
    ----------
    section_id : `str`
        Section identifier, which is appended to both the ``ids`` and
        ``names`` attributes.
    contents : `list` of ``docutils.nodes``
        List of docutils nodes that are inserted into the section.

    Returns
    -------
    ``docutils.nodes.section``
        Docutils section node.
    """
    section = nodes.section()
    section['ids'].append(nodes.make_id(section_id))
    section['names'].append(section_id)
    if contents is not None:
        section.extend(contents)
    return section
python
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L129-L150
train
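A quick usage sketch, assuming docutils is installed; it builds a section holding one paragraph and prints the pseudo-XML so the ``ids`` and ``names`` attributes are visible:

    from docutils import nodes

    section = nodes.section()
    section['ids'].append(nodes.make_id('My Section'))  # -> 'my-section'
    section['names'].append('My Section')
    section.extend([nodes.paragraph(text='Hello, docutils.')])
    print(section.pformat())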
lsst-sqre/documenteer
documenteer/sphinxext/utils.py
split_role_content
def split_role_content(role_rawsource):
    """Split the ``rawsource`` of a role into standard components.

    Parameters
    ----------
    role_rawsource : `str`
        The content of the role: its ``rawsource`` attribute.

    Returns
    -------
    parts : `dict`
        Dictionary with keys:

        ``last_component`` (`bool`)
           If `True`, the display should show only the last component of a
           namespace. The user signals this by prefixing the role's content
           with a ``~`` character.
        ``display`` (`str`)
           Custom display content. See Examples.
        ``ref`` (`str`)
           The reference content. If the role doesn't have a custom display,
           the reference will be the role's content. The ``ref`` never
           includes a ``~`` prefix.

    Examples
    --------
    >>> split_role_content('Tables <lsst.afw.table.Table>')
    {'last_component': False,
     'display': 'Tables',
     'ref': 'lsst.afw.table.Table'}

    >>> split_role_content('~lsst.afw.table.Table')
    {'last_component': True,
     'display': None,
     'ref': 'lsst.afw.table.Table'}
    """
    parts = {
        'last_component': False,
        'display': None,
        'ref': None
    }
    if role_rawsource.startswith('~'):
        # Only the last part of a namespace should be shown.
        parts['last_component'] = True
        # Strip that marker off
        role_rawsource = role_rawsource.lstrip('~')
    match = ROLE_DISPLAY_PATTERN.match(role_rawsource)
    if match:
        parts['display'] = match.group('display').strip()
        parts['ref'] = match.group('reference').strip()
    else:
        # No suggested display
        parts['display'] = None
        parts['ref'] = role_rawsource.strip()
    return parts
python
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/utils.py#L156-L212
train
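ROLE_DISPLAY_PATTERN is defined elsewhere in utils.py; a hypothetical stand-in that matches the ``display <reference>`` convention is enough to try the split logic on its own:

    import re

    # Hypothetical equivalent of ROLE_DISPLAY_PATTERN (the real regex lives
    # elsewhere in the module): named groups for display text and <reference>.
    ROLE_DISPLAY_PATTERN = re.compile(r'(?P<display>[^<]+)<(?P<reference>[^>]+)>')

    match = ROLE_DISPLAY_PATTERN.match('Tables <lsst.afw.table.Table>')
    print(match.group('display').strip())    # Tables
    print(match.group('reference').strip())  # lsst.afw.table.Table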
mojaie/chorus
chorus/molutil.py
largest_graph
def largest_graph(mol):
    """Return the molecule with the largest connected graph in the compound.

    Passing a single-molecule object gives the same result as
    molutil.clone.
    """
    mol.require("Valence")
    mol.require("Topology")
    m = clone(mol)  # Avoid modification of original object
    if m.isolated:
        for k in itertools.chain.from_iterable(m.isolated):
            m.remove_atom(k)
    return m
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L64-L74
train
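The ``m.isolated`` attribute presumably yields the atom keys of the non-largest components as nested lists, which is why they are flattened before removal; the flattening itself is plain itertools:

    import itertools

    isolated = [[4, 5], [9]]  # hypothetical keys of two smaller components
    print(list(itertools.chain.from_iterable(isolated)))  # [4, 5, 9]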
mojaie/chorus
chorus/molutil.py
H_donor_count
def H_donor_count(mol):
    """Hydrogen bond donor count
    """
    mol.require("Valence")
    return sum(1 for _, a in mol.atoms_iter() if a.H_donor)
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L105-L108
train
mojaie/chorus
chorus/molutil.py
H_acceptor_count
def H_acceptor_count(mol):
    """Hydrogen bond acceptor count
    """
    mol.require("Valence")
    return sum(1 for _, a in mol.atoms_iter() if a.H_acceptor)
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L111-L114
train
mojaie/chorus
chorus/molutil.py
rotatable_count
def rotatable_count(mol):
    """Rotatable bond count
    """
    mol.require("Rotatable")
    return sum(1 for _, _, b in mol.bonds_iter() if b.rotatable)
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L117-L120
train
mojaie/chorus
chorus/molutil.py
rule_of_five_violation
def rule_of_five_violation(mol):
    """Lipinski's rule of five violation count
    """
    v = 0
    if mw(mol) > 500:
        v += 1
    if H_donor_count(mol) > 5:
        v += 1
    if H_acceptor_count(mol) > 10:
        v += 1
    try:
        if wclogp.wclogp(mol) > 5:
            v += 1
    except TypeError:  # N/A
        v += 1
    return v
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L123-L137
train
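The same counting logic can be sketched over plain numbers, which makes the four Lipinski thresholds (MW > 500, donors > 5, acceptors > 10, logP > 5) and the "logP unavailable counts as a violation" rule easy to see:

    def rule_of_five_violations(mw, donors, acceptors, logp):
        # logp may be None when no estimate is available; like the
        # TypeError branch above, that counts as a violation.
        return sum([mw > 500,
                    donors > 5,
                    acceptors > 10,
                    logp is None or logp > 5])

    print(rule_of_five_violations(180.2, 1, 4, 1.2))    # 0 (aspirin-like)
    print(rule_of_five_violations(610.0, 6, 12, None))  # 4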
mojaie/chorus
chorus/molutil.py
formula
def formula(mol):
    """Chemical formula.
    Atoms should be arranged in order of C, H and other atoms.
    Molecules should be arranged in order of length of formula text.
    """
    mol.require("Valence")
    mol.require("Topology")
    total_cntr = Counter()
    for m in sorted(mols_iter(mol), key=len, reverse=True):
        cntr = Counter()
        for i in m:
            cntr += mol.atom(i).composition()
        text = []
        Cs = cntr.pop("C", 0)
        if Cs:
            text.append("C")
            if Cs > 1:
                text.append(str(Cs))
        Hs = cntr.pop("H", 0)
        if Hs:
            text.append("H")
            if Hs > 1:
                text.append(str(Hs))
        heteros = sorted(cntr.items(), key=lambda x: atom_number(x[0]))
        for k, v in heteros:
            text.append(k)
            if v > 1:
                text.append(str(v))
        total_cntr["".join(text)] += 1
    total = sorted(total_cntr.items(), key=lambda x: len(x[0]), reverse=True)
    total_text = []
    for k, v in total:
        if v > 1:
            total_text.append(str(v) + k)
        else:
            total_text.append(k)
    return ".".join(total_text)
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/molutil.py#L164-L200
train
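The per-molecule text building follows Hill order (C first, then H, then the remaining elements); a self-contained sketch over a plain composition dict, sorting heteroatoms alphabetically instead of by atomic number for brevity:

    from collections import Counter

    def hill_formula(composition):
        cntr = Counter(composition)
        parts = []
        for sym in ('C', 'H'):  # carbon and hydrogen first
            n = cntr.pop(sym, 0)
            if n:
                parts.append(sym + (str(n) if n > 1 else ''))
        for sym in sorted(cntr):  # then the heteroatoms
            parts.append(sym + (str(cntr[sym]) if cntr[sym] > 1 else ''))
        return ''.join(parts)

    print(hill_formula({'C': 9, 'H': 8, 'O': 4}))  # C9H8O4 (aspirin)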
zsimic/runez
src/runez/click.py
debug
def debug(*args, **attrs):
    """Show debugging information."""
    attrs.setdefault("is_flag", True)
    attrs.setdefault("default", None)
    return option(debug, *args, **attrs)
python
14363b719a1aae1528859a501a22d075ce0abfcc
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L49-L53
train
zsimic/runez
src/runez/click.py
dryrun
def dryrun(*args, **attrs):
    """Perform a dryrun."""
    attrs.setdefault("is_flag", True)
    attrs.setdefault("default", None)
    return option(dryrun, *args, **attrs)
python
14363b719a1aae1528859a501a22d075ce0abfcc
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L56-L60
train
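These wrappers all delegate to an ``option()`` helper defined elsewhere in runez.click; on the consuming side they are used as decorators, roughly like this (a sketch assuming runez is installed):

    import click
    import runez

    @click.command()
    @runez.click.debug()
    @runez.click.dryrun()
    def main(debug, dryrun):
        click.echo("debug=%s dryrun=%s" % (debug, dryrun))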
zsimic/runez
src/runez/click.py
log
def log(*args, **attrs):
    """Override log file location."""
    attrs.setdefault("metavar", "PATH")
    attrs.setdefault("show_default", False)
    return option(log, *args, **attrs)
python
14363b719a1aae1528859a501a22d075ce0abfcc
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L63-L67
train
zsimic/runez
src/runez/click.py
version
def version(*args, **attrs):
    """Show the version and exit."""
    if hasattr(sys, "_getframe"):
        package = attrs.pop("package",
                            sys._getframe(1).f_globals.get("__package__"))
        if package:
            attrs.setdefault("version", get_version(package))
    return click.version_option(*args, **attrs)
python
14363b719a1aae1528859a501a22d075ce0abfcc
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/click.py#L70-L76
train
mojaie/chorus
chorus/rdkit.py
to_rdmol
def to_rdmol(mol):
    """Convert molecule to RDMol"""
    rwmol = Chem.RWMol(Chem.MolFromSmiles(''))
    key_to_idx = {}
    bond_type = {1: Chem.BondType.SINGLE,
                 2: Chem.BondType.DOUBLE,
                 3: Chem.BondType.TRIPLE}
    conf = Chem.Conformer(rwmol.GetNumAtoms())
    for k, a in mol.atoms_iter():
        i = rwmol.AddAtom(Chem.Atom(atom_number(a.symbol)))
        key_to_idx[k] = i
        conf.SetAtomPosition(i, a.coords)
    rwmol.AddConformer(conf)
    for u, v, b in mol.bonds_iter():
        ui = key_to_idx[u]
        vi = key_to_idx[v]
        rwmol.AddBond(ui, vi, bond_type[b.order])
    Chem.GetSSSR(rwmol)  # Ring recognition is required for fingerprint
    rwmol.UpdatePropertyCache(strict=False)
    return rwmol.GetMol()
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/rdkit.py#L20-L39
train
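The same RWMol-building pattern works on a toy molecule, assuming RDKit is installed: start from an empty editable mol, add atoms by atomic number, then bonds by index:

    from rdkit import Chem

    rwmol = Chem.RWMol(Chem.MolFromSmiles(''))  # empty editable molecule
    c = rwmol.AddAtom(Chem.Atom(6))             # carbon, returns index 0
    o = rwmol.AddAtom(Chem.Atom(8))             # oxygen, returns index 1
    rwmol.AddBond(c, o, Chem.BondType.DOUBLE)
    rwmol.UpdatePropertyCache(strict=False)
    print(Chem.MolToSmiles(rwmol.GetMol()))     # C=O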
mojaie/chorus
chorus/rdkit.py
morgan_sim
def morgan_sim(mol1, mol2, radius=2, digit=3):
    """Calculate Morgan fingerprint similarity using RDKit.

    radius=2 is roughly equivalent to ECFP4.
    """
    rdmol1 = to_rdmol(mol1)
    rdmol2 = to_rdmol(mol2)
    fp1 = AllChem.GetMorganFingerprint(rdmol1, radius)
    fp2 = AllChem.GetMorganFingerprint(rdmol2, radius)
    return round(DataStructs.DiceSimilarity(fp1, fp2), digit)
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/rdkit.py#L63-L71
train
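The same radius-2 Dice comparison, run directly on RDKit molecules parsed from SMILES (again assuming RDKit is installed):

    from rdkit import Chem, DataStructs
    from rdkit.Chem import AllChem

    m1 = Chem.MolFromSmiles('CC(=O)Oc1ccccc1C(=O)O')  # aspirin
    m2 = Chem.MolFromSmiles('CC(=O)Nc1ccc(O)cc1')     # paracetamol
    fp1 = AllChem.GetMorganFingerprint(m1, 2)         # count-based, radius 2
    fp2 = AllChem.GetMorganFingerprint(m2, 2)
    print(round(DataStructs.DiceSimilarity(fp1, fp2), 3))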
maljovec/topopy
topopy/MergeTree.py
MergeTree.build
def build(self, X, Y, w=None, edges=None):
    """ Assigns data to this object and builds the Merge Tree

    @ In, X, an m-by-n array of values specifying m n-dimensional
    samples
    @ In, Y, a m vector of values specifying the output responses
    corresponding to the m samples specified by X
    @ In, w, an optional m vector of values specifying the weights
    associated to each of the m samples used. Default of None means all
    points will be equally weighted
    @ In, edges, an optional list of custom edges to use as a starting
    point for pruning, or in place of a computed graph.
    """
    super(MergeTree, self).build(X, Y, w, edges)
    if self.debug:
        sys.stdout.write("Merge Tree Computation: ")
        start = time.clock()
    self.__tree = MergeTreeFloat(
        vectorFloat(self.Xnorm.flatten()),
        vectorFloat(self.Y),
        str(self.gradient),
        self.graph_rep.full_graph(),
        self.debug,
    )
    self._internal_build()
    if self.debug:
        end = time.clock()
        sys.stdout.write("%f s\n" % (end - start))
python
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MergeTree.py#L103-L133
train
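Note that time.clock() was removed in Python 3.8; on current interpreters the equivalent debug-timing pattern (used in this and the next record) is time.perf_counter():

    import sys
    import time

    start = time.perf_counter()
    # ... the expensive computation goes here ...
    end = time.perf_counter()
    sys.stdout.write("%f s\n" % (end - start))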
maljovec/topopy
topopy/MergeTree.py
MergeTree.build_for_contour_tree
def build_for_contour_tree(self, contour_tree, negate=False):
    """ A helper function that will reduce duplication of data by
    reusing the parent contour tree's parameters and data
    """
    if self.debug:
        tree_type = "Join"
        if negate:
            tree_type = "Split"
        sys.stdout.write("{} Tree Computation: ".format(tree_type))
        start = time.clock()
    Y = contour_tree.Y
    if negate:
        Y = -Y
    self.__tree = MergeTreeFloat(
        vectorFloat(contour_tree.Xnorm.flatten()),
        vectorFloat(Y),
        str(contour_tree.gradient),
        contour_tree.graph_rep.full_graph(),
        self.debug,
    )
    self._internal_build()
    if self.debug:
        end = time.clock()
        sys.stdout.write("%f s\n" % (end - start))
python
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MergeTree.py#L135-L160
train
zsimic/runez
src/runez/context.py
verify_abort
def verify_abort(func, *args, **kwargs):
    """
    Convenient wrapper around functions that should exit or raise an exception

    Example:
        assert "Can't create folder" in verify_abort(ensure_folder, "/dev/null/not-there")

    Args:
        func (callable): Function to execute
        *args: Args to pass to 'func'
        **kwargs: Named args to pass to 'func'

    Returns:
        (str): Chatter from call to 'func', if it did indeed raise
    """
    expected_exception = kwargs.pop("expected_exception",
                                    runez.system.AbortException)
    with CaptureOutput() as logged:
        try:
            value = func(*args, **kwargs)
            assert False, "%s did not raise, but returned %s" % (func, value)
        except expected_exception:
            return str(logged)
python
14363b719a1aae1528859a501a22d075ce0abfcc
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/context.py#L248-L270
train
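A cut-down, standard-library-only version of the same idea (runez's CaptureOutput is replaced here by redirect_stderr, purely for illustration): run a callable, insist that it raises, and hand back whatever was captured:

    import io
    from contextlib import redirect_stderr

    def verify_raises(func, *args, expected_exception=Exception, **kwargs):
        buf = io.StringIO()
        with redirect_stderr(buf):
            try:
                value = func(*args, **kwargs)
            except expected_exception:
                return buf.getvalue()
        raise AssertionError("%s did not raise, but returned %s" % (func, value))

    def boom():
        raise ValueError("no such folder")

    print(repr(verify_raises(boom, expected_exception=ValueError)))  # ''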
zsimic/runez
src/runez/context.py
CapturedStream.pop
def pop(self, strip=False):
    """Current content popped, useful for testing"""
    r = self.contents()
    self.clear()
    if r and strip:
        r = r.strip()
    return r
python
14363b719a1aae1528859a501a22d075ce0abfcc
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/context.py#L72-L78
train
mojaie/chorus
chorus/draw/svg.py
SVG.contents
def contents(self):
    """Get svg string
    """
    c = self._header[:]
    c.append(' font-weight="{}"'.format(self.font_weight))
    c.append(' font-family="{}"'.format(self.font_family))
    c.append(' width="{}" height="{}"'.format(*self.screen_size))
    sclw = self.original_size[0] * self.scale_factor
    sclh = self.original_size[1] * self.scale_factor
    longside = max([sclw, sclh])
    width = round(longside + self.margin * 2, 2)
    height = round(longside + self.margin * 2, 2)
    xleft = round(-self.margin - (longside - sclw) / 2, 2)
    ytop = round(-self.margin - (longside - sclh) / 2, 2)
    c.append(' viewBox="{} {} {} {}">\n'.format(
        xleft, ytop, width, height))
    if self.bgcolor is not None:
        c.append('<rect x="{}" y="{}" width="{}" height="{}" '
                 'fill="{}" />\n'.format(xleft, ytop, width, height,
                                         self.bgcolor))
    c.extend(self._elems)
    c.append("</svg>")
    return "".join(c)
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/svg.py#L49-L70
train
mojaie/chorus
chorus/draw/svg.py
SVG.data_url_scheme
def data_url_scheme(self):
    """Get svg in Data URL Scheme format.
    """
    # TODO: move to web.app or make it function
    # remove #svg from dataframe
    encoded = base64.b64encode(self.contents().encode())
    return "data:image/svg+xml;base64," + encoded.decode()
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/svg.py#L72-L78
train
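The base64 data-URL round trip is easy to verify on a tiny hand-written SVG:

    import base64

    svg = '<svg xmlns="http://www.w3.org/2000/svg" width="10" height="10"/>'
    url = "data:image/svg+xml;base64," + base64.b64encode(svg.encode()).decode()
    print(url)  # paste into a browser address bar to render the image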
mojaie/chorus
chorus/draw/svg.py
SVG._coords_conv
def _coords_conv(self, pos):
    """For the SVG coordinate system, reflect over the X axis and
    translate from center to top-left.
    """
    px = (self.original_size[0] / 2 + pos[0]) * self.scale_factor
    py = (self.original_size[1] / 2 - pos[1]) * self.scale_factor
    return round(px, 2), round(py, 2)
python
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/draw/svg.py#L89-L95
train
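A standalone version with concrete numbers shows the Y-axis flip and the origin shift; the 10x10 original size and the scale factor of 20 are made-up values for illustration:

    def coords_conv(pos, original_size=(10.0, 10.0), scale_factor=20):
        # Shift the origin from the image center to SVG's top-left corner,
        # flipping Y because SVG's Y axis points down.
        px = (original_size[0] / 2 + pos[0]) * scale_factor
        py = (original_size[1] / 2 - pos[1]) * scale_factor
        return round(px, 2), round(py, 2)

    print(coords_conv((0, 0)))   # (100.0, 100.0) -> canvas center
    print(coords_conv((-5, 5)))  # (0.0, 0.0)     -> top-left corner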
mastro35/flows
flows/FlowsLogger.py
FlowsLogger.get_logger
def get_logger(self):
    """
    Returns the standard logger
    """
    if Global.LOGGER:
        Global.LOGGER.debug('configuring a logger')

    if self._logger_instance is not None:
        return self._logger_instance

    self._logger_instance = logging.getLogger("flowsLogger")
    self._logger_instance.setLevel(logging.DEBUG)

    log_format = '%(asctime)s - [%(levelname)s]|%(thread)d\t%(message)s'
    log_date_format = '%Y-%m-%d %H:%M:%S'
    formatter = logging.Formatter(log_format, log_date_format)

    new_log_stream_handler = logging.StreamHandler()
    new_log_stream_handler.setFormatter(formatter)
    new_log_stream_handler.setLevel(logging.INFO)
    self._logger_instance.addHandler(new_log_stream_handler)

    return self._logger_instance
python
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsLogger.py#L39-L61
train
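The two-level setup above (logger at DEBUG, handler at INFO) is what lets other handlers opt into more detail later; a compact stdlib illustration of the effect:

    import logging

    logger = logging.getLogger("demo")
    logger.setLevel(logging.DEBUG)  # the logger passes everything on
    handler = logging.StreamHandler()
    handler.setLevel(logging.INFO)  # but this handler filters to INFO+
    handler.setFormatter(logging.Formatter(
        '%(asctime)s - [%(levelname)s]|%(thread)d\t%(message)s',
        '%Y-%m-%d %H:%M:%S'))
    logger.addHandler(handler)

    logger.debug("dropped by the handler, kept by any more verbose handler")
    logger.info("printed")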
mastro35/flows
flows/FlowsLogger.py
FlowsLogger.reconfigure_log_level
def reconfigure_log_level(self):
    """
    Returns a new standard logger instance
    """
    if Global.LOGGER:
        Global.LOGGER.debug('reconfiguring logger level')

    stream_handlers = filter(lambda x: type(x) is logging.StreamHandler,
                             self._logger_instance.handlers)
    for x in stream_handlers:
        x.level = Global.CONFIG_MANAGER.log_level

    return self.get_logger()
python
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsLogger.py#L63-L75
train
lsst-sqre/documenteer
documenteer/sphinxext/packagetoctree.py
_build_toctree_node
def _build_toctree_node(parent=None, entries=None, includefiles=None,
                        caption=None):
    """Factory for a toctree node.
    """
    # Add the toctree's node itself
    subnode = sphinx.addnodes.toctree()
    subnode['parent'] = parent
    subnode['entries'] = entries
    subnode['includefiles'] = includefiles
    subnode['caption'] = caption
    # These values are needed for toctree node types. We don't need/want
    # these to be configurable for module-toctree.
    subnode['maxdepth'] = 1
    subnode['hidden'] = False
    subnode['glob'] = None
    subnode['includehidden'] = False
    subnode['numbered'] = 0
    subnode['titlesonly'] = False
    return subnode
python
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/packagetoctree.py#L228-L247
train
lsst-sqre/documenteer
documenteer/sphinxext/packagetoctree.py
ModuleTocTree._parse_skip_option
def _parse_skip_option(self):
    """Parse the ``skip`` option of skipped module names.
    """
    try:
        skip_text = self.options['skip']
    except KeyError:
        return []

    modules = [module.strip() for module in skip_text.split(',')]
    return modules
python
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/packagetoctree.py#L94-L103
train
lsst-sqre/documenteer
documenteer/sphinxext/packagetoctree.py
PackageTocTree._parse_skip_option
def _parse_skip_option(self):
    """Parse the ``skip`` option of skipped package names.
    """
    try:
        skip_text = self.options['skip']
    except KeyError:
        return []

    packages = [package.strip() for package in skip_text.split(',')]
    return packages
python
def _parse_skip_option(self): """Parse the ``skip`` option of skipped package names. """ try: skip_text = self.options['skip'] except KeyError: return [] packages = [package.strip() for package in skip_text.split(',')] return packages
[ "def", "_parse_skip_option", "(", "self", ")", ":", "try", ":", "skip_text", "=", "self", ".", "options", "[", "'skip'", "]", "except", "KeyError", ":", "return", "[", "]", "packages", "=", "[", "package", ".", "strip", "(", ")", "for", "package", "in", "skip_text", ".", "split", "(", "','", ")", "]", "return", "packages" ]
Parse the ``skip`` option of skipped package names.
[ "Parse", "the", "skip", "option", "of", "skipped", "package", "names", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/packagetoctree.py#L188-L197
train
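Both directives parse ``skip`` the same way: a comma-separated option string becomes a list of stripped names, and a missing option yields an empty list. A runnable standalone mirror of that logic (the package names are illustrative):

def parse_skip_option(options):
    # standalone mirror of the two directive methods above
    try:
        skip_text = options['skip']
    except KeyError:
        return []
    return [name.strip() for name in skip_text.split(',')]

print(parse_skip_option({'skip': 'lsst.afw, lsst.daf'}))  # ['lsst.afw', 'lsst.daf']
print(parse_skip_option({}))                              # []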
mastro35/flows
flows/FlowsManager.py
FlowsManager._set_command_line_arguments
def _set_command_line_arguments(self, args):
    """
    Set internal configuration variables according to the input parameters
    """
    Global.LOGGER.debug("setting command line arguments")

    if args.VERBOSE:
        Global.LOGGER.debug("verbose mode active")
        Global.CONFIG_MANAGER.log_level = logging.DEBUG
        Global.LOGGER_INSTANCE.reconfigure_log_level()

    if args.STATS > 0:
        Global.LOGGER.debug(f"stats requested every {args.STATS} seconds")
        Global.CONFIG_MANAGER.show_stats = True
        Global.CONFIG_MANAGER.stats_timeout = args.STATS

    if args.INTERVAL > 0:
        Global.LOGGER.debug(f"setting sleep interval to {args.INTERVAL} milliseconds")
        Global.CONFIG_MANAGER.sleep_interval = float(args.INTERVAL)/1000

    if args.TRACE:
        Global.LOGGER.debug("tracing mode active")
        Global.CONFIG_MANAGER.tracing_mode = True
        Global.CONFIG_MANAGER.log_level = logging.DEBUG
        Global.LOGGER_INSTANCE.reconfigure_log_level()

    if args.MESSAGEINTERVAL is not None and args.MESSAGEINTERVAL > 0:
        Global.LOGGER.debug(f"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds")
        Global.CONFIG_MANAGER.message_fetcher_sleep_interval = float(args.MESSAGEINTERVAL)/10000
        Global.CONFIG_MANAGER.fixed_message_fetcher_interval = True

    Global.LOGGER.debug(f"recipes to be parsed: {args.FILENAME}")
    Global.CONFIG_MANAGER.recipes = (args.FILENAME)
python
def _set_command_line_arguments(self, args):
    """
    Set internal configuration variables according to the input parameters
    """
    Global.LOGGER.debug("setting command line arguments")

    if args.VERBOSE:
        Global.LOGGER.debug("verbose mode active")
        Global.CONFIG_MANAGER.log_level = logging.DEBUG
        Global.LOGGER_INSTANCE.reconfigure_log_level()

    if args.STATS > 0:
        Global.LOGGER.debug(f"stats requested every {args.STATS} seconds")
        Global.CONFIG_MANAGER.show_stats = True
        Global.CONFIG_MANAGER.stats_timeout = args.STATS

    if args.INTERVAL > 0:
        Global.LOGGER.debug(f"setting sleep interval to {args.INTERVAL} milliseconds")
        Global.CONFIG_MANAGER.sleep_interval = float(args.INTERVAL)/1000

    if args.TRACE:
        Global.LOGGER.debug("tracing mode active")
        Global.CONFIG_MANAGER.tracing_mode = True
        Global.CONFIG_MANAGER.log_level = logging.DEBUG
        Global.LOGGER_INSTANCE.reconfigure_log_level()

    if args.MESSAGEINTERVAL is not None and args.MESSAGEINTERVAL > 0:
        Global.LOGGER.debug(f"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds")
        Global.CONFIG_MANAGER.message_fetcher_sleep_interval = float(args.MESSAGEINTERVAL)/10000
        Global.CONFIG_MANAGER.fixed_message_fetcher_interval = True

    Global.LOGGER.debug(f"recipes to be parsed: {args.FILENAME}")
    Global.CONFIG_MANAGER.recipes = (args.FILENAME)
[ "def", "_set_command_line_arguments", "(", "self", ",", "args", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"setting command line arguments\"", ")", "if", "args", ".", "VERBOSE", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"verbose mode active\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "log_level", "=", "logging", ".", "DEBUG", "Global", ".", "LOGGER_INSTANCE", ".", "reconfigure_log_level", "(", ")", "if", "args", ".", "STATS", ">", "0", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"stats requested every {args.STATS} seconds\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "show_stats", "=", "True", "Global", ".", "CONFIG_MANAGER", ".", "stats_timeout", "=", "args", ".", "STATS", "if", "args", ".", "INTERVAL", ">", "0", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"setting sleep interval to {args.INTERVAL} milliseconds\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "sleep_interval", "=", "float", "(", "args", ".", "INTERVAL", ")", "/", "1000", "if", "args", ".", "TRACE", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"tracing mode active\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "tracing_mode", "=", "True", "Global", ".", "CONFIG_MANAGER", ".", "log_level", "=", "logging", ".", "DEBUG", "Global", ".", "LOGGER_INSTANCE", ".", "reconfigure_log_level", "(", ")", "if", "args", ".", "MESSAGEINTERVAL", "is", "not", "None", "and", "args", ".", "MESSAGEINTERVAL", ">", "0", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"setting message fetcher sleep interval to {args.MESSAGEINTERVAL/10} milliseconds\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "message_fetcher_sleep_interval", "=", "float", "(", "args", ".", "MESSAGEINTERVAL", ")", "/", "10000", "Global", ".", "CONFIG_MANAGER", ".", "fixed_message_fetcher_interval", "=", "True", "Global", ".", "LOGGER", ".", "debug", "(", "f\"recipes to be parsed: {args.FILENAME}\"", ")", "Global", ".", "CONFIG_MANAGER", ".", "recipes", "=", "(", "args", ".", "FILENAME", ")" ]
Set internal configuration variables according to the input parameters
[ "Set", "internal", "configuration", "variables", "according", "to", "the", "input", "parameters" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L66-L99
train
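The unit conversions here are easy to miss: ``INTERVAL`` is milliseconds divided by 1000, while ``MESSAGEINTERVAL`` is tenths of milliseconds divided by 10000. A sketch of the expected effect using a hand-built namespace; the attribute names come from the parser shown further below, and the concrete values are illustrative.

import argparse

args = argparse.Namespace(VERBOSE=True, STATS=10, INTERVAL=250, TRACE=False,
                          MESSAGEINTERVAL=50, FILENAME=['recipe.ini'])
# manager._set_command_line_arguments(args) would then set, roughly:
#   sleep_interval                 = 250 / 1000  = 0.25 s
#   message_fetcher_sleep_interval = 50 / 10000  = 0.005 s (and fixed interval on)
#   stats printed every 10 s, log level DEBUG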
mastro35/flows
flows/FlowsManager.py
FlowsManager.start
def start(self):
    """
    Start all the processes
    """
    Global.LOGGER.info("starting the flow manager")
    self._start_actions()
    self._start_message_fetcher()
    Global.LOGGER.debug("flow manager started")
python
def start(self):
    """
    Start all the processes
    """
    Global.LOGGER.info("starting the flow manager")
    self._start_actions()
    self._start_message_fetcher()
    Global.LOGGER.debug("flow manager started")
[ "def", "start", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "info", "(", "\"starting the flow manager\"", ")", "self", ".", "_start_actions", "(", ")", "self", ".", "_start_message_fetcher", "(", ")", "Global", ".", "LOGGER", ".", "debug", "(", "\"flow manager started\"", ")" ]
Start all the processes
[ "Start", "all", "the", "processes" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L102-L109
train
mastro35/flows
flows/FlowsManager.py
FlowsManager.stop
def stop(self):
    """
    Stop all the processes
    """
    Global.LOGGER.info("stopping the flow manager")
    self._stop_actions()
    self.isrunning = False
    Global.LOGGER.debug("flow manager stopped")
python
def stop(self):
    """
    Stop all the processes
    """
    Global.LOGGER.info("stopping the flow manager")
    self._stop_actions()
    self.isrunning = False
    Global.LOGGER.debug("flow manager stopped")
[ "def", "stop", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "info", "(", "\"stopping the flow manager\"", ")", "self", ".", "_stop_actions", "(", ")", "self", ".", "isrunning", "=", "False", "Global", ".", "LOGGER", ".", "debug", "(", "\"flow manager stopped\"", ")" ]
Stop all the processes
[ "Stop", "all", "the", "processes" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L111-L118
train
mastro35/flows
flows/FlowsManager.py
FlowsManager.restart
def restart(self):
    """
    Restart all the processes
    """
    Global.LOGGER.info("restarting the flow manager")
    self._stop_actions()   # stop the old actions
    self.actions = []      # clear the action list
    self._start_actions()  # start the configured actions
    Global.LOGGER.debug("flow manager restarted")
python
def restart(self):
    """
    Restart all the processes
    """
    Global.LOGGER.info("restarting the flow manager")
    self._stop_actions()   # stop the old actions
    self.actions = []      # clear the action list
    self._start_actions()  # start the configured actions
    Global.LOGGER.debug("flow manager restarted")
[ "def", "restart", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "info", "(", "\"restarting the flow manager\"", ")", "self", ".", "_stop_actions", "(", ")", "# stop the old actions", "self", ".", "actions", "=", "[", "]", "# clear the action list", "self", ".", "_start_actions", "(", ")", "# start the configured actions", "Global", ".", "LOGGER", ".", "debug", "(", "\"flow manager restarted\"", ")" ]
Restart all the processes
[ "Restart", "all", "the", "processes" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L120-L128
train
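start/stop/restart form the manager's lifecycle: restart is effectively a stop that also clears the action list before starting again, while only stop ends the fetcher loop by flipping ``isrunning``. A usage sketch; the FlowsManager construction is an assumption.

manager = FlowsManager()  # hypothetical construction
manager.start()           # parse recipes, spawn actions, begin fetching
# ... recipe files changed on disk ...
manager.restart()         # stop the old actions, clear the list, start fresh
manager.stop()            # stop the actions and let the fetcher loop end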
mastro35/flows
flows/FlowsManager.py
FlowsManager._start_actions
def _start_actions(self):
    """
    Start all the actions for the recipes
    """
    Global.LOGGER.info("starting actions")

    for recipe in Global.CONFIG_MANAGER.recipes:
        Global.CONFIG_MANAGER.read_recipe(recipe)

    list(map(lambda section: self._start_action_for_section(
        section), Global.CONFIG_MANAGER.sections))
python
def _start_actions(self):
    """
    Start all the actions for the recipes
    """
    Global.LOGGER.info("starting actions")

    for recipe in Global.CONFIG_MANAGER.recipes:
        Global.CONFIG_MANAGER.read_recipe(recipe)

    list(map(lambda section: self._start_action_for_section(
        section), Global.CONFIG_MANAGER.sections))
[ "def", "_start_actions", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "info", "(", "\"starting actions\"", ")", "for", "recipe", "in", "Global", ".", "CONFIG_MANAGER", ".", "recipes", ":", "Global", ".", "CONFIG_MANAGER", ".", "read_recipe", "(", "recipe", ")", "list", "(", "map", "(", "lambda", "section", ":", "self", ".", "_start_action_for_section", "(", "section", ")", ",", "Global", ".", "CONFIG_MANAGER", ".", "sections", ")", ")" ]
Start all the actions for the recipes
[ "Start", "all", "the", "actions", "for", "the", "recipes" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L130-L140
train
mastro35/flows
flows/FlowsManager.py
FlowsManager._start_action_for_section
def _start_action_for_section(self, section):
    """
    Start all the actions for a particular section
    """
    if section == "configuration":
        return

    Global.LOGGER.debug("starting actions for section " + section)

    # read the configuration of the action
    action_configuration = Global.CONFIG_MANAGER.sections[
        section]

    if len(action_configuration) == 0:
        Global.LOGGER.warn(f"section {section} has no configuration, skipping")
        return

    action_type = None
    # action_input = None
    new_managed_input = []

    if "type" in action_configuration:
        action_type = action_configuration["type"]

    if "input" in action_configuration:
        action_input = action_configuration["input"]
        new_managed_input = (item.strip()
                             for item in action_input.split(","))

    my_action = Action.create_action_for_code(action_type,
                                              section,
                                              action_configuration,
                                              list(new_managed_input))

    if not my_action:
        Global.LOGGER.warn(f"can't find a type for action {section}, the action will be skipped")
        return

    self.actions.append(my_action)

    Global.LOGGER.debug("updating the subscriptions table")
    for my_input in my_action.monitored_input:
        self.subscriptions.setdefault(
            my_input, []).append(my_action)
python
def _start_action_for_section(self, section):
    """
    Start all the actions for a particular section
    """
    if section == "configuration":
        return

    Global.LOGGER.debug("starting actions for section " + section)

    # read the configuration of the action
    action_configuration = Global.CONFIG_MANAGER.sections[
        section]

    if len(action_configuration) == 0:
        Global.LOGGER.warn(f"section {section} has no configuration, skipping")
        return

    action_type = None
    # action_input = None
    new_managed_input = []

    if "type" in action_configuration:
        action_type = action_configuration["type"]

    if "input" in action_configuration:
        action_input = action_configuration["input"]
        new_managed_input = (item.strip()
                             for item in action_input.split(","))

    my_action = Action.create_action_for_code(action_type,
                                              section,
                                              action_configuration,
                                              list(new_managed_input))

    if not my_action:
        Global.LOGGER.warn(f"can't find a type for action {section}, the action will be skipped")
        return

    self.actions.append(my_action)

    Global.LOGGER.debug("updating the subscriptions table")
    for my_input in my_action.monitored_input:
        self.subscriptions.setdefault(
            my_input, []).append(my_action)
[ "def", "_start_action_for_section", "(", "self", ",", "section", ")", ":", "if", "section", "==", "\"configuration\"", ":", "return", "Global", ".", "LOGGER", ".", "debug", "(", "\"starting actions for section \"", "+", "section", ")", "# read the configuration of the action", "action_configuration", "=", "Global", ".", "CONFIG_MANAGER", ".", "sections", "[", "section", "]", "if", "len", "(", "action_configuration", ")", "==", "0", ":", "Global", ".", "LOGGER", ".", "warn", "(", "f\"section {section} has no configuration, skipping\"", ")", "return", "action_type", "=", "None", "# action_input = None", "new_managed_input", "=", "[", "]", "if", "\"type\"", "in", "action_configuration", ":", "action_type", "=", "action_configuration", "[", "\"type\"", "]", "if", "\"input\"", "in", "action_configuration", ":", "action_input", "=", "action_configuration", "[", "\"input\"", "]", "new_managed_input", "=", "(", "item", ".", "strip", "(", ")", "for", "item", "in", "action_input", ".", "split", "(", "\",\"", ")", ")", "my_action", "=", "Action", ".", "create_action_for_code", "(", "action_type", ",", "section", ",", "action_configuration", ",", "list", "(", "new_managed_input", ")", ")", "if", "not", "my_action", ":", "Global", ".", "LOGGER", ".", "warn", "(", "f\"can't find a type for action {section}, the action will be skipped\"", ")", "return", "self", ".", "actions", ".", "append", "(", "my_action", ")", "Global", ".", "LOGGER", ".", "debug", "(", "\"updating the subscriptions table\"", ")", "for", "my_input", "in", "my_action", ".", "monitored_input", ":", "self", ".", "subscriptions", ".", "setdefault", "(", "my_input", ",", "[", "]", ")", ".", "append", "(", "my_action", ")" ]
Start all the actions for a particular section
[ "Start", "all", "the", "actions", "for", "a", "particular", "section" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L142-L185
train
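Each recipe section other than ``configuration`` becomes one action; the section's ``input`` field is split on commas and every resulting name is mapped to that action in the subscriptions table. A runnable sketch of just the routing step, for a hypothetical section whose input is "tick, tock":

subscriptions = {}
monitored_input = [item.strip() for item in "tick, tock".split(",")]
action = object()  # stand-in for the Action instance built from the section
for my_input in monitored_input:
    subscriptions.setdefault(my_input, []).append(action)
print(sorted(subscriptions))  # ['tick', 'tock'] -> both senders route to the same action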
mastro35/flows
flows/FlowsManager.py
FlowsManager._stop_actions
def _stop_actions(self):
    """
    Stop all the actions
    """
    Global.LOGGER.info("stopping actions")
    list(map(lambda x: x.stop(), self.actions))
    Global.LOGGER.info("actions stopped")
python
def _stop_actions(self):
    """
    Stop all the actions
    """
    Global.LOGGER.info("stopping actions")
    list(map(lambda x: x.stop(), self.actions))
    Global.LOGGER.info("actions stopped")
[ "def", "_stop_actions", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "info", "(", "\"stopping actions\"", ")", "list", "(", "map", "(", "lambda", "x", ":", "x", ".", "stop", "(", ")", ",", "self", ".", "actions", ")", ")", "Global", ".", "LOGGER", ".", "info", "(", "\"actions stopped\"", ")" ]
Stop all the actions
[ "Stop", "all", "the", "actions" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L187-L195
train
mastro35/flows
flows/FlowsManager.py
FlowsManager._perform_system_check
def _perform_system_check(self):
    """
    Perform a system check to determine whether we need to throttle
    to handle all the incoming messages
    """
    if Global.CONFIG_MANAGER.tracing_mode:
        Global.LOGGER.debug("performing a system check")

    now = datetime.datetime.now()
    sent = Global.MESSAGE_DISPATCHER.dispatched
    received = self.fetched
    queue_length = sent - received
    message_sleep_interval = Global.CONFIG_MANAGER.message_fetcher_sleep_interval

    if Global.CONFIG_MANAGER.show_stats:
        if (now - self.last_stats_check_date).total_seconds() > Global.CONFIG_MANAGER.stats_timeout:
            self.last_stats_check_date = now
            stats_string = f"showing stats\n--- [STATS] ---\nMessage Sent: {sent}\nMessage Received: {received}\nMessage Sleep Interval = {message_sleep_interval}\nQueue length = {queue_length}\n--- [ END ] ---"
            Global.LOGGER.info(stats_string)

    # if we are accumulating messages, or we have processed at least 5000 messages
    # since last check, we need to speed up the process
    messages_limit_reached = sent - self.last_queue_check_count > Global.CONFIG_MANAGER.messages_dispatched_for_system_check
    queue_limit_reached = queue_length > Global.CONFIG_MANAGER.queue_length_for_system_check
    time_limit_since_last_check_is_over = (now - self.last_queue_check_date).total_seconds() > Global.CONFIG_MANAGER.seconds_between_queue_check

    if not Global.CONFIG_MANAGER.fixed_message_fetcher_interval:
        if (messages_limit_reached) or (queue_limit_reached and time_limit_since_last_check_is_over):
            cause = "messages limit reached" if messages_limit_reached else "queue limit reached"
            Global.LOGGER.debug(f"triggering the throttle function due to {cause}")
            self._adapt_sleep_interval(sent, received, queue_length, now)
python
def _perform_system_check(self):
    """
    Perform a system check to determine whether we need to throttle
    to handle all the incoming messages
    """
    if Global.CONFIG_MANAGER.tracing_mode:
        Global.LOGGER.debug("performing a system check")

    now = datetime.datetime.now()
    sent = Global.MESSAGE_DISPATCHER.dispatched
    received = self.fetched
    queue_length = sent - received
    message_sleep_interval = Global.CONFIG_MANAGER.message_fetcher_sleep_interval

    if Global.CONFIG_MANAGER.show_stats:
        if (now - self.last_stats_check_date).total_seconds() > Global.CONFIG_MANAGER.stats_timeout:
            self.last_stats_check_date = now
            stats_string = f"showing stats\n--- [STATS] ---\nMessage Sent: {sent}\nMessage Received: {received}\nMessage Sleep Interval = {message_sleep_interval}\nQueue length = {queue_length}\n--- [ END ] ---"
            Global.LOGGER.info(stats_string)

    # if we are accumulating messages, or we have processed at least 5000 messages
    # since last check, we need to speed up the process
    messages_limit_reached = sent - self.last_queue_check_count > Global.CONFIG_MANAGER.messages_dispatched_for_system_check
    queue_limit_reached = queue_length > Global.CONFIG_MANAGER.queue_length_for_system_check
    time_limit_since_last_check_is_over = (now - self.last_queue_check_date).total_seconds() > Global.CONFIG_MANAGER.seconds_between_queue_check

    if not Global.CONFIG_MANAGER.fixed_message_fetcher_interval:
        if (messages_limit_reached) or (queue_limit_reached and time_limit_since_last_check_is_over):
            cause = "messages limit reached" if messages_limit_reached else "queue limit reached"
            Global.LOGGER.debug(f"triggering the throttle function due to {cause}")
            self._adapt_sleep_interval(sent, received, queue_length, now)
[ "def", "_perform_system_check", "(", "self", ")", ":", "if", "Global", ".", "CONFIG_MANAGER", ".", "tracing_mode", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"performing a system check\"", ")", "now", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "sent", "=", "Global", ".", "MESSAGE_DISPATCHER", ".", "dispatched", "received", "=", "self", ".", "fetched", "queue_length", "=", "sent", "-", "received", "message_sleep_interval", "=", "Global", ".", "CONFIG_MANAGER", ".", "message_fetcher_sleep_interval", "if", "Global", ".", "CONFIG_MANAGER", ".", "show_stats", ":", "if", "(", "now", "-", "self", ".", "last_stats_check_date", ")", ".", "total_seconds", "(", ")", ">", "Global", ".", "CONFIG_MANAGER", ".", "stats_timeout", ":", "self", ".", "last_stats_check_date", "=", "now", "stats_string", "=", "f\"showing stats\\n--- [STATS] ---\\nMessage Sent: {sent}\\nMessage Received: {received}\\nMessage Sleep Interval = {message_sleep_interval}\\nQueue length = {queue_length}\\n--- [ END ] ---\"", "Global", ".", "LOGGER", ".", "info", "(", "stats_string", ")", "# if we are accumulating messages, or we have processed at least 5000 messages", "# since last check, we need to speed up the process", "messages_limit_reached", "=", "sent", "-", "self", ".", "last_queue_check_count", ">", "Global", ".", "CONFIG_MANAGER", ".", "messages_dispatched_for_system_check", "queue_limit_reached", "=", "queue_length", ">", "Global", ".", "CONFIG_MANAGER", ".", "queue_length_for_system_check", "time_limit_since_last_check_is_over", "=", "(", "now", "-", "self", ".", "last_queue_check_date", ")", ".", "total_seconds", "(", ")", ">", "Global", ".", "CONFIG_MANAGER", ".", "seconds_between_queue_check", "if", "not", "Global", ".", "CONFIG_MANAGER", ".", "fixed_message_fetcher_interval", ":", "if", "(", "messages_limit_reached", ")", "or", "(", "queue_limit_reached", "and", "time_limit_since_last_check_is_over", ")", ":", "cause", "=", "\"messages limit reached\"", "if", "messages_limit_reached", "else", "\"queue limit reached\"", "Global", ".", "LOGGER", ".", "debug", "(", "f\"triggering the throttle function due to {cause}\"", ")", "self", ".", "_adapt_sleep_interval", "(", "sent", ",", "received", ",", "queue_length", ",", "now", ")" ]
Perform a system check to determine whether we need to throttle to handle all the incoming messages
[ "Perform", "a", "system", "check", "to", "define", "if", "we", "need", "to", "throttle", "to", "handle", "all", "the", "incoming", "messages" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L197-L227
train
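The check fires the throttle in two cases: many messages dispatched since the last check, or a long queue combined with enough elapsed time. A runnable reduction of that predicate; the threshold constants are assumptions, since the real values live in CONFIG_MANAGER.

# assumed thresholds (the real values come from CONFIG_MANAGER)
MESSAGES_DISPATCHED_FOR_SYSTEM_CHECK = 5000
QUEUE_LENGTH_FOR_SYSTEM_CHECK = 100
SECONDS_BETWEEN_QUEUE_CHECK = 60

def should_throttle(sent, last_count, queue_length, seconds_since_check):
    messages_limit = sent - last_count > MESSAGES_DISPATCHED_FOR_SYSTEM_CHECK
    queue_limit = queue_length > QUEUE_LENGTH_FOR_SYSTEM_CHECK
    time_over = seconds_since_check > SECONDS_BETWEEN_QUEUE_CHECK
    return messages_limit or (queue_limit and time_over)

print(should_throttle(12000, 5000, 10, 5))   # True: messages limit reached
print(should_throttle(5100, 5000, 500, 90))  # True: queue limit + enough time elapsed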
mastro35/flows
flows/FlowsManager.py
FlowsManager._deliver_message
def _deliver_message(self, msg):
    """
    Deliver the message to the subscribed actions
    """
    my_subscribed_actions = self.subscriptions.get(msg.sender, [])
    for action in my_subscribed_actions:
        if Global.CONFIG_MANAGER.tracing_mode:
            Global.LOGGER.debug(f"delivering message to {action.name}")
        action.on_input_received(msg)
python
def _deliver_message(self, msg):
    """
    Deliver the message to the subscribed actions
    """
    my_subscribed_actions = self.subscriptions.get(msg.sender, [])
    for action in my_subscribed_actions:
        if Global.CONFIG_MANAGER.tracing_mode:
            Global.LOGGER.debug(f"delivering message to {action.name}")
        action.on_input_received(msg)
[ "def", "_deliver_message", "(", "self", ",", "msg", ")", ":", "my_subscribed_actions", "=", "self", ".", "subscriptions", ".", "get", "(", "msg", ".", "sender", ",", "[", "]", ")", "for", "action", "in", "my_subscribed_actions", ":", "if", "Global", ".", "CONFIG_MANAGER", ".", "tracing_mode", ":", "Global", ".", "LOGGER", ".", "debug", "(", "f\"delivering message to {action.name}\"", ")", "action", ".", "on_input_received", "(", "msg", ")" ]
Deliver the message to the subscribed actions
[ "Deliver", "the", "message", "to", "the", "subscripted", "actions" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L229-L237
train
mastro35/flows
flows/FlowsManager.py
FlowsManager._fetch_messages
def _fetch_messages(self):
    """
    Get an input message from the socket
    """
    try:
        [_, msg] = self.socket.recv_multipart(flags=zmq.NOBLOCK)
        if Global.CONFIG_MANAGER.tracing_mode:
            Global.LOGGER.debug("fetched a new message")

        self.fetched = self.fetched + 1
        obj = pickle.loads(msg)
        self._deliver_message(obj)
        return obj
    except zmq.error.Again:
        return None
    except Exception as new_exception:
        Global.LOGGER.error(new_exception)
        raise new_exception
python
def _fetch_messages(self):
    """
    Get an input message from the socket
    """
    try:
        [_, msg] = self.socket.recv_multipart(flags=zmq.NOBLOCK)
        if Global.CONFIG_MANAGER.tracing_mode:
            Global.LOGGER.debug("fetched a new message")

        self.fetched = self.fetched + 1
        obj = pickle.loads(msg)
        self._deliver_message(obj)
        return obj
    except zmq.error.Again:
        return None
    except Exception as new_exception:
        Global.LOGGER.error(new_exception)
        raise new_exception
[ "def", "_fetch_messages", "(", "self", ")", ":", "try", ":", "[", "_", ",", "msg", "]", "=", "self", ".", "socket", ".", "recv_multipart", "(", "flags", "=", "zmq", ".", "NOBLOCK", ")", "if", "Global", ".", "CONFIG_MANAGER", ".", "tracing_mode", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"fetched a new message\"", ")", "self", ".", "fetched", "=", "self", ".", "fetched", "+", "1", "obj", "=", "pickle", ".", "loads", "(", "msg", ")", "self", ".", "_deliver_message", "(", "obj", ")", "return", "obj", "except", "zmq", ".", "error", ".", "Again", ":", "return", "None", "except", "Exception", "as", "new_exception", ":", "Global", ".", "LOGGER", ".", "error", "(", "new_exception", ")", "raise", "new_exception" ]
Get an input message from the socket
[ "Get", "an", "input", "message", "from", "the", "socket" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L239-L256
train
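Fetching is non-blocking: zmq raises ``zmq.error.Again`` when no message is waiting, which the method maps to ``None``. A minimal sketch of a send side compatible with this receive pattern; the socket type, endpoint, and two-frame envelope layout are assumptions made only to match the ``[_, msg]`` unpacking above.

import pickle
import zmq

ctx = zmq.Context.instance()
push = ctx.socket(zmq.PUSH)
push.bind("tcp://127.0.0.1:5557")  # hypothetical endpoint
message = {"sender": "tick", "payload": "hello"}
# two frames, matching the [_, msg] unpacking on the receive side
push.send_multipart([b"", pickle.dumps(message)])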
mastro35/flows
flows/FlowsManager.py
FlowsManager.message_fetcher_coroutine
async def message_fetcher_coroutine(self, loop):
    """
    Register callback for message fetcher coroutines
    """
    Global.LOGGER.debug('registering callbacks for message fetcher coroutine')
    self.isrunning = True
    while self.isrunning:
        loop.call_soon(self._fetch_messages)
        loop.call_soon(self._perform_system_check)
        await asyncio.sleep(Global.CONFIG_MANAGER.message_fetcher_sleep_interval)

    Global.LOGGER.debug('message fetcher stopped')
python
async def message_fetcher_coroutine(self, loop):
    """
    Register callback for message fetcher coroutines
    """
    Global.LOGGER.debug('registering callbacks for message fetcher coroutine')
    self.isrunning = True
    while self.isrunning:
        loop.call_soon(self._fetch_messages)
        loop.call_soon(self._perform_system_check)
        await asyncio.sleep(Global.CONFIG_MANAGER.message_fetcher_sleep_interval)

    Global.LOGGER.debug('message fetcher stopped')
[ "async", "def", "message_fetcher_coroutine", "(", "self", ",", "loop", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "'registering callbacks for message fetcher coroutine'", ")", "self", ".", "isrunning", "=", "True", "while", "self", ".", "isrunning", ":", "loop", ".", "call_soon", "(", "self", ".", "_fetch_messages", ")", "loop", ".", "call_soon", "(", "self", ".", "_perform_system_check", ")", "await", "asyncio", ".", "sleep", "(", "Global", ".", "CONFIG_MANAGER", ".", "message_fetcher_sleep_interval", ")", "Global", ".", "LOGGER", ".", "debug", "(", "'message fetcher stopped'", ")" ]
Register callback for message fetcher coroutines
[ "Register", "callback", "for", "message", "fetcher", "coroutines" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L258-L269
train
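The coroutine schedules both the fetch and the system check on every pass, then yields for the configured interval. A sketch of driving it with a plain asyncio loop; how the manager instance is constructed and configured is assumed.

import asyncio

# 'manager' is an already-configured FlowsManager (construction assumed)
loop = asyncio.new_event_loop()
try:
    loop.run_until_complete(manager.message_fetcher_coroutine(loop))
finally:
    loop.close()

# setting manager.isrunning = False (e.g. from stop()) ends the loop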
mastro35/flows
flows/FlowsManager.py
FlowsManager._adapt_sleep_interval
def _adapt_sleep_interval(self, sent, received, queue, now):
    """
    Adapt sleep time based on the number of messages in the queue
    """
    Global.LOGGER.debug("adjusting sleep interval")

    dispatched_since_last_check = sent - self.last_queue_check_count
    seconds_since_last_check = (
        now - self.last_queue_check_date).total_seconds()

    Global.LOGGER.debug(
        str(dispatched_since_last_check) + " dispatched in the last " + str(seconds_since_last_check))

    sleep_time = (seconds_since_last_check /
                  (dispatched_since_last_check + queue + 1)) * 0.75

    if sleep_time > 0.5:
        sleep_time = 0.5

    if sleep_time < 0.0001:
        sleep_time = 0.0001

    self.last_queue_check_date = now
    self.last_queue_check_count = sent

    Global.CONFIG_MANAGER.message_fetcher_sleep_interval = sleep_time

    sleep_interval_log_string = f"new sleep_interval = {sleep_time}"
    Global.LOGGER.debug(sleep_interval_log_string)

    if Global.CONFIG_MANAGER.show_stats:
        Global.LOGGER.info(sleep_interval_log_string)
python
def _adapt_sleep_interval(self, sent, received, queue, now):
    """
    Adapt sleep time based on the number of messages in the queue
    """
    Global.LOGGER.debug("adjusting sleep interval")

    dispatched_since_last_check = sent - self.last_queue_check_count
    seconds_since_last_check = (
        now - self.last_queue_check_date).total_seconds()

    Global.LOGGER.debug(
        str(dispatched_since_last_check) + " dispatched in the last " + str(seconds_since_last_check))

    sleep_time = (seconds_since_last_check /
                  (dispatched_since_last_check + queue + 1)) * 0.75

    if sleep_time > 0.5:
        sleep_time = 0.5

    if sleep_time < 0.0001:
        sleep_time = 0.0001

    self.last_queue_check_date = now
    self.last_queue_check_count = sent

    Global.CONFIG_MANAGER.message_fetcher_sleep_interval = sleep_time

    sleep_interval_log_string = f"new sleep_interval = {sleep_time}"
    Global.LOGGER.debug(sleep_interval_log_string)

    if Global.CONFIG_MANAGER.show_stats:
        Global.LOGGER.info(sleep_interval_log_string)
[ "def", "_adapt_sleep_interval", "(", "self", ",", "sent", ",", "received", ",", "queue", ",", "now", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"adjusting sleep interval\"", ")", "dispatched_since_last_check", "=", "sent", "-", "self", ".", "last_queue_check_count", "seconds_since_last_check", "=", "(", "now", "-", "self", ".", "last_queue_check_date", ")", ".", "total_seconds", "(", ")", "Global", ".", "LOGGER", ".", "debug", "(", "str", "(", "dispatched_since_last_check", ")", "+", "\" dispatched in the last \"", "+", "str", "(", "seconds_since_last_check", ")", ")", "sleep_time", "=", "(", "seconds_since_last_check", "/", "(", "dispatched_since_last_check", "+", "queue", "+", "1", ")", ")", "*", "0.75", "if", "sleep_time", ">", "0.5", ":", "sleep_time", "=", "0.5", "if", "sleep_time", "<", "0.0001", ":", "sleep_time", "=", "0.0001", "self", ".", "last_queue_check_date", "=", "now", "self", ".", "last_queue_check_count", "=", "sent", "Global", ".", "CONFIG_MANAGER", ".", "message_fetcher_sleep_interval", "=", "sleep_time", "sleep_interval_log_string", "=", "f\"new sleep_interval = {sleep_time}\"", "Global", ".", "LOGGER", ".", "debug", "(", "sleep_interval_log_string", ")", "if", "Global", ".", "CONFIG_MANAGER", ".", "show_stats", ":", "Global", ".", "LOGGER", ".", "info", "(", "sleep_interval_log_string", ")" ]
Adapt sleep time based on the number of messages in the queue
[ "Adapt", "sleep", "time", "based", "on", "the", "number", "of", "the", "messages", "in", "queue" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L284-L314
train
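The new interval is the time since the last check spread over the work done plus the backlog, scaled by 0.75 and clamped to [0.0001, 0.5]. A worked example with round numbers:

seconds_since_last_check = 10.0
dispatched_since_last_check = 400
queue = 99

sleep_time = (seconds_since_last_check /
              (dispatched_since_last_check + queue + 1)) * 0.75
print(sleep_time)                               # 10.0 / 500 * 0.75 = 0.015 s
sleep_time = min(max(sleep_time, 0.0001), 0.5)  # same effect as the two clamps above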
mastro35/flows
flows/FlowsManager.py
FlowsManager._parse_input_parameters
def _parse_input_parameters(self):
    """
    Define and parse the command line arguments
    """
    Global.LOGGER.debug("define and parsing command line arguments")
    parser = argparse.ArgumentParser(
        description='A workflow engine for Pythonistas',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('FILENAME', nargs='+', help='name of the recipe file(s)')
    parser.add_argument('-i', '--INTERVAL', type=int, default=500,
                        metavar=('MS'),
                        help='perform a cycle each [MS] milliseconds. (default = 500)')
    parser.add_argument('-m', '--MESSAGEINTERVAL', type=int,
                        metavar=('X'),
                        help='dequeue a message each [X] tenth of milliseconds. (default = auto)')
    parser.add_argument('-s', '--STATS', type=int, default=0,
                        metavar=('SEC'),
                        help='show stats each [SEC] seconds. (default = NO STATS)')
    parser.add_argument('-t', '--TRACE', action='store_true', help='enable super verbose output, only useful for tracing')
    parser.add_argument('-v', '--VERBOSE', action='store_true', help='enable verbose output')
    parser.add_argument('-V', '--VERSION', action="version", version=__version__)

    args = parser.parse_args()
    return args
python
def _parse_input_parameters(self):
    """
    Define and parse the command line arguments
    """
    Global.LOGGER.debug("define and parsing command line arguments")
    parser = argparse.ArgumentParser(
        description='A workflow engine for Pythonistas',
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('FILENAME', nargs='+', help='name of the recipe file(s)')
    parser.add_argument('-i', '--INTERVAL', type=int, default=500,
                        metavar=('MS'),
                        help='perform a cycle each [MS] milliseconds. (default = 500)')
    parser.add_argument('-m', '--MESSAGEINTERVAL', type=int,
                        metavar=('X'),
                        help='dequeue a message each [X] tenth of milliseconds. (default = auto)')
    parser.add_argument('-s', '--STATS', type=int, default=0,
                        metavar=('SEC'),
                        help='show stats each [SEC] seconds. (default = NO STATS)')
    parser.add_argument('-t', '--TRACE', action='store_true', help='enable super verbose output, only useful for tracing')
    parser.add_argument('-v', '--VERBOSE', action='store_true', help='enable verbose output')
    parser.add_argument('-V', '--VERSION', action="version", version=__version__)

    args = parser.parse_args()
    return args
[ "def", "_parse_input_parameters", "(", "self", ")", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"define and parsing command line arguments\"", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'A workflow engine for Pythonistas'", ",", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ")", "parser", ".", "add_argument", "(", "'FILENAME'", ",", "nargs", "=", "'+'", ",", "help", "=", "'name of the recipe file(s)'", ")", "parser", ".", "add_argument", "(", "'-i'", ",", "'--INTERVAL'", ",", "type", "=", "int", ",", "default", "=", "500", ",", "metavar", "=", "(", "'MS'", ")", ",", "help", "=", "'perform a cycle each [MS] milliseconds. (default = 500)'", ")", "parser", ".", "add_argument", "(", "'-m'", ",", "'--MESSAGEINTERVAL'", ",", "type", "=", "int", ",", "metavar", "=", "(", "'X'", ")", ",", "help", "=", "'dequeue a message each [X] tenth of milliseconds. (default = auto)'", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--STATS'", ",", "type", "=", "int", ",", "default", "=", "0", ",", "metavar", "=", "(", "'SEC'", ")", ",", "help", "=", "'show stats each [SEC] seconds. (default = NO STATS)'", ")", "parser", ".", "add_argument", "(", "'-t'", ",", "'--TRACE'", ",", "action", "=", "'store_true'", ",", "help", "=", "'enable super verbose output, only useful for tracing'", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--VERBOSE'", ",", "action", "=", "'store_true'", ",", "help", "=", "'enable verbose output'", ")", "parser", ".", "add_argument", "(", "'-V'", ",", "'--VERSION'", ",", "action", "=", "\"version\"", ",", "version", "=", "__version__", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "args" ]
Define and parse the command line arguments
[ "Set", "the", "configuration", "for", "the", "Logger" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/FlowsManager.py#L316-L340
train
mesbahamin/chronophore
scripts/chronophore_migrate.py
migrate_050_to_051
def migrate_050_to_051(session):
    """Set time_out field of all flagged timesheet entries to Null.
    """
    entries_to_update = session.query(Entry).filter(
        Entry.forgot_sign_out.is_(True)).filter(
            Entry.time_out.isnot(None))

    for entry in entries_to_update:
        entry.time_out = None
        logging.info('Entry updated {}'.format(entry.uuid))
        logging.debug(entry.uuid)
        session.add(entry)
python
def migrate_050_to_051(session):
    """Set time_out field of all flagged timesheet entries to Null.
    """
    entries_to_update = session.query(Entry).filter(
        Entry.forgot_sign_out.is_(True)).filter(
            Entry.time_out.isnot(None))

    for entry in entries_to_update:
        entry.time_out = None
        logging.info('Entry updated {}'.format(entry.uuid))
        logging.debug(entry.uuid)
        session.add(entry)
[ "def", "migrate_050_to_051", "(", "session", ")", ":", "entries_to_update", "=", "session", ".", "query", "(", "Entry", ")", ".", "filter", "(", "Entry", ".", "forgot_sign_out", ".", "is_", "(", "True", ")", ")", ".", "filter", "(", "Entry", ".", "time_out", ".", "isnot", "(", "None", ")", ")", "for", "entry", "in", "entries_to_update", ":", "entry", ".", "time_out", "=", "None", "logging", ".", "info", "(", "'Entry updated {}'", ".", "format", "(", "entry", ".", "uuid", ")", ")", "logging", ".", "debug", "(", "entry", ".", "uuid", ")", "session", ".", "add", "(", "entry", ")" ]
Set time_out field of all flagged timesheet entries to Null.
[ "Set", "time_out", "field", "of", "all", "flagged", "timesheet", "entries", "to", "Null", "." ]
ee140c61b4dfada966f078de8304bac737cec6f7
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/scripts/chronophore_migrate.py#L17-L29
train
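The migration is a plain SQLAlchemy pass: query flagged entries that still carry a ``time_out``, null the field, and re-add them to the session. A usage sketch, assuming the function and the Entry model are importable; the database path is hypothetical, and committing is left to the caller as in the original.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine("sqlite:///chronophore.sqlite")  # hypothetical path
session = sessionmaker(bind=engine)()
migrate_050_to_051(session)
session.commit()  # the function stages the changes but does not commit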
ehansis/ozelot
ozelot/etl/tasks.py
get_task_param_string
def get_task_param_string(task):
    """Get all parameters of a task as one string

    Returns:
        str: task parameter string
    """
    # get dict str -> str from luigi
    param_dict = task.to_str_params()

    # sort keys, serialize
    items = []
    for key in sorted(param_dict.keys()):
        items.append("'{:s}': '{:s}'".format(key, param_dict[key]))

    return "{" + ", ".join(items) + "}"
python
def get_task_param_string(task):
    """Get all parameters of a task as one string

    Returns:
        str: task parameter string
    """
    # get dict str -> str from luigi
    param_dict = task.to_str_params()

    # sort keys, serialize
    items = []
    for key in sorted(param_dict.keys()):
        items.append("'{:s}': '{:s}'".format(key, param_dict[key]))

    return "{" + ", ".join(items) + "}"
[ "def", "get_task_param_string", "(", "task", ")", ":", "# get dict str -> str from luigi", "param_dict", "=", "task", ".", "to_str_params", "(", ")", "# sort keys, serialize", "items", "=", "[", "]", "for", "key", "in", "sorted", "(", "param_dict", ".", "keys", "(", ")", ")", ":", "items", ".", "append", "(", "\"'{:s}': '{:s}'\"", ".", "format", "(", "key", ",", "param_dict", "[", "key", "]", ")", ")", "return", "\"{\"", "+", "\", \"", ".", "join", "(", "items", ")", "+", "\"}\"" ]
Get all parameters of a task as one string

Returns:
    str: task parameter string
[ "Get", "all", "parameters", "of", "a", "task", "as", "one", "string" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L18-L32
train
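Because the keys are sorted, the resulting string is deterministic, which makes it usable for logging or as a cache key. A runnable input/output pair, assuming the function above is in scope; FakeTask is a stand-in exposing luigi's to_str_params() shape.

class FakeTask:
    # stand-in exposing luigi's to_str_params() shape
    def to_str_params(self):
        return {'date': '2024-01-01', 'city': 'Berlin'}

print(get_task_param_string(FakeTask()))
# {'city': 'Berlin', 'date': '2024-01-01'}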
ehansis/ozelot
ozelot/etl/tasks.py
check_completion
def check_completion(task, mark_incomplete=False, clear=False, return_stats=False):
    """Recursively check if a task and all its requirements are complete

    Args:
        task (derived from luigi.Task): Task to check completion for; check everything
            'downstream' from that task.

        mark_incomplete (bool): If ``True`` set any task as incomplete for which a requirement
            is found to be incomplete (checked recursively). This works only for tasks
            derived from :class:`ORMTask`.

        clear (bool): If ``True``, call the :func:`clear()` method of any task for which a requirement
            is found to be incomplete (checked recursively). This implies ``mark_incomplete = True``.
            This works only for tasks derived from :class:`ORMTask`.

        return_stats (bool): If ``True``, return task checking statistics in addition to
            completion status

    Returns:
        bool: ``True`` if the task, all its requirements and (recursively) all their requirements
            are complete, ``False`` otherwise.
    """
    # run recursive task checking, get stats
    to_clear = dict()
    is_complete, stats = _check_completion(task,
                                           mark_incomplete=mark_incomplete,
                                           clear=clear,
                                           stats={},
                                           visited=dict(),
                                           to_clear=to_clear)

    # task clearing needs to happen top-down: because of foreign key constraints, a task can
    # only be cleared once all tasks that require it have been cleared
    while to_clear:
        # find all tasks that we can currently clear - tasks not required by other tasks;
        # iterate over list of keys to be able to modify dict while iterating
        found_clearable_task = False
        for task_id in list(to_clear.keys()):
            v = to_clear[task_id]
            if not v['required_by']:
                # this is a task that can be cleared - no other task requires it
                found_clearable_task = True
                task = v['task']

                if isinstance(task, ORMTask):
                    task.mark_incomplete()
                    task.clear()
                    _increment_stats(stats, 'Cleared')
                    config.logger.info("Cleared task: " + task_id)
                else:
                    config.logger.info('Cannot clear task, not an ORMTask: ' + task_id)

                # remove the task from the list of tasks that need clearing, remove references
                # in the required_by lists of all other tasks; this is not an efficient implementation,
                # O(n^2), could be made O(n) using lookup tables of the task graph
                del to_clear[task_id]
                for w in to_clear.values():
                    w['required_by'].discard(task_id)

        if not found_clearable_task:
            raise RuntimeError("Error in recursive task clearing, no clearable task found")

    config.logger.info("Task completion checking, summary:\n" + str(stats))

    if return_stats:
        return is_complete, stats
    else:
        return is_complete
python
def check_completion(task, mark_incomplete=False, clear=False, return_stats=False):
    """Recursively check if a task and all its requirements are complete

    Args:
        task (derived from luigi.Task): Task to check completion for; check everything
            'downstream' from that task.

        mark_incomplete (bool): If ``True`` set any task as incomplete for which a requirement
            is found to be incomplete (checked recursively). This works only for tasks
            derived from :class:`ORMTask`.

        clear (bool): If ``True``, call the :func:`clear()` method of any task for which a requirement
            is found to be incomplete (checked recursively). This implies ``mark_incomplete = True``.
            This works only for tasks derived from :class:`ORMTask`.

        return_stats (bool): If ``True``, return task checking statistics in addition to
            completion status

    Returns:
        bool: ``True`` if the task, all its requirements and (recursively) all their requirements
            are complete, ``False`` otherwise.
    """
    # run recursive task checking, get stats
    to_clear = dict()
    is_complete, stats = _check_completion(task,
                                           mark_incomplete=mark_incomplete,
                                           clear=clear,
                                           stats={},
                                           visited=dict(),
                                           to_clear=to_clear)

    # task clearing needs to happen top-down: because of foreign key constraints, a task can
    # only be cleared once all tasks that require it have been cleared
    while to_clear:
        # find all tasks that we can currently clear - tasks not required by other tasks;
        # iterate over list of keys to be able to modify dict while iterating
        found_clearable_task = False
        for task_id in list(to_clear.keys()):
            v = to_clear[task_id]
            if not v['required_by']:
                # this is a task that can be cleared - no other task requires it
                found_clearable_task = True
                task = v['task']

                if isinstance(task, ORMTask):
                    task.mark_incomplete()
                    task.clear()
                    _increment_stats(stats, 'Cleared')
                    config.logger.info("Cleared task: " + task_id)
                else:
                    config.logger.info('Cannot clear task, not an ORMTask: ' + task_id)

                # remove the task from the list of tasks that need clearing, remove references
                # in the required_by lists of all other tasks; this is not an efficient implementation,
                # O(n^2), could be made O(n) using lookup tables of the task graph
                del to_clear[task_id]
                for w in to_clear.values():
                    w['required_by'].discard(task_id)

        if not found_clearable_task:
            raise RuntimeError("Error in recursive task clearing, no clearable task found")

    config.logger.info("Task completion checking, summary:\n" + str(stats))

    if return_stats:
        return is_complete, stats
    else:
        return is_complete
[ "def", "check_completion", "(", "task", ",", "mark_incomplete", "=", "False", ",", "clear", "=", "False", ",", "return_stats", "=", "False", ")", ":", "# run recursive task checking, get stats", "to_clear", "=", "dict", "(", ")", "is_complete", ",", "stats", "=", "_check_completion", "(", "task", ",", "mark_incomplete", "=", "mark_incomplete", ",", "clear", "=", "clear", ",", "stats", "=", "{", "}", ",", "visited", "=", "dict", "(", ")", ",", "to_clear", "=", "to_clear", ")", "# task clearing needs to happen top-down: because of foreign key constraints, a task can", "# only be cleared once all tasks that require it have been cleared", "while", "to_clear", ":", "# find all tasks that we can currently clear - tasks not required by other tasks;", "# iterate over list of keys to be able to modify dict while iterating", "found_clearable_task", "=", "False", "for", "task_id", "in", "list", "(", "to_clear", ".", "keys", "(", ")", ")", ":", "v", "=", "to_clear", "[", "task_id", "]", "if", "not", "v", "[", "'required_by'", "]", ":", "# this is a task that can be cleared - no other task requires it", "found_clearable_task", "=", "True", "task", "=", "v", "[", "'task'", "]", "if", "isinstance", "(", "task", ",", "ORMTask", ")", ":", "task", ".", "mark_incomplete", "(", ")", "task", ".", "clear", "(", ")", "_increment_stats", "(", "stats", ",", "'Cleared'", ")", "config", ".", "logger", ".", "info", "(", "\"Cleared task: \"", "+", "task_id", ")", "else", ":", "config", ".", "logger", ".", "info", "(", "'Cannot clear task, not an ORMTask: '", "+", "task_id", ")", "# remove the task from the list of tasks that need clearing, remove references", "# in the required_by lists of all other tasks; this is not an efficient implementation,", "# O(n^2), could be made O(n) using lookup tables of the task graph", "del", "to_clear", "[", "task_id", "]", "for", "w", "in", "to_clear", ".", "values", "(", ")", ":", "w", "[", "'required_by'", "]", ".", "discard", "(", "task_id", ")", "if", "not", "found_clearable_task", ":", "raise", "RuntimeError", "(", "\"Error in recursive task clearing, no clearable task found\"", ")", "config", ".", "logger", ".", "info", "(", "\"Task completion checking, summary:\\n\"", "+", "str", "(", "stats", ")", ")", "if", "return_stats", ":", "return", "is_complete", ",", "stats", "else", ":", "return", "is_complete" ]
Recursively check if a task and all its requirements are complete

Args:
    task (derived from luigi.Task): Task to check completion for; check everything
        'downstream' from that task.

    mark_incomplete (bool): If ``True`` set any task as incomplete for which a requirement
        is found to be incomplete (checked recursively). This works only for tasks
        derived from :class:`ORMTask`.

    clear (bool): If ``True``, call the :func:`clear()` method of any task for which a requirement
        is found to be incomplete (checked recursively). This implies ``mark_incomplete = True``.
        This works only for tasks derived from :class:`ORMTask`.

    return_stats (bool): If ``True``, return task checking statistics in addition to
        completion status

Returns:
    bool: ``True`` if the task, all its requirements and (recursively) all their requirements
        are complete, ``False`` otherwise.
[ "Recursively", "check", "if", "a", "task", "and", "all", "its", "requirements", "are", "complete" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L191-L257
train
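A typical call checks an entire pipeline from its top-level task and, with ``clear=True``, removes stale outputs in dependency-safe order so they get rebuilt. A usage sketch; 'LoadEverything' is a hypothetical top-level pipeline task.

from ozelot.etl.tasks import check_completion

# 'LoadEverything' is a hypothetical top-level pipeline task
is_complete, stats = check_completion(LoadEverything(),
                                      clear=True,
                                      return_stats=True)
if not is_complete:
    LoadEverything.build(local_scheduler=True)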
ehansis/ozelot
ozelot/etl/tasks.py
TaskBase.build
def build(cls, local_scheduler=True, **task_params):
    """Instantiate the task and build it with luigi

    Args:
        local_scheduler (bool): use a local scheduler (True, default) or a remote scheduler
        task_params: parameters to pass to task for instantiation
    """
    luigi.build([cls(**task_params)], local_scheduler=local_scheduler)
python
def build(cls, local_scheduler=True, **task_params):
    """Instantiate the task and build it with luigi

    Args:
        local_scheduler (bool): use a local scheduler (True, default) or a remote scheduler
        task_params: parameters to pass to task for instantiation
    """
    luigi.build([cls(**task_params)], local_scheduler=local_scheduler)
[ "def", "build", "(", "cls", ",", "local_scheduler", "=", "True", ",", "*", "*", "task_params", ")", ":", "luigi", ".", "build", "(", "[", "cls", "(", "*", "*", "task_params", ")", "]", ",", "local_scheduler", "=", "local_scheduler", ")" ]
Instantiate the task and build it with luigi

Args:
    local_scheduler (bool): use a local scheduler (True, default) or a remote scheduler
    task_params: parameters to pass to task for instantiation
[ "Instantiate", "the", "task", "and", "build", "it", "with", "luigi" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L63-L70
train
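Because ``build`` wraps ``luigi.build`` around a single instance, a task can be run directly from Python without a luigi command line. A one-line usage sketch; 'MyTask' and the 'date' parameter are illustrative.

# roughly equivalent to: luigi --module mypipeline MyTask --date 2024-01-01
MyTask.build(local_scheduler=True, date='2024-01-01')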
ehansis/ozelot
ozelot/etl/tasks.py
ORMObjectCreatorMixin.clear
def clear(
        self  # type: ORMTask
):
    """Delete all objects created by this task.

    Iterate over `self.object_classes` and delete all objects of the listed classes.
    """
    # mark this task as incomplete
    self.mark_incomplete()

    # delete objects
    for object_class in self.object_classes:
        self.session.query(object_class).delete()
    self.close_session()
python
def clear(
        self  # type: ORMTask
):
    """Delete all objects created by this task.

    Iterate over `self.object_classes` and delete all objects of the listed classes.
    """
    # mark this task as incomplete
    self.mark_incomplete()

    # delete objects
    for object_class in self.object_classes:
        self.session.query(object_class).delete()
    self.close_session()
[ "def", "clear", "(", "self", "# type: ORMTask", ")", ":", "# mark this task as incomplete", "self", ".", "mark_incomplete", "(", ")", "# delete objects", "for", "object_class", "in", "self", ".", "object_classes", ":", "self", ".", "session", ".", "query", "(", "object_class", ")", ".", "delete", "(", ")", "self", ".", "close_session", "(", ")" ]
Delete all objects created by this task.

Iterate over `self.object_classes` and delete all objects of the listed classes.
[ "Delete", "all", "objects", "created", "by", "this", "task", "." ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L347-L361
train
ehansis/ozelot
ozelot/etl/tasks.py
ORMWrapperTask.complete
def complete(self):
    """Task is complete if completion marker is set and all requirements are complete
    """
    is_complete = super(ORMWrapperTask, self).complete()
    for req in self.requires():
        is_complete &= req.complete()
    return is_complete
python
def complete(self):
    """Task is complete if completion marker is set and all requirements are complete
    """
    is_complete = super(ORMWrapperTask, self).complete()
    for req in self.requires():
        is_complete &= req.complete()
    return is_complete
[ "def", "complete", "(", "self", ")", ":", "is_complete", "=", "super", "(", "ORMWrapperTask", ",", "self", ")", ".", "complete", "(", ")", "for", "req", "in", "self", ".", "requires", "(", ")", ":", "is_complete", "&=", "req", ".", "complete", "(", ")", "return", "is_complete" ]
Task is complete if completion marker is set and all requirements are complete
[ "Task", "is", "complete", "if", "completion", "marker", "is", "set", "and", "all", "requirements", "are", "complete" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/etl/tasks.py#L390-L397
train
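The two pieces fit together: a wrapper task only reports complete when its own marker and every requirement are complete, while the mixin's ``clear`` removes exactly the rows of the classes listed in ``object_classes``. A sketch of a task using the mixin; the model classes and the run() body are assumptions.

from ozelot.etl.tasks import ORMTask, ORMObjectCreatorMixin

class LoadProducts(ORMObjectCreatorMixin, ORMTask):
    # 'Product' and 'ProductAttribute' are hypothetical model classes
    object_classes = [Product, ProductAttribute]

    def run(self):
        # create rows for each class in object_classes, then mark complete
        ...

# LoadProducts().clear() marks the task incomplete and deletes all
# Product and ProductAttribute rows in one pass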
maljovec/topopy
topopy/MorseSmaleComplex.py
MorseSmaleComplex.save
def save(self, filename=None):
    """ Saves a constructed Morse-Smale Complex to a json file
        @ In, filename, a filename for storing the hierarchical
        merging of features and the base level partitions of the data
    """
    if filename is None:
        filename = "morse_smale_complex.json"
    with open(filename, "w") as fp:
        fp.write(self.to_json())
python
def save(self, filename=None):
    """ Saves a constructed Morse-Smale Complex to a json file
        @ In, filename, a filename for storing the hierarchical
        merging of features and the base level partitions of the data
    """
    if filename is None:
        filename = "morse_smale_complex.json"
    with open(filename, "w") as fp:
        fp.write(self.to_json())
[ "def", "save", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "\"morse_smale_complex.json\"", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "fp", ":", "fp", ".", "write", "(", "self", ".", "to_json", "(", ")", ")" ]
Saves a constructed Morse-Smale Complex to a json file

@ In, filename, a filename for storing the hierarchical
merging of features and the base level partitions of the data
[ "Saves", "a", "constructed", "Morse", "-", "Smale", "Complex", "in", "json", "file" ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L159-L168
train
maljovec/topopy
topopy/MorseSmaleComplex.py
MorseSmaleComplex.get_label
def get_label(self, indices=None):
    """ Returns the label pair indices requested by the user
        @ In, indices, a list of non-negative integers specifying
        the row indices to return
        @ Out, a list of integer 2-tuples specifying the minimum and
        maximum index of the specified rows.
    """
    if indices is None:
        indices = list(range(0, self.get_sample_size()))
    elif isinstance(indices, collections.Iterable):
        indices = sorted(list(set(indices)))
    else:
        indices = [indices]

    if len(indices) == 0:
        return []

    partitions = self.get_partitions(self.persistence)
    labels = self.X.shape[0] * [None]
    for label, partition_indices in partitions.items():
        for idx in np.intersect1d(partition_indices, indices):
            labels[idx] = label

    labels = np.array(labels)
    if len(indices) == 1:
        return labels[indices][0]
    return labels[indices]
python
def get_label(self, indices=None):
    """ Returns the label pair indices requested by the user
        @ In, indices, a list of non-negative integers specifying
        the row indices to return
        @ Out, a list of integer 2-tuples specifying the minimum and
        maximum index of the specified rows.
    """
    if indices is None:
        indices = list(range(0, self.get_sample_size()))
    elif isinstance(indices, collections.Iterable):
        indices = sorted(list(set(indices)))
    else:
        indices = [indices]

    if len(indices) == 0:
        return []

    partitions = self.get_partitions(self.persistence)
    labels = self.X.shape[0] * [None]
    for label, partition_indices in partitions.items():
        for idx in np.intersect1d(partition_indices, indices):
            labels[idx] = label

    labels = np.array(labels)
    if len(indices) == 1:
        return labels[indices][0]
    return labels[indices]
[ "def", "get_label", "(", "self", ",", "indices", "=", "None", ")", ":", "if", "indices", "is", "None", ":", "indices", "=", "list", "(", "range", "(", "0", ",", "self", ".", "get_sample_size", "(", ")", ")", ")", "elif", "isinstance", "(", "indices", ",", "collections", ".", "Iterable", ")", ":", "indices", "=", "sorted", "(", "list", "(", "set", "(", "indices", ")", ")", ")", "else", ":", "indices", "=", "[", "indices", "]", "if", "len", "(", "indices", ")", "==", "0", ":", "return", "[", "]", "partitions", "=", "self", ".", "get_partitions", "(", "self", ".", "persistence", ")", "labels", "=", "self", ".", "X", ".", "shape", "[", "0", "]", "*", "[", "None", "]", "for", "label", ",", "partition_indices", "in", "partitions", ".", "items", "(", ")", ":", "for", "idx", "in", "np", ".", "intersect1d", "(", "partition_indices", ",", "indices", ")", ":", "labels", "[", "idx", "]", "=", "label", "labels", "=", "np", ".", "array", "(", "labels", ")", "if", "len", "(", "indices", ")", "==", "1", ":", "return", "labels", "[", "indices", "]", "[", "0", "]", "return", "labels", "[", "indices", "]" ]
Returns the label pair indices requested by the user

@ In, indices, a list of non-negative integers specifying the
row indices to return
@ Out, a list of integer 2-tuples specifying the minimum and
maximum index of the specified rows.
[ "Returns", "the", "label", "pair", "indices", "requested", "by", "the", "user" ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L316-L341
train
maljovec/topopy
topopy/MorseSmaleComplex.py
MorseSmaleComplex.get_sample_size
def get_sample_size(self, key=None):
    """ Returns the number of samples in the input data
        @ In, key, an optional 2-tuple specifying a min-max id pair
        used for determining which partition size should be returned.
        If not specified then the size of the entire data set will be
        returned.
        @ Out, an integer specifying the number of samples.
    """
    if key is None:
        return len(self.Y)
    else:
        return len(self.get_partitions(self.persistence)[key])
python
def get_sample_size(self, key=None):
    """ Returns the number of samples in the input data
        @ In, key, an optional 2-tuple specifying a min-max id pair
        used for determining which partition size should be returned.
        If not specified then the size of the entire data set will be
        returned.
        @ Out, an integer specifying the number of samples.
    """
    if key is None:
        return len(self.Y)
    else:
        return len(self.get_partitions(self.persistence)[key])
[ "def", "get_sample_size", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "return", "len", "(", "self", ".", "Y", ")", "else", ":", "return", "len", "(", "self", ".", "get_partitions", "(", "self", ".", "persistence", ")", "[", "key", "]", ")" ]
Returns the number of samples in the input data

@ In, key, an optional 2-tuple specifying a min-max id pair
used for determining which partition size should be returned.
If not specified then the size of the entire data set will be
returned.
@ Out, an integer specifying the number of samples.
[ "Returns", "the", "number", "of", "samples", "in", "the", "input", "data" ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L354-L365
train
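A companion sketch for get_sample_size under the same assumption of a fitted complex `msc`; the partition key is a made-up (min, max) extremum pair:

n_total = msc.get_sample_size()            # size of the whole data set
n_part = msc.get_sample_size(key=(2, 17))  # size of one partition, keyed by its extremum pair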
maljovec/topopy
topopy/MorseSmaleComplex.py
MorseSmaleComplex.to_json
def to_json(self): """ Writes the complete Morse-Smale merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all minima and maxima. """ capsule = {} capsule["Hierarchy"] = [] for ( dying, (persistence, surviving, saddle), ) in self.merge_sequence.items(): capsule["Hierarchy"].append( { "Dying": dying, "Persistence": persistence, "Surviving": surviving, "Saddle": saddle, } ) capsule["Partitions"] = [] base = np.array([None, None] * len(self.Y)).reshape(-1, 2) for (min_index, max_index), items in self.base_partitions.items(): base[items, :] = [min_index, max_index] capsule["Partitions"] = base.tolist() return json.dumps(capsule)
python
def to_json(self): """ Writes the complete Morse-Smale merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all minima and maxima. """ capsule = {} capsule["Hierarchy"] = [] for ( dying, (persistence, surviving, saddle), ) in self.merge_sequence.items(): capsule["Hierarchy"].append( { "Dying": dying, "Persistence": persistence, "Surviving": surviving, "Saddle": saddle, } ) capsule["Partitions"] = [] base = np.array([None, None] * len(self.Y)).reshape(-1, 2) for (min_index, max_index), items in self.base_partitions.items(): base[items, :] = [min_index, max_index] capsule["Partitions"] = base.tolist() return json.dumps(capsule)
[ "def", "to_json", "(", "self", ")", ":", "capsule", "=", "{", "}", "capsule", "[", "\"Hierarchy\"", "]", "=", "[", "]", "for", "(", "dying", ",", "(", "persistence", ",", "surviving", ",", "saddle", ")", ",", ")", "in", "self", ".", "merge_sequence", ".", "items", "(", ")", ":", "capsule", "[", "\"Hierarchy\"", "]", ".", "append", "(", "{", "\"Dying\"", ":", "dying", ",", "\"Persistence\"", ":", "persistence", ",", "\"Surviving\"", ":", "surviving", ",", "\"Saddle\"", ":", "saddle", ",", "}", ")", "capsule", "[", "\"Partitions\"", "]", "=", "[", "]", "base", "=", "np", ".", "array", "(", "[", "None", ",", "None", "]", "*", "len", "(", "self", ".", "Y", ")", ")", ".", "reshape", "(", "-", "1", ",", "2", ")", "for", "(", "min_index", ",", "max_index", ")", ",", "items", "in", "self", ".", "base_partitions", ".", "items", "(", ")", ":", "base", "[", "items", ",", ":", "]", "=", "[", "min_index", ",", "max_index", "]", "capsule", "[", "\"Partitions\"", "]", "=", "base", ".", "tolist", "(", ")", "return", "json", ".", "dumps", "(", "capsule", ")" ]
Writes the complete Morse-Smale merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all minima and maxima.
[ "Writes", "the", "complete", "Morse", "-", "Smale", "merge", "hierarchy", "to", "a", "string", "object", "." ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L382-L408
train
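The string returned by to_json is plain JSON, so it round-trips through the standard library; a minimal sketch, again assuming a fitted complex `msc`:

import json

capsule = json.loads(msc.to_json())
print(len(capsule["Hierarchy"]))   # one entry per dying extremum
print(capsule["Partitions"][:3])   # per-sample [min_index, max_index] labels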
yymao/generic-catalog-reader
GCR/utils.py
dict_to_numpy_array
def dict_to_numpy_array(d): """ Convert a dict of 1d arrays to a numpy recarray """ return fromarrays(d.values(), np.dtype([(str(k), v.dtype) for k, v in d.items()]))
python
def dict_to_numpy_array(d): """ Convert a dict of 1d arrays to a numpy recarray """ return fromarrays(d.values(), np.dtype([(str(k), v.dtype) for k, v in d.items()]))
[ "def", "dict_to_numpy_array", "(", "d", ")", ":", "return", "fromarrays", "(", "d", ".", "values", "(", ")", ",", "np", ".", "dtype", "(", "[", "(", "str", "(", "k", ")", ",", "v", ".", "dtype", ")", "for", "k", ",", "v", "in", "d", ".", "items", "(", ")", "]", ")", ")" ]
Convert a dict of 1d arrays to a numpy recarray
[ "Convert", "a", "dict", "of", "1d", "array", "to", "a", "numpy", "recarray" ]
bc6267ac41b9f68106ed6065184469ac13fdc0b6
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/utils.py#L23-L27
train
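dict_to_numpy_array is a thin wrapper over numpy's records helper; a self-contained sketch of the same idea using np.rec.fromarrays (the dict contents are made up):

import numpy as np

d = {'x': np.array([1.0, 2.0]), 'flag': np.array([True, False])}
rec = np.rec.fromarrays(list(d.values()),
                        np.dtype([(str(k), v.dtype) for k, v in d.items()]))
print(rec['x'], rec['flag'])   # field access by name, as with any recarray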
yymao/generic-catalog-reader
GCR/utils.py
concatenate_1d
def concatenate_1d(arrays): """ Concatenate 1D numpy arrays. Similar to np.concatenate but works with empty input and masked arrays. """ if len(arrays) == 0: return np.array([]) if len(arrays) == 1: return np.asanyarray(arrays[0]) if any(map(np.ma.is_masked, arrays)): return np.ma.concatenate(arrays) return np.concatenate(arrays)
python
def concatenate_1d(arrays): """ Concatenate 1D numpy arrays. Similar to np.concatenate but works with empty input and masked arrays. """ if len(arrays) == 0: return np.array([]) if len(arrays) == 1: return np.asanyarray(arrays[0]) if any(map(np.ma.is_masked, arrays)): return np.ma.concatenate(arrays) return np.concatenate(arrays)
[ "def", "concatenate_1d", "(", "arrays", ")", ":", "if", "len", "(", "arrays", ")", "==", "0", ":", "return", "np", ".", "array", "(", "[", "]", ")", "if", "len", "(", "arrays", ")", "==", "1", ":", "return", "np", ".", "asanyarray", "(", "arrays", "[", "0", "]", ")", "if", "any", "(", "map", "(", "np", ".", "ma", ".", "is_masked", ",", "arrays", ")", ")", ":", "return", "np", ".", "ma", ".", "concatenate", "(", "arrays", ")", "return", "np", ".", "concatenate", "(", "arrays", ")" ]
Concatenate 1D numpy arrays. Similar to np.concatenate but works with empty input and masked arrays.
[ "Concatenate", "1D", "numpy", "arrays", ".", "Similar", "to", "np", ".", "concatenate", "but", "work", "with", "empty", "input", "and", "masked", "arrays", "." ]
bc6267ac41b9f68106ed6065184469ac13fdc0b6
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/utils.py#L29-L40
train
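A quick sketch of why the masked-array branch matters: np.ma.concatenate preserves the mask, which a plain np.concatenate would lose:

import numpy as np

a = np.ma.masked_array([1, 2, 3], mask=[False, True, False])
b = np.array([4, 5])
print(np.ma.concatenate([a, b]))   # [1 -- 3 4 5], mask intact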
mojaie/chorus
chorus/model/atom.py
Atom.formula_html
def formula_html(self, reversed_=False): """Chemical formula HTML Args: reversed_ (bool): reversed text for leftmost atom groups """ if self.H_count == 1: text = "H" elif self.H_count > 1: text = "H<sub>{}</sub>".format(self.H_count) else: text = "" seq = [self.symbol, text, self.charge_sign_html()] if reversed_: seq = reversed(seq) return "".join(seq)
python
def formula_html(self, reversed_=False): """Chemical formula HTML Args: reversed_ (bool): reversed text for leftmost atom groups """ if self.H_count == 1: text = "H" elif self.H_count > 1: text = "H<sub>{}</sub>".format(self.H_count) else: text = "" seq = [self.symbol, text, self.charge_sign_html()] if reversed_: seq = reversed(seq) return "".join(seq)
[ "def", "formula_html", "(", "self", ",", "reversed_", "=", "False", ")", ":", "if", "self", ".", "H_count", "==", "1", ":", "text", "=", "\"H\"", "elif", "self", ".", "H_count", ">", "1", ":", "text", "=", "\"H<sub>{}</sub>\"", ".", "format", "(", "self", ".", "H_count", ")", "else", ":", "text", "=", "\"\"", "seq", "=", "[", "self", ".", "symbol", ",", "text", ",", "self", ".", "charge_sign_html", "(", ")", "]", "if", "reversed_", ":", "seq", "=", "reversed", "(", "seq", ")", "return", "\"\"", ".", "join", "(", "seq", ")" ]
Chemical formula HTML Args: reversed_ (bool): reversed text for leftmost atom groups
[ "Chemical", "formula", "HTML" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/atom.py#L123-L138
train
mojaie/chorus
chorus/model/atom.py
Atom.charge_sign
def charge_sign(self): """Charge sign text""" if self.charge > 0: sign = "+" elif self.charge < 0: sign = "–" # en dash, not hyphen-minus else: return "" ab = abs(self.charge) if ab > 1: return str(ab) + sign return sign
python
def charge_sign(self): """Charge sign text""" if self.charge > 0: sign = "+" elif self.charge < 0: sign = "–" # en dash, not hyphen-minus else: return "" ab = abs(self.charge) if ab > 1: return str(ab) + sign return sign
[ "def", "charge_sign", "(", "self", ")", ":", "if", "self", ".", "charge", ">", "0", ":", "sign", "=", "\"+\"", "elif", "self", ".", "charge", "<", "0", ":", "sign", "=", "\"–\" ", "en dash, not hyphen-minus", "else", ":", "return", "\"\"", "ab", "=", "abs", "(", "self", ".", "charge", ")", "if", "ab", ">", "1", ":", "return", "str", "(", "ab", ")", "+", "sign", "return", "sign" ]
Charge sign text
[ "Charge", "sign", "text" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/atom.py#L154-L165
train
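A standalone restatement of the sign logic with its expected outputs may help; this mirrors the method above but takes the charge as a plain argument instead of reading self.charge:

def charge_sign(charge):
    # same branching as Atom.charge_sign
    if charge > 0:
        sign = "+"
    elif charge < 0:
        sign = "–"  # en dash, not hyphen-minus
    else:
        return ""
    ab = abs(charge)
    return str(ab) + sign if ab > 1 else sign

assert charge_sign(0) == ""
assert charge_sign(1) == "+"
assert charge_sign(2) == "2+"
assert charge_sign(-3) == "3–"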
mastro35/flows
flows/MessageDispatcher.py
MessageDispatcher.send_message
def send_message(self, message): """ Dispatch a message using 0mq """ with self._instance_lock: if message is None: Global.LOGGER.error("can't deliver a null message") return if message.sender is None: Global.LOGGER.error(f"can't deliver anonymous messages with body {message.body}") return if message.receiver is None: Global.LOGGER.error( f"can't deliver message from {message.sender}: recipient not specified") return if message.message is None: Global.LOGGER.error(f"can't deliver message with no body from {message.sender}") return sender = "*" + message.sender + "*" self.socket.send_multipart( [bytes(sender, 'utf-8'), pickle.dumps(message)]) if Global.CONFIG_MANAGER.tracing_mode: Global.LOGGER.debug("dispatched : " + message.sender + "-" + message.message + "-" + message.receiver) self.dispatched = self.dispatched + 1
python
def send_message(self, message): """ Dispatch a message using 0mq """ with self._instance_lock: if message is None: Global.LOGGER.error("can't deliver a null message") return if message.sender is None: Global.LOGGER.error(f"can't deliver anonymous messages with body {message.body}") return if message.receiver is None: Global.LOGGER.error( f"can't deliver message from {message.sender}: recipient not specified") return if message.message is None: Global.LOGGER.error(f"can't deliver message with no body from {message.sender}") return sender = "*" + message.sender + "*" self.socket.send_multipart( [bytes(sender, 'utf-8'), pickle.dumps(message)]) if Global.CONFIG_MANAGER.tracing_mode: Global.LOGGER.debug("dispatched : " + message.sender + "-" + message.message + "-" + message.receiver) self.dispatched = self.dispatched + 1
[ "def", "send_message", "(", "self", ",", "message", ")", ":", "with", "self", ".", "_instance_lock", ":", "if", "message", "is", "None", ":", "Global", ".", "LOGGER", ".", "error", "(", "\"can't deliver a null messages\"", ")", "return", "if", "message", ".", "sender", "is", "None", ":", "Global", ".", "LOGGER", ".", "error", "(", "f\"can't deliver anonymous messages with body {message.body}\"", ")", "return", "if", "message", ".", "receiver", "is", "None", ":", "Global", ".", "LOGGER", ".", "error", "(", "f\"can't deliver message from {message.sender}: recipient not specified\"", ")", "return", "if", "message", ".", "message", "is", "None", ":", "Global", ".", "LOGGER", ".", "error", "(", "f\"can't deliver message with no body from {message.sender}\"", ")", "return", "sender", "=", "\"*\"", "+", "message", ".", "sender", "+", "\"*\"", "self", ".", "socket", ".", "send_multipart", "(", "[", "bytes", "(", "sender", ",", "'utf-8'", ")", ",", "pickle", ".", "dumps", "(", "message", ")", "]", ")", "if", "Global", ".", "CONFIG_MANAGER", ".", "tracing_mode", ":", "Global", ".", "LOGGER", ".", "debug", "(", "\"dispatched : \"", "+", "message", ".", "sender", "+", "\"-\"", "+", "message", ".", "message", "+", "\"-\"", "+", "message", ".", "receiver", ")", "self", ".", "dispatched", "=", "self", ".", "dispatched", "+", "1" ]
Dispatch a message using 0mq
[ "Dispatch", "a", "message", "using", "0mq" ]
05e488385673a69597b5b39c7728795aa4d5eb18
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/MessageDispatcher.py#L84-L118
train
auto-mat/django-webmap-corpus
webmap/models.py
update_properties_cache
def update_properties_cache(sender, instance, action, reverse, model, pk_set, **kwargs): "Update the property cache when a POI is saved. It does not yet handle property removal." if action == 'post_add': instance.save_properties_cache()
python
def update_properties_cache(sender, instance, action, reverse, model, pk_set, **kwargs): "Update the property cache when a POI is saved. It does not yet handle property removal." if action == 'post_add': instance.save_properties_cache()
[ "def", "update_properties_cache", "(", "sender", ",", "instance", ",", "action", ",", "reverse", ",", "model", ",", "pk_set", ",", "*", "*", "kwargs", ")", ":", "if", "action", "==", "'post_add'", ":", "instance", ".", "save_properties_cache", "(", ")" ]
Update the property cache when a POI is saved. It does not yet handle property removal.
[ "Property", "cache", "actualization", "at", "POI", "save", ".", "It", "will", "not", "work", "yet", "after", "property", "removal", "." ]
1d8b7428d2bf3b1165985d767b19677bb6db9eae
https://github.com/auto-mat/django-webmap-corpus/blob/1d8b7428d2bf3b1165985d767b19677bb6db9eae/webmap/models.py#L236-L239
train
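This handler matches the signature of Django's m2m_changed signal; a hedged sketch of how it would typically be wired up (the Poi model and its properties field are assumptions based on this repo's naming, not verified against it):

from django.db.models.signals import m2m_changed
from webmap.models import Poi  # model and field names assumed

# Refresh the cache whenever properties are added to a POI.
m2m_changed.connect(update_properties_cache, sender=Poi.properties.through)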
maljovec/topopy
topopy/MorseComplex.py
MorseComplex.to_json
def to_json(self): """ Writes the complete Morse complex merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all maxima. """ capsule = {} capsule["Hierarchy"] = [] for ( dying, (persistence, surviving, saddle), ) in self.merge_sequence.items(): capsule["Hierarchy"].append( { "Persistence": persistence, "Dying": dying, "Surviving": surviving, "Saddle": saddle, } ) capsule["Partitions"] = [] base = np.array([None] * len(self.Y)) for label, items in self.base_partitions.items(): base[items] = label capsule["Partitions"] = base.tolist() return json.dumps(capsule, separators=(",", ":"))
python
def to_json(self): """ Writes the complete Morse complex merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all maxima. """ capsule = {} capsule["Hierarchy"] = [] for ( dying, (persistence, surviving, saddle), ) in self.merge_sequence.items(): capsule["Hierarchy"].append( { "Persistence": persistence, "Dying": dying, "Surviving": surviving, "Saddle": saddle, } ) capsule["Partitions"] = [] base = np.array([None] * len(self.Y)) for label, items in self.base_partitions.items(): base[items] = label capsule["Partitions"] = base.tolist() return json.dumps(capsule, separators=(",", ":"))
[ "def", "to_json", "(", "self", ")", ":", "capsule", "=", "{", "}", "capsule", "[", "\"Hierarchy\"", "]", "=", "[", "]", "for", "(", "dying", ",", "(", "persistence", ",", "surviving", ",", "saddle", ")", ",", ")", "in", "self", ".", "merge_sequence", ".", "items", "(", ")", ":", "capsule", "[", "\"Hierarchy\"", "]", ".", "append", "(", "{", "\"Persistence\"", ":", "persistence", ",", "\"Dying\"", ":", "dying", ",", "\"Surviving\"", ":", "surviving", ",", "\"Saddle\"", ":", "saddle", ",", "}", ")", "capsule", "[", "\"Partitions\"", "]", "=", "[", "]", "base", "=", "np", ".", "array", "(", "[", "None", "]", "*", "len", "(", "self", ".", "Y", ")", ")", "for", "label", ",", "items", "in", "self", ".", "base_partitions", ".", "items", "(", ")", ":", "base", "[", "items", "]", "=", "label", "capsule", "[", "\"Partitions\"", "]", "=", "base", ".", "tolist", "(", ")", "return", "json", ".", "dumps", "(", "capsule", ",", "separators", "=", "(", "\",\"", ",", "\":\"", ")", ")" ]
Writes the complete Morse complex merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all maxima.
[ "Writes", "the", "complete", "Morse", "complex", "merge", "hierarchy", "to", "a", "string", "object", "." ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseComplex.py#L380-L406
train
redhat-cip/python-dciclient
dciclient/v1/api/jobs_events.py
iter
def iter(context, sequence, limit=10): """Iterate over all the jobs events.""" params = {'limit': limit, 'offset': 0} uri = '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence) while True: j = context.session.get(uri, params=params).json() if len(j['jobs_events']): for i in j['jobs_events']: yield i else: break params['offset'] += params['limit']
python
def iter(context, sequence, limit=10): """Iterate over all the jobs events.""" params = {'limit': limit, 'offset': 0} uri = '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence) while True: j = context.session.get(uri, params=params).json() if len(j['jobs_events']): for i in j['jobs_events']: yield i else: break params['offset'] += params['limit']
[ "def", "iter", "(", "context", ",", "sequence", ",", "limit", "=", "10", ")", ":", "params", "=", "{", "'limit'", ":", "limit", ",", "'offset'", ":", "0", "}", "uri", "=", "'%s/%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "RESOURCE", ",", "sequence", ")", "while", "True", ":", "j", "=", "context", ".", "session", ".", "get", "(", "uri", ",", "params", "=", "params", ")", ".", "json", "(", ")", "if", "len", "(", "j", "[", "'jobs_events'", "]", ")", ":", "for", "i", "in", "j", "[", "'jobs_events'", "]", ":", "yield", "i", "else", ":", "break", "params", "[", "'offset'", "]", "+=", "params", "[", "'limit'", "]" ]
Iterate over all the jobs events.
[ "Iter", "to", "list", "all", "the", "jobs", "events", "." ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/jobs_events.py#L30-L43
train
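A usage sketch for the paginated generator; `context` is assumed to be an authenticated dciclient context object, and the sequence value is made up:

from dciclient.v1.api import jobs_events

# Lazily walks all job events after sequence 0, fetching 10 per request.
for event in jobs_events.iter(context, 0, limit=10):
    print(event['id'])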
redhat-cip/python-dciclient
dciclient/v1/api/jobs_events.py
delete
def delete(context, sequence): """Delete jobs events from a given sequence""" uri = '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence) return context.session.delete(uri)
python
def delete(context, sequence): """Delete jobs events from a given sequence""" uri = '%s/%s/%s' % (context.dci_cs_api, RESOURCE, sequence) return context.session.delete(uri)
[ "def", "delete", "(", "context", ",", "sequence", ")", ":", "uri", "=", "'%s/%s/%s'", "%", "(", "context", ".", "dci_cs_api", ",", "RESOURCE", ",", "sequence", ")", "return", "context", ".", "session", ".", "delete", "(", "uri", ")" ]
Delete jobs events from a given sequence
[ "Delete", "jobs", "events", "from", "a", "given", "sequence" ]
a4aa5899062802bbe4c30a075d8447f8d222d214
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/jobs_events.py#L46-L49
train
DheerendraRathor/django-auth-ldap-ng
django_auth_ldap/config.py
_LDAPConfig.get_ldap
def get_ldap(cls, global_options=None): """ Returns the ldap module. The unit test harness will assign a mock object to _LDAPConfig.ldap. It is imperative that the ldap module not be imported anywhere else so that the unit tests will pass in the absence of python-ldap. """ if cls.ldap is None: import ldap.filter # Support for python-ldap < 2.0.6 try: import ldap.dn except ImportError: from django_auth_ldap import dn ldap.dn = dn cls.ldap = ldap # Apply global LDAP options once if (not cls._ldap_configured) and (global_options is not None): for opt, value in global_options.items(): cls.ldap.set_option(opt, value) cls._ldap_configured = True return cls.ldap
python
def get_ldap(cls, global_options=None): """ Returns the ldap module. The unit test harness will assign a mock object to _LDAPConfig.ldap. It is imperative that the ldap module not be imported anywhere else so that the unit tests will pass in the absence of python-ldap. """ if cls.ldap is None: import ldap.filter # Support for python-ldap < 2.0.6 try: import ldap.dn except ImportError: from django_auth_ldap import dn ldap.dn = dn cls.ldap = ldap # Apply global LDAP options once if (not cls._ldap_configured) and (global_options is not None): for opt, value in global_options.items(): cls.ldap.set_option(opt, value) cls._ldap_configured = True return cls.ldap
[ "def", "get_ldap", "(", "cls", ",", "global_options", "=", "None", ")", ":", "if", "cls", ".", "ldap", "is", "None", ":", "import", "ldap", ".", "filter", "# Support for python-ldap < 2.0.6", "try", ":", "import", "ldap", ".", "dn", "except", "ImportError", ":", "from", "django_auth_ldap", "import", "dn", "ldap", ".", "dn", "=", "dn", "cls", ".", "ldap", "=", "ldap", "# Apply global LDAP options once", "if", "(", "not", "cls", ".", "_ldap_configured", ")", "and", "(", "global_options", "is", "not", "None", ")", ":", "for", "opt", ",", "value", "in", "global_options", ".", "items", "(", ")", ":", "cls", ".", "ldap", ".", "set_option", "(", "opt", ",", "value", ")", "cls", ".", "_ldap_configured", "=", "True", "return", "cls", ".", "ldap" ]
Returns the ldap module. The unit test harness will assign a mock object to _LDAPConfig.ldap. It is imperative that the ldap module not be imported anywhere else so that the unit tests will pass in the absence of python-ldap.
[ "Returns", "the", "ldap", "module", ".", "The", "unit", "test", "harness", "will", "assign", "a", "mock", "object", "to", "_LDAPConfig", ".", "ldap", ".", "It", "is", "imperative", "that", "the", "ldap", "module", "not", "be", "imported", "anywhere", "else", "so", "that", "the", "unit", "tests", "will", "pass", "in", "the", "absence", "of", "python", "-", "ldap", "." ]
4d2458bd90c4539353c5bfd5ea793c1e59780ee8
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L52-L78
train
DheerendraRathor/django-auth-ldap-ng
django_auth_ldap/config.py
LDAPSearch._begin
def _begin(self, connection, filterargs=(), escape=True): """ Begins an asynchronous search and returns the message id to retrieve the results. filterargs is an object that will be used for expansion of the filter string. If escape is True, values in filterargs will be escaped. """ if escape: filterargs = self._escape_filterargs(filterargs) try: filterstr = self.filterstr % filterargs msgid = connection.search(force_str(self.base_dn), self.scope, force_str(filterstr)) except ldap.LDAPError as e: msgid = None logger.error(u"search('%s', %d, '%s') raised %s" % (self.base_dn, self.scope, filterstr, pprint.pformat(e))) return msgid
python
def _begin(self, connection, filterargs=(), escape=True): """ Begins an asynchronous search and returns the message id to retrieve the results. filterargs is an object that will be used for expansion of the filter string. If escape is True, values in filterargs will be escaped. """ if escape: filterargs = self._escape_filterargs(filterargs) try: filterstr = self.filterstr % filterargs msgid = connection.search(force_str(self.base_dn), self.scope, force_str(filterstr)) except ldap.LDAPError as e: msgid = None logger.error(u"search('%s', %d, '%s') raised %s" % (self.base_dn, self.scope, filterstr, pprint.pformat(e))) return msgid
[ "def", "_begin", "(", "self", ",", "connection", ",", "filterargs", "=", "(", ")", ",", "escape", "=", "True", ")", ":", "if", "escape", ":", "filterargs", "=", "self", ".", "_escape_filterargs", "(", "filterargs", ")", "try", ":", "filterstr", "=", "self", ".", "filterstr", "%", "filterargs", "msgid", "=", "connection", ".", "search", "(", "force_str", "(", "self", ".", "base_dn", ")", ",", "self", ".", "scope", ",", "force_str", "(", "filterstr", ")", ")", "except", "ldap", ".", "LDAPError", "as", "e", ":", "msgid", "=", "None", "logger", ".", "error", "(", "u\"search('%s', %d, '%s') raised %s\"", "%", "(", "self", ".", "base_dn", ",", "self", ".", "scope", ",", "filterstr", ",", "pprint", ".", "pformat", "(", "e", ")", ")", ")", "return", "msgid" ]
Begins an asynchronous search and returns the message id to retrieve the results. filterargs is an object that will be used for expansion of the filter string. If escape is True, values in filterargs will be escaped.
[ "Begins", "an", "asynchronous", "search", "and", "returns", "the", "message", "id", "to", "retrieve", "the", "results", "." ]
4d2458bd90c4539353c5bfd5ea793c1e59780ee8
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L170-L191
train
DheerendraRathor/django-auth-ldap-ng
django_auth_ldap/config.py
LDAPSearch._results
def _results(self, connection, msgid): """ Returns the result of a previous asynchronous query. """ try: kind, results = connection.result(msgid) if kind != ldap.RES_SEARCH_RESULT: results = [] except ldap.LDAPError as e: results = [] logger.error(u"result(%d) raised %s" % (msgid, pprint.pformat(e))) return self._process_results(results)
python
def _results(self, connection, msgid): """ Returns the result of a previous asynchronous query. """ try: kind, results = connection.result(msgid) if kind != ldap.RES_SEARCH_RESULT: results = [] except ldap.LDAPError as e: results = [] logger.error(u"result(%d) raised %s" % (msgid, pprint.pformat(e))) return self._process_results(results)
[ "def", "_results", "(", "self", ",", "connection", ",", "msgid", ")", ":", "try", ":", "kind", ",", "results", "=", "connection", ".", "result", "(", "msgid", ")", "if", "kind", "!=", "ldap", ".", "RES_SEARCH_RESULT", ":", "results", "=", "[", "]", "except", "ldap", ".", "LDAPError", "as", "e", ":", "results", "=", "[", "]", "logger", ".", "error", "(", "u\"result(%d) raised %s\"", "%", "(", "msgid", ",", "pprint", ".", "pformat", "(", "e", ")", ")", ")", "return", "self", ".", "_process_results", "(", "results", ")" ]
Returns the result of a previous asynchronous query.
[ "Returns", "the", "result", "of", "a", "previous", "asynchronous", "query", "." ]
4d2458bd90c4539353c5bfd5ea793c1e59780ee8
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L193-L205
train
DheerendraRathor/django-auth-ldap-ng
django_auth_ldap/config.py
LDAPSearch._escape_filterargs
def _escape_filterargs(self, filterargs): """ Escapes values in filterargs. filterargs is a value suitable for Django's string formatting operator (%), which means it's either a tuple or a dict. This returns a new tuple or dict with all values escaped for use in filter strings. """ if isinstance(filterargs, tuple): filterargs = tuple(self.ldap.filter.escape_filter_chars(value) for value in filterargs) elif isinstance(filterargs, dict): filterargs = dict((key, self.ldap.filter.escape_filter_chars(value)) for key, value in filterargs.items()) else: raise TypeError("filterargs must be a tuple or dict.") return filterargs
python
def _escape_filterargs(self, filterargs): """ Escapes values in filterargs. filterargs is a value suitable for Django's string formatting operator (%), which means it's either a tuple or a dict. This returns a new tuple or dict with all values escaped for use in filter strings. """ if isinstance(filterargs, tuple): filterargs = tuple(self.ldap.filter.escape_filter_chars(value) for value in filterargs) elif isinstance(filterargs, dict): filterargs = dict((key, self.ldap.filter.escape_filter_chars(value)) for key, value in filterargs.items()) else: raise TypeError("filterargs must be a tuple or dict.") return filterargs
[ "def", "_escape_filterargs", "(", "self", ",", "filterargs", ")", ":", "if", "isinstance", "(", "filterargs", ",", "tuple", ")", ":", "filterargs", "=", "tuple", "(", "self", ".", "ldap", ".", "filter", ".", "escape_filter_chars", "(", "value", ")", "for", "value", "in", "filterargs", ")", "elif", "isinstance", "(", "filterargs", ",", "dict", ")", ":", "filterargs", "=", "dict", "(", "(", "key", ",", "self", ".", "ldap", ".", "filter", ".", "escape_filter_chars", "(", "value", ")", ")", "for", "key", ",", "value", "in", "filterargs", ".", "items", "(", ")", ")", "else", ":", "raise", "TypeError", "(", "\"filterargs must be a tuple or dict.\"", ")", "return", "filterargs" ]
Escapes values in filterargs. filterargs is a value suitable for Django's string formatting operator (%), which means it's either a tuple or a dict. This returns a new tuple or dict with all values escaped for use in filter strings.
[ "Escapes", "values", "in", "filterargs", "." ]
4d2458bd90c4539353c5bfd5ea793c1e59780ee8
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L207-L225
train
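What the escaping prevents, shown directly with python-ldap's helper on a deliberately hostile value:

import ldap.filter

user_input = 'admin)(uid=*'
safe = ldap.filter.escape_filter_chars(user_input)
print('(uid=%s)' % safe)   # parentheses and '*' are escaped, so the filter cannot be broken out of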
DheerendraRathor/django-auth-ldap-ng
django_auth_ldap/config.py
LDAPSearch._process_results
def _process_results(self, results): """ Returns a sanitized copy of raw LDAP results. This scrubs out references, decodes utf8, normalizes DNs, etc. """ results = [r for r in results if r[0] is not None] results = _DeepStringCoder('utf-8').decode(results) # The normal form of a DN is lower case. results = [(r[0].lower(), r[1]) for r in results] result_dns = [result[0] for result in results] logger.debug(u"search_s('%s', %d, '%s') returned %d objects: %s" % (self.base_dn, self.scope, self.filterstr, len(result_dns), "; ".join(result_dns))) return results
python
def _process_results(self, results): """ Returns a sanitized copy of raw LDAP results. This scrubs out references, decodes utf8, normalizes DNs, etc. """ results = [r for r in results if r[0] is not None] results = _DeepStringCoder('utf-8').decode(results) # The normal form of a DN is lower case. results = [(r[0].lower(), r[1]) for r in results] result_dns = [result[0] for result in results] logger.debug(u"search_s('%s', %d, '%s') returned %d objects: %s" % (self.base_dn, self.scope, self.filterstr, len(result_dns), "; ".join(result_dns))) return results
[ "def", "_process_results", "(", "self", ",", "results", ")", ":", "results", "=", "[", "r", "for", "r", "in", "results", "if", "r", "[", "0", "]", "is", "not", "None", "]", "results", "=", "_DeepStringCoder", "(", "'utf-8'", ")", ".", "decode", "(", "results", ")", "# The normal form of a DN is lower case.", "results", "=", "[", "(", "r", "[", "0", "]", ".", "lower", "(", ")", ",", "r", "[", "1", "]", ")", "for", "r", "in", "results", "]", "result_dns", "=", "[", "result", "[", "0", "]", "for", "result", "in", "results", "]", "logger", ".", "debug", "(", "u\"search_s('%s', %d, '%s') returned %d objects: %s\"", "%", "(", "self", ".", "base_dn", ",", "self", ".", "scope", ",", "self", ".", "filterstr", ",", "len", "(", "result_dns", ")", ",", "\"; \"", ".", "join", "(", "result_dns", ")", ")", ")", "return", "results" ]
Returns a sanitized copy of raw LDAP results. This scrubs out references, decodes utf8, normalizes DNs, etc.
[ "Returns", "a", "sanitized", "copy", "of", "raw", "LDAP", "results", ".", "This", "scrubs", "out", "references", "decodes", "utf8", "normalizes", "DNs", "etc", "." ]
4d2458bd90c4539353c5bfd5ea793c1e59780ee8
https://github.com/DheerendraRathor/django-auth-ldap-ng/blob/4d2458bd90c4539353c5bfd5ea793c1e59780ee8/django_auth_ldap/config.py#L227-L243
train
ehansis/ozelot
ozelot/client.py
Client.get_connection_string
def get_connection_string(params, hide_password=True): """Get a database connection string Args: params (dict): database configuration, as defined in :mod:`ozelot.config` hide_password (bool): if True, the password is hidden in the returned string (use this for logging purposes). Returns: str: connection string """ connection_string = params['driver'] + '://' user = params.get('user', None) password = params.get('password', None) host = params.get('host', None) port = params.get('port', None) database = params.get('database', None) if database is None: raise ValueError("Field 'database' of connection parameters cannot be None.") # if password is not set, try to get it from keyring if password is None and user is not None: # noinspection PyTypeChecker password = Client._get_password(params) if password is None: raise RuntimeError("Password not defined and not available in keyring.") # don't add host/port/user/password if no host given if host is not None: # don't add user/password if user not given if user is not None: connection_string += user # omit zero-length passwords if len(password) > 0: if hide_password: connection_string += ":[password hidden]" else: connection_string += ":" + password connection_string += "@" connection_string += host if port is not None: connection_string += ':' + str(port) # noinspection PyTypeChecker connection_string += '/' + database return connection_string
python
def get_connection_string(params, hide_password=True): """Get a database connection string Args: params (dict): database configuration, as defined in :mod:`ozelot.config` hide_password (bool): if True, the password is hidden in the returned string (use this for logging purposes). Returns: str: connection string """ connection_string = params['driver'] + '://' user = params.get('user', None) password = params.get('password', None) host = params.get('host', None) port = params.get('port', None) database = params.get('database', None) if database is None: raise ValueError("Field 'database' of connection parameters cannot be None.") # if password is not set, try to get it from keyring if password is None and user is not None: # noinspection PyTypeChecker password = Client._get_password(params) if password is None: raise RuntimeError("Password not defined and not available in keyring.") # don't add host/port/user/password if no host given if host is not None: # don't add user/password if user not given if user is not None: connection_string += user # omit zero-length passwords if len(password) > 0: if hide_password: connection_string += ":[password hidden]" else: connection_string += ":" + password connection_string += "@" connection_string += host if port is not None: connection_string += ':' + str(port) # noinspection PyTypeChecker connection_string += '/' + database return connection_string
[ "def", "get_connection_string", "(", "params", ",", "hide_password", "=", "True", ")", ":", "connection_string", "=", "params", "[", "'driver'", "]", "+", "'://'", "user", "=", "params", ".", "get", "(", "'user'", ",", "None", ")", "password", "=", "params", ".", "get", "(", "'password'", ",", "None", ")", "host", "=", "params", ".", "get", "(", "'host'", ",", "None", ")", "port", "=", "params", ".", "get", "(", "'port'", ",", "None", ")", "database", "=", "params", ".", "get", "(", "'database'", ",", "None", ")", "if", "database", "is", "None", ":", "raise", "ValueError", "(", "\"Field 'database' of connection parameters cannot be None.\"", ")", "# if password is not set, try to get it from keyring", "if", "password", "is", "None", "and", "user", "is", "not", "None", ":", "# noinspection PyTypeChecker", "password", "=", "Client", ".", "_get_password", "(", "params", ")", "if", "password", "is", "None", ":", "raise", "RuntimeError", "(", "\"Password not defined and not available in keyring.\"", ")", "# don't add host/port/user/password if no host given", "if", "host", "is", "not", "None", ":", "# don't add user/password if user not given", "if", "user", "is", "not", "None", ":", "connection_string", "+=", "user", "# omit zero-length passwords", "if", "len", "(", "password", ")", ">", "0", ":", "if", "hide_password", ":", "connection_string", "+=", "\":[password hidden]\"", "else", ":", "connection_string", "+=", "\":\"", "+", "password", "connection_string", "+=", "\"@\"", "connection_string", "+=", "host", "if", "port", "is", "not", "None", ":", "connection_string", "+=", "':'", "+", "str", "(", "port", ")", "# noinspection PyTypeChecker", "connection_string", "+=", "'/'", "+", "database", "return", "connection_string" ]
Get a database connection string Args: params (dict): database configuration, as defined in :mod:`ozelot.config` hide_password (bool): if True, the password is hidden in the returned string (use this for logging purposes). Returns: str: connection string
[ "Get", "a", "database", "connection", "string" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/ozelot/client.py#L133-L187
train
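A sketch of the strings this static method produces; the parameter dicts are examples, not real credentials:

from ozelot.client import Client

print(Client.get_connection_string({'driver': 'sqlite', 'database': 'example.db'}))
# -> sqlite:///example.db

params = {'driver': 'postgresql', 'host': 'db.example.com', 'port': 5432,
          'user': 'alice', 'password': 's3cret', 'database': 'ozelot'}
print(Client.get_connection_string(params))
# -> postgresql://alice:[password hidden]@db.example.com:5432/ozelot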
geminipy/geminipy
geminipy/__init__.py
Geminipy.pubticker
def pubticker(self, symbol='btcusd'): """Send a request for latest ticker info, return the response.""" url = self.base_url + '/v1/pubticker/' + symbol return requests.get(url)
python
def pubticker(self, symbol='btcusd'): """Send a request for latest ticker info, return the response.""" url = self.base_url + '/v1/pubticker/' + symbol return requests.get(url)
[ "def", "pubticker", "(", "self", ",", "symbol", "=", "'btcusd'", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/v1/pubticker/'", "+", "symbol", "return", "requests", ".", "get", "(", "url", ")" ]
Send a request for latest ticker info, return the response.
[ "Send", "a", "request", "for", "latest", "ticker", "info", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L50-L54
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.book
def book(self, symbol='btcusd', limit_bids=0, limit_asks=0): """ Send a request to get the public order book, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_bids -- limit the number of bids returned (default 0) limit_asks -- limit the number of asks returned (default 0) """ url = self.base_url + '/v1/book/' + symbol params = { 'limit_bids': limit_bids, 'limit_asks': limit_asks } return requests.get(url, params)
python
def book(self, symbol='btcusd', limit_bids=0, limit_asks=0): """ Send a request to get the public order book, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_bids -- limit the number of bids returned (default 0) limit_asks -- limit the number of asks returned (default 0) """ url = self.base_url + '/v1/book/' + symbol params = { 'limit_bids': limit_bids, 'limit_asks': limit_asks } return requests.get(url, params)
[ "def", "book", "(", "self", ",", "symbol", "=", "'btcusd'", ",", "limit_bids", "=", "0", ",", "limit_asks", "=", "0", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/v1/book/'", "+", "symbol", "params", "=", "{", "'limit_bids'", ":", "limit_bids", ",", "'limit_asks'", ":", "limit_asks", "}", "return", "requests", ".", "get", "(", "url", ",", "params", ")" ]
Send a request to get the public order book, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_bids -- limit the number of bids returned (default 0) limit_asks -- limit the number of asks returned (default 0)
[ "Send", "a", "request", "to", "get", "the", "public", "order", "book", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L56-L71
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.trades
def trades(self, symbol='btcusd', since=0, limit_trades=50, include_breaks=0): """ Send a request to get all public trades, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return trades after this unix timestamp (default 0) limit_trades -- maximum number of trades to return (default 50). include_breaks -- whether to display broken trades (default False) """ url = self.base_url + '/v1/trades/' + symbol params = { 'since': since, 'limit_trades': limit_trades, 'include_breaks': include_breaks } return requests.get(url, params)
python
def trades(self, symbol='btcusd', since=0, limit_trades=50, include_breaks=0): """ Send a request to get all public trades, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return trades after this unix timestamp (default 0) limit_trades -- maximum number of trades to return (default 50). include_breaks -- whether to display broken trades (default False) """ url = self.base_url + '/v1/trades/' + symbol params = { 'since': since, 'limit_trades': limit_trades, 'include_breaks': include_breaks } return requests.get(url, params)
[ "def", "trades", "(", "self", ",", "symbol", "=", "'btcusd'", ",", "since", "=", "0", ",", "limit_trades", "=", "50", ",", "include_breaks", "=", "0", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/v1/trades/'", "+", "symbol", "params", "=", "{", "'since'", ":", "since", ",", "'limit_trades'", ":", "limit_trades", ",", "'include_breaks'", ":", "include_breaks", "}", "return", "requests", ".", "get", "(", "url", ",", "params", ")" ]
Send a request to get all public trades, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return trades after this unix timestamp (default 0) limit_trades -- maximum number of trades to return (default 50). include_breaks -- whether to display broken trades (default False)
[ "Send", "a", "request", "to", "get", "all", "public", "trades", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L73-L91
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.auction
def auction(self, symbol='btcusd'): """Send a request for latest auction info, return the response.""" url = self.base_url + '/v1/auction/' + symbol return requests.get(url)
python
def auction(self, symbol='btcusd'): """Send a request for latest auction info, return the response.""" url = self.base_url + '/v1/auction/' + symbol return requests.get(url)
[ "def", "auction", "(", "self", ",", "symbol", "=", "'btcusd'", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/v1/auction/'", "+", "symbol", "return", "requests", ".", "get", "(", "url", ")" ]
Send a request for latest auction info, return the response.
[ "Send", "a", "request", "for", "latest", "auction", "info", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L93-L97
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.auction_history
def auction_history(self, symbol='btcusd', since=0, limit_auction_results=50, include_indicative=1): """ Send a request for auction history info, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return auction events after this timestamp (default 0) limit_auction_results -- maximum number of auction events to return (default 50). include_indicative -- whether to include publication of indicative prices and quantities. (default True) """ url = self.base_url + '/v1/auction/' + symbol + '/history' params = { 'since': since, 'limit_auction_results': limit_auction_results, 'include_indicative': include_indicative } return requests.get(url, params)
python
def auction_history(self, symbol='btcusd', since=0, limit_auction_results=50, include_indicative=1): """ Send a request for auction history info, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return auction events after this timestamp (default 0) limit_auction_results -- maximum number of auction events to return (default 50). include_indicative -- whether to include publication of indicative prices and quantities. (default True) """ url = self.base_url + '/v1/auction/' + symbol + '/history' params = { 'since': since, 'limit_auction_results': limit_auction_results, 'include_indicative': include_indicative } return requests.get(url, params)
[ "def", "auction_history", "(", "self", ",", "symbol", "=", "'btcusd'", ",", "since", "=", "0", ",", "limit_auction_results", "=", "50", ",", "include_indicative", "=", "1", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/v1/auction/'", "+", "symbol", "+", "'/history'", "params", "=", "{", "'since'", ":", "since", ",", "'limit_auction_results'", ":", "limit_auction_results", ",", "'include_indicative'", ":", "include_indicative", "}", "return", "requests", ".", "get", "(", "url", ",", "params", ")" ]
Send a request for auction history info, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return auction events after this timestamp (default 0) limit_auction_results -- maximum number of auction events to return (default 50). include_indicative -- whether to include publication of indicative prices and quantities. (default True)
[ "Send", "a", "request", "for", "auction", "history", "info", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L99-L119
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.new_order
def new_order(self, amount, price, side, client_order_id=None, symbol='btcusd', type='exchange limit', options=None): """ Send a request to place an order, return the response. Arguments: amount -- quoted decimal amount of BTC to purchase price -- quoted decimal amount of USD to spend per BTC side -- 'buy' or 'sell' client_order_id -- an optional client-specified order id (default None) symbol -- currency symbol (default 'btcusd') type -- the order type (default 'exchange limit') """ request = '/v1/order/new' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce(), 'symbol': symbol, 'amount': amount, 'price': price, 'side': side, 'type': type } if client_order_id is not None: params['client_order_id'] = client_order_id if options is not None: params['options'] = options return requests.post(url, headers=self.prepare(params))
python
def new_order(self, amount, price, side, client_order_id=None, symbol='btcusd', type='exchange limit', options=None): """ Send a request to place an order, return the response. Arguments: amount -- quoted decimal amount of BTC to purchase price -- quoted decimal amount of USD to spend per BTC side -- 'buy' or 'sell' client_order_id -- an optional client-specified order id (default None) symbol -- currency symbol (default 'btcusd') type -- the order type (default 'exchange limit') """ request = '/v1/order/new' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce(), 'symbol': symbol, 'amount': amount, 'price': price, 'side': side, 'type': type } if client_order_id is not None: params['client_order_id'] = client_order_id if options is not None: params['options'] = options return requests.post(url, headers=self.prepare(params))
[ "def", "new_order", "(", "self", ",", "amount", ",", "price", ",", "side", ",", "client_order_id", "=", "None", ",", "symbol", "=", "'btcusd'", ",", "type", "=", "'exchange limit'", ",", "options", "=", "None", ")", ":", "request", "=", "'/v1/order/new'", "url", "=", "self", ".", "base_url", "+", "request", "params", "=", "{", "'request'", ":", "request", ",", "'nonce'", ":", "self", ".", "get_nonce", "(", ")", ",", "'symbol'", ":", "symbol", ",", "'amount'", ":", "amount", ",", "'price'", ":", "price", ",", "'side'", ":", "side", ",", "'type'", ":", "type", "}", "if", "client_order_id", "is", "not", "None", ":", "params", "[", "'client_order_id'", "]", "=", "client_order_id", "if", "options", "is", "not", "None", ":", "params", "[", "'options'", "]", "=", "options", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "prepare", "(", "params", ")", ")" ]
Send a request to place an order, return the response. Arguments: amount -- quoted decimal amount of BTC to purchase price -- quoted decimal amount of USD to spend per BTC side -- 'buy' or 'sell' client_order_id -- an optional client-specified order id (default None) symbol -- currency symbol (default 'btcusd') type -- the order type (default 'exchange limit')
[ "Send", "a", "request", "to", "place", "an", "order", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L122-L153
train
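A hedged usage sketch for new_order; the constructor arguments and credentials are placeholders and assumed, not taken from the library's documentation:

from geminipy import Geminipy

con = Geminipy(api_key='my-key', secret_key='my-secret')  # args assumed
response = con.new_order(amount='0.01', price='2500.00', side='buy')
print(response.json())   # requests.Response from the Gemini REST API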
geminipy/geminipy
geminipy/__init__.py
Geminipy.cancel_order
def cancel_order(self, order_id): """ Send a request to cancel an order, return the response. Arguments: order_id - the order id to cancel """ request = '/v1/order/cancel' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce(), 'order_id': order_id } return requests.post(url, headers=self.prepare(params))
python
def cancel_order(self, order_id): """ Send a request to cancel an order, return the response. Arguments: order_id - the order id to cancel """ request = '/v1/order/cancel' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce(), 'order_id': order_id } return requests.post(url, headers=self.prepare(params))
[ "def", "cancel_order", "(", "self", ",", "order_id", ")", ":", "request", "=", "'/v1/order/cancel'", "url", "=", "self", ".", "base_url", "+", "request", "params", "=", "{", "'request'", ":", "request", ",", "'nonce'", ":", "self", ".", "get_nonce", "(", ")", ",", "'order_id'", ":", "order_id", "}", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "prepare", "(", "params", ")", ")" ]
Send a request to cancel an order, return the response. Arguments: order_id - the order id to cancel
[ "Send", "a", "request", "to", "cancel", "an", "order", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L155-L170
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.past_trades
def past_trades(self, symbol='btcusd', limit_trades=50, timestamp=0): """ Send a trade history request, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_trades -- maximum number of trades to return (default 50) timestamp -- only return trades after this unix timestamp (default 0) """ request = '/v1/mytrades' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce(), 'symbol': symbol, 'limit_trades': limit_trades, 'timestamp': timestamp } return requests.post(url, headers=self.prepare(params))
python
def past_trades(self, symbol='btcusd', limit_trades=50, timestamp=0): """ Send a trade history request, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_trades -- maximum number of trades to return (default 50) timestamp -- only return trades after this unix timestamp (default 0) """ request = '/v1/mytrades' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce(), 'symbol': symbol, 'limit_trades': limit_trades, 'timestamp': timestamp } return requests.post(url, headers=self.prepare(params))
[ "def", "past_trades", "(", "self", ",", "symbol", "=", "'btcusd'", ",", "limit_trades", "=", "50", ",", "timestamp", "=", "0", ")", ":", "request", "=", "'/v1/mytrades'", "url", "=", "self", ".", "base_url", "+", "request", "params", "=", "{", "'request'", ":", "request", ",", "'nonce'", ":", "self", ".", "get_nonce", "(", ")", ",", "'symbol'", ":", "symbol", ",", "'limit_trades'", ":", "limit_trades", ",", "'timestamp'", ":", "timestamp", "}", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "prepare", "(", "params", ")", ")" ]
Send a trade history request, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_trades -- maximum number of trades to return (default 50) timestamp -- only return trades after this unix timestamp (default 0)
[ "Send", "a", "trade", "history", "request", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L222-L241
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.tradevolume
def tradevolume(self): """Send a request to get your trade volume, return the response.""" request = '/v1/tradevolume' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce() } return requests.post(url, headers=self.prepare(params))
python
def tradevolume(self): """Send a request to get your trade volume, return the response.""" request = '/v1/tradevolume' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce() } return requests.post(url, headers=self.prepare(params))
[ "def", "tradevolume", "(", "self", ")", ":", "request", "=", "'/v1/tradevolume'", "url", "=", "self", ".", "base_url", "+", "request", "params", "=", "{", "'request'", ":", "request", ",", "'nonce'", ":", "self", ".", "get_nonce", "(", ")", "}", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "prepare", "(", "params", ")", ")" ]
Send a request to get your trade volume, return the response.
[ "Send", "a", "request", "to", "get", "your", "trade", "volume", "return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L243-L252
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.newAddress
def newAddress(self, currency='btc', label=''): """ Send a request for a new cryptocurrency deposit address with an optional label. Return the response. Arguments: currency -- a Gemini supported cryptocurrency (btc, eth) label -- optional label for the deposit address """ request = '/v1/deposit/' + currency + '/newAddress' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce() } if label != '': params['label'] = label return requests.post(url, headers=self.prepare(params))
python
def newAddress(self, currency='btc', label=''): """ Send a request for a new cryptocurrency deposit address with an optional label. Return the response. Arguments: currency -- a Gemini supported cryptocurrency (btc, eth) label -- optional label for the deposit address """ request = '/v1/deposit/' + currency + '/newAddress' url = self.base_url + request params = { 'request': request, 'nonce': self.get_nonce() } if label != '': params['label'] = label return requests.post(url, headers=self.prepare(params))
[ "def", "newAddress", "(", "self", ",", "currency", "=", "'btc'", ",", "label", "=", "''", ")", ":", "request", "=", "'/v1/deposit/'", "+", "currency", "+", "'/newAddress'", "url", "=", "self", ".", "base_url", "+", "request", "params", "=", "{", "'request'", ":", "request", ",", "'nonce'", ":", "self", ".", "get_nonce", "(", ")", "}", "if", "label", "!=", "''", ":", "params", "[", "'label'", "]", "=", "label", "return", "requests", ".", "post", "(", "url", ",", "headers", "=", "self", ".", "prepare", "(", "params", ")", ")" ]
Send a request for a new cryptocurrency deposit address with an optional label. Return the response. Arguments: currency -- a Gemini supported cryptocurrency (btc, eth) label -- optional label for the deposit address
[ "Send", "a", "request", "for", "a", "new", "cryptocurrency", "deposit", "address", "with", "an", "optional", "label", ".", "Return", "the", "response", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L265-L284
train
geminipy/geminipy
geminipy/__init__.py
Geminipy.prepare
def prepare(self, params): """ Prepare, return the required HTTP headers. Base64-encode the parameters, sign the payload with the secret key, create the HTTP headers, and return them. Arguments: params -- a dictionary of parameters """ jsonparams = json.dumps(params) payload = base64.b64encode(jsonparams.encode()) signature = hmac.new(self.secret_key.encode(), payload, hashlib.sha384).hexdigest() return {'X-GEMINI-APIKEY': self.api_key, 'X-GEMINI-PAYLOAD': payload, 'X-GEMINI-SIGNATURE': signature}
python
def prepare(self, params): """ Prepare, return the required HTTP headers. Base64-encode the parameters, sign the payload with the secret key, create the HTTP headers, and return them. Arguments: params -- a dictionary of parameters """ jsonparams = json.dumps(params) payload = base64.b64encode(jsonparams.encode()) signature = hmac.new(self.secret_key.encode(), payload, hashlib.sha384).hexdigest() return {'X-GEMINI-APIKEY': self.api_key, 'X-GEMINI-PAYLOAD': payload, 'X-GEMINI-SIGNATURE': signature}
[ "def", "prepare", "(", "self", ",", "params", ")", ":", "jsonparams", "=", "json", ".", "dumps", "(", "params", ")", "payload", "=", "base64", ".", "b64encode", "(", "jsonparams", ".", "encode", "(", ")", ")", "signature", "=", "hmac", ".", "new", "(", "self", ".", "secret_key", ".", "encode", "(", ")", ",", "payload", ",", "hashlib", ".", "sha384", ")", ".", "hexdigest", "(", ")", "return", "{", "'X-GEMINI-APIKEY'", ":", "self", ".", "api_key", ",", "'X-GEMINI-PAYLOAD'", ":", "payload", ",", "'X-GEMINI-SIGNATURE'", ":", "signature", "}" ]
Prepare, return the required HTTP headers. Base64-encode the parameters, sign the payload with the secret key, create the HTTP headers, and return them. Arguments: params -- a dictionary of parameters
[ "Prepare", "return", "the", "required", "HTTP", "headers", "." ]
0d83fe225b746ac4c8bb800aa6091e1b606231e8
https://github.com/geminipy/geminipy/blob/0d83fe225b746ac4c8bb800aa6091e1b606231e8/geminipy/__init__.py#L301-L318
train
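The signing in prepare follows Gemini's documented API-key scheme; a self-contained restatement with dummy credentials:

import base64
import hashlib
import hmac
import json

secret_key = 'dummy-secret'
params = {'request': '/v1/tradevolume', 'nonce': 1}
payload = base64.b64encode(json.dumps(params).encode())
signature = hmac.new(secret_key.encode(), payload, hashlib.sha384).hexdigest()
headers = {'X-GEMINI-APIKEY': 'dummy-key',
           'X-GEMINI-PAYLOAD': payload,
           'X-GEMINI-SIGNATURE': signature}
print(headers['X-GEMINI-SIGNATURE'][:16])  # hex-encoded SHA-384 HMAC of the payload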
kataev/flake8-rst
flake8_rst/sourceblock.py
SourceBlock.merge
def merge(cls, source_blocks): """Merge multiple SourceBlocks together""" if len(source_blocks) == 1: return source_blocks[0] source_blocks.sort(key=operator.attrgetter('start_line_number')) main_block = source_blocks[0] boot_lines = main_block.boot_lines source_lines = [source_line for source_block in source_blocks for source_line in source_block.source_lines] return cls(boot_lines, source_lines, directive=main_block.directive, language=main_block.language, roles=main_block.roles)
python
def merge(cls, source_blocks): """Merge multiple SourceBlocks together""" if len(source_blocks) == 1: return source_blocks[0] source_blocks.sort(key=operator.attrgetter('start_line_number')) main_block = source_blocks[0] boot_lines = main_block.boot_lines source_lines = [source_line for source_block in source_blocks for source_line in source_block.source_lines] return cls(boot_lines, source_lines, directive=main_block.directive, language=main_block.language, roles=main_block.roles)
[ "def", "merge", "(", "cls", ",", "source_blocks", ")", ":", "if", "len", "(", "source_blocks", ")", "==", "1", ":", "return", "source_blocks", "[", "0", "]", "source_blocks", ".", "sort", "(", "key", "=", "operator", ".", "attrgetter", "(", "'start_line_number'", ")", ")", "main_block", "=", "source_blocks", "[", "0", "]", "boot_lines", "=", "main_block", ".", "boot_lines", "source_lines", "=", "[", "source_line", "for", "source_block", "in", "source_blocks", "for", "source_line", "in", "source_block", ".", "source_lines", "]", "return", "cls", "(", "boot_lines", ",", "source_lines", ",", "directive", "=", "main_block", ".", "directive", ",", "language", "=", "main_block", ".", "language", ",", "roles", "=", "main_block", ".", "roles", ")" ]
Merge multiple SourceBlocks together
[ "Merge", "multiple", "SourceBlocks", "together" ]
ca6d41c7a309b9e8cd4fa6f428b82db96b6a986f
https://github.com/kataev/flake8-rst/blob/ca6d41c7a309b9e8cd4fa6f428b82db96b6a986f/flake8_rst/sourceblock.py#L72-L84
train
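The merge logic reduces to a sort-then-flatten idiom, shown here with plain tuples standing in for `SourceBlock` instances (toy data, not the real class):

.. code-block:: python

    import operator

    # (start_line_number, source_lines) pairs as toy SourceBlock stand-ins.
    blocks = [(10, ['b1', 'b2']), (1, ['a1'])]

    # Sort by start line, then flatten all source lines in order.
    blocks.sort(key=operator.itemgetter(0))
    merged = [line for _, lines in blocks for line in lines]
    assert merged == ['a1', 'b1', 'b2']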
ehansis/ozelot
examples/superheroes/superheroes/analysis.py
character_summary_table
def character_summary_table(): """Export a table listing all characters and their data Output is a CSV file and an Excel file, saved as 'characters.csv/.xlsx' in the output directory. """ # a database client/session to run queries in cl = client.get_client() session = cl.create_session() # Define the query. Note that we need to rename the two joined-in name columns, # to make the labels intelligible and to not have two identical column names in the output. # Also, we need a left outer join on the place of birth (instead of the default left inner join) # if we want results for characters that have no place of birth set. query = session.query(models.Character, models.Universe.name.label('universe'), models.Place.name.label('place_of_birth')) \ .join(models.Character.universe) \ .outerjoin(models.Character.place_of_birth) # download all data as a pandas DataFrame, index it by the character ID characters = cl.df_query(query).set_index('id') # query the number of movie appearances per character query = session.query(sa.func.count(models.MovieAppearance.id).label('movie_appearances'), models.MovieAppearance.character_id) \ .group_by(models.MovieAppearance.character_id) appearances = cl.df_query(query).set_index('character_id') # join both tables, sort by name df = characters.join(appearances, how='left').sort_values(by='name') # drop the foreign key columns (have no meaning outside our DB) df = df.drop(['universe_id', 'place_of_birth_id'], axis=1) # write output as both CSV and Excel; do not include index column df.to_csv(path.join(out_dir, "characters.csv"), encoding='utf-8', index=False) df.to_excel(path.join(out_dir, "characters.xlsx"), encoding='utf-8', index=False) session.close()
python
def character_summary_table(): """Export a table listing all characters and their data Output is a CSV file and an Excel file, saved as 'characters.csv/.xlsx' in the output directory. """ # a database client/session to run queries in cl = client.get_client() session = cl.create_session() # Define the query. Note that we need to rename the two joined-in name columns, # to make the labels intelligible and to not have two identical column names in the output. # Also, we need a left outer join on the place of birth (instead of the default left inner join) # if we want results for characters that have no place of birth set. query = session.query(models.Character, models.Universe.name.label('universe'), models.Place.name.label('place_of_birth')) \ .join(models.Character.universe) \ .outerjoin(models.Character.place_of_birth) # download all data as a pandas DataFrame, index it by the character ID characters = cl.df_query(query).set_index('id') # query the number of movie appearances per character query = session.query(sa.func.count(models.MovieAppearance.id).label('movie_appearances'), models.MovieAppearance.character_id) \ .group_by(models.MovieAppearance.character_id) appearances = cl.df_query(query).set_index('character_id') # join both tables, sort by name df = characters.join(appearances, how='left').sort_values(by='name') # drop the foreign key columns (have no meaning outside our DB) df = df.drop(['universe_id', 'place_of_birth_id'], axis=1) # write output as both CSV and Excel; do not include index column df.to_csv(path.join(out_dir, "characters.csv"), encoding='utf-8', index=False) df.to_excel(path.join(out_dir, "characters.xlsx"), encoding='utf-8', index=False) session.close()
[ "def", "character_summary_table", "(", ")", ":", "# a database client/session to run queries in", "cl", "=", "client", ".", "get_client", "(", ")", "session", "=", "cl", ".", "create_session", "(", ")", "# Define the query. Note that we need to rename the two joined-in name columns,", "# to make the labels intelligible and to not have two identical column names in the output.", "# Also, we need a left outer join on the place of birth (instead of the default left inner join)", "# if we want results for characters that have no place of birth set.", "query", "=", "session", ".", "query", "(", "models", ".", "Character", ",", "models", ".", "Universe", ".", "name", ".", "label", "(", "'universe'", ")", ",", "models", ".", "Place", ".", "name", ".", "label", "(", "'place_of_birth'", ")", ")", ".", "join", "(", "models", ".", "Character", ".", "universe", ")", ".", "outerjoin", "(", "models", ".", "Character", ".", "place_of_birth", ")", "# download all data as a pandas DataFrame, index it by the character ID", "characters", "=", "cl", ".", "df_query", "(", "query", ")", ".", "set_index", "(", "'id'", ")", "# query the number of movie appearances per character", "query", "=", "session", ".", "query", "(", "sa", ".", "func", ".", "count", "(", "models", ".", "MovieAppearance", ".", "id", ")", ".", "label", "(", "'movie_appearances'", ")", ",", "models", ".", "MovieAppearance", ".", "character_id", ")", ".", "group_by", "(", "models", ".", "MovieAppearance", ".", "character_id", ")", "appearances", "=", "cl", ".", "df_query", "(", "query", ")", ".", "set_index", "(", "'character_id'", ")", "# join both tables, sort by name", "df", "=", "characters", ".", "join", "(", "appearances", ",", "how", "=", "'left'", ")", ".", "sort_values", "(", "by", "=", "'name'", ")", "# drop the foreign key columns (have no meaning outside our DB)", "df", "=", "df", ".", "drop", "(", "[", "'universe_id'", ",", "'place_of_birth_id'", "]", ",", "axis", "=", "1", ")", "# write output as both CSV and Excel; do not include index column", "df", ".", "to_csv", "(", "path", ".", "join", "(", "out_dir", ",", "\"characters.csv\"", ")", ",", "encoding", "=", "'utf-8'", ",", "index", "=", "False", ")", "df", ".", "to_excel", "(", "path", ".", "join", "(", "out_dir", ",", "\"characters.xlsx\"", ")", ",", "encoding", "=", "'utf-8'", ",", "index", "=", "False", ")", "session", ".", "close", "(", ")" ]
Export a table listing all characters and their data Output is a CSV file and an Excel file, saved as 'characters.csv/.xlsx' in the output directory.
[ "Export", "a", "table", "listing", "all", "characters", "and", "their", "data" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/analysis.py#L24-L63
train
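The comment about the left outer join deserves a tiny demonstration: with an inner join, rows lacking a place of birth would disappear. A minimal pandas sketch with made-up data:

.. code-block:: python

    import pandas as pd

    characters = pd.DataFrame({'name': ['A', 'B'], 'place_id': [1, None]})
    places = pd.DataFrame({'place_id': [1], 'place': ['X']})

    # how='inner' would drop character B entirely;
    # how='left' keeps it, with NaN in the 'place' column.
    joined = characters.merge(places, on='place_id', how='left')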
ehansis/ozelot
examples/superheroes/superheroes/analysis.py
fig_to_svg
def fig_to_svg(fig): """Helper function to convert matplotlib figure to SVG string Returns: str: figure as SVG string """ buf = io.StringIO() fig.savefig(buf, format='svg') buf.seek(0) return buf.getvalue()
python
def fig_to_svg(fig): """Helper function to convert matplotlib figure to SVG string Returns: str: figure as SVG string """ buf = io.StringIO() fig.savefig(buf, format='svg') buf.seek(0) return buf.getvalue()
[ "def", "fig_to_svg", "(", "fig", ")", ":", "buf", "=", "io", ".", "StringIO", "(", ")", "fig", ".", "savefig", "(", "buf", ",", "format", "=", "'svg'", ")", "buf", ".", "seek", "(", "0", ")", "return", "buf", ".", "getvalue", "(", ")" ]
Helper function to convert matplotlib figure to SVG string Returns: str: figure as SVG string
[ "Helper", "function", "to", "convert", "matplotlib", "figure", "to", "SVG", "string" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/analysis.py#L66-L75
train
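A hedged usage sketch for `fig_to_svg`: `io.StringIO` works here because the SVG backend emits text on Python 3. The `Agg` backend selection is an added assumption for headless environments:

.. code-block:: python

    import io

    import matplotlib
    matplotlib.use('Agg')  # assumed: render without a display
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])

    buf = io.StringIO()
    fig.savefig(buf, format='svg')
    svg_text = buf.getvalue()  # begins with the XML prolog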
ehansis/ozelot
examples/superheroes/superheroes/analysis.py
movie_network
def movie_network(): """Generate interactive network graph of movie appearances Output is an html page, rendered to 'movie_network.html' in the output directory. """ # page template template = jenv.get_template("movie_network.html") # container for template context context = dict() # a database client/session to run queries in cl = client.get_client() session = cl.create_session() # # query data # # get all Movies query = session.query(models.Movie.id, models.Movie.name, models.Movie.url, models.Movie.budget_inflation_adjusted, models.Movie.imdb_rating) movies = cl.df_query(query) # get all Movie Appearances query = session.query(models.MovieAppearance.movie_id, models.MovieAppearance.character_id) appearances = cl.df_query(query) # get all Characters that have movie appearances query = session.query(models.Character.id, models.Character.url, models.Character.name) \ .filter(models.Character.id.in_([int(i) for i in appearances['character_id'].unique()])) characters = cl.df_query(query) # # transform to network graph # graph = dict(nodes=[], graph=[], # this stays empty links=[], directed=False, multigraph=True) # containers for lookups from movie/character IDs to node IDs movie_node_id = dict() character_node_id = dict() # normalization for movie node size: 100 = max budget movie_size_factor = 100. / movies['budget_inflation_adjusted'].max() # nodes for movies for _, data in movies.iterrows(): movie_node_id[data['id']] = len(graph['nodes']) # noinspection PyTypeChecker graph['nodes'].append(dict(id=data['name'], size=max(5., data['budget_inflation_adjusted'] * movie_size_factor), score=data['imdb_rating'] / 10., type='square', url="http://marvel.wikia.com" + data['url'])) # nodes for characters for _, data in characters.iterrows(): character_node_id[data['id']] = len(graph['nodes']) # noinspection PyTypeChecker graph['nodes'].append(dict(id=data['name'], size=10, type='circle', url="http://marvel.wikia.com" + data['url'])) # links: movie appearances for _, data in appearances.iterrows(): # noinspection PyTypeChecker graph['links'].append(dict(source=movie_node_id[data['movie_id']], target=character_node_id[data['character_id']])) context['graph'] = json.dumps(graph, indent=4) # # render template # out_file = path.join(out_dir, "movie_network.html") html_content = template.render(**context) with open(out_file, 'w') as f: f.write(html_content) # done, clean up plt.close('all') session.close()
python
def movie_network(): """Generate interactive network graph of movie appearances Output is an html page, rendered to 'movie_network.html' in the output directory. """ # page template template = jenv.get_template("movie_network.html") # container for template context context = dict() # a database client/session to run queries in cl = client.get_client() session = cl.create_session() # # query data # # get all Movies query = session.query(models.Movie.id, models.Movie.name, models.Movie.url, models.Movie.budget_inflation_adjusted, models.Movie.imdb_rating) movies = cl.df_query(query) # get all Movie Appearances query = session.query(models.MovieAppearance.movie_id, models.MovieAppearance.character_id) appearances = cl.df_query(query) # get all Characters that have movie appearances query = session.query(models.Character.id, models.Character.url, models.Character.name) \ .filter(models.Character.id.in_([int(i) for i in appearances['character_id'].unique()])) characters = cl.df_query(query) # # transform to network graph # graph = dict(nodes=[], graph=[], # this stays empty links=[], directed=False, multigraph=True) # containers for lookups from movie/character IDs to node IDs movie_node_id = dict() character_node_id = dict() # normalization for movie node size: 100 = max budget movie_size_factor = 100. / movies['budget_inflation_adjusted'].max() # nodes for movies for _, data in movies.iterrows(): movie_node_id[data['id']] = len(graph['nodes']) # noinspection PyTypeChecker graph['nodes'].append(dict(id=data['name'], size=max(5., data['budget_inflation_adjusted'] * movie_size_factor), score=data['imdb_rating'] / 10., type='square', url="http://marvel.wikia.com" + data['url'])) # nodes for characters for _, data in characters.iterrows(): character_node_id[data['id']] = len(graph['nodes']) # noinspection PyTypeChecker graph['nodes'].append(dict(id=data['name'], size=10, type='circle', url="http://marvel.wikia.com" + data['url'])) # links: movie appearances for _, data in appearances.iterrows(): # noinspection PyTypeChecker graph['links'].append(dict(source=movie_node_id[data['movie_id']], target=character_node_id[data['character_id']])) context['graph'] = json.dumps(graph, indent=4) # # render template # out_file = path.join(out_dir, "movie_network.html") html_content = template.render(**context) with open(out_file, 'w') as f: f.write(html_content) # done, clean up plt.close('all') session.close()
[ "def", "movie_network", "(", ")", ":", "# page template", "template", "=", "jenv", ".", "get_template", "(", "\"movie_network.html\"", ")", "# container for template context", "context", "=", "dict", "(", ")", "# a database client/session to run queries in", "cl", "=", "client", ".", "get_client", "(", ")", "session", "=", "cl", ".", "create_session", "(", ")", "#", "# query data", "#", "# get all Movies", "query", "=", "session", ".", "query", "(", "models", ".", "Movie", ".", "id", ",", "models", ".", "Movie", ".", "name", ",", "models", ".", "Movie", ".", "url", ",", "models", ".", "Movie", ".", "budget_inflation_adjusted", ",", "models", ".", "Movie", ".", "imdb_rating", ")", "movies", "=", "cl", ".", "df_query", "(", "query", ")", "# get all Movie Appearances", "query", "=", "session", ".", "query", "(", "models", ".", "MovieAppearance", ".", "movie_id", ",", "models", ".", "MovieAppearance", ".", "character_id", ")", "appearances", "=", "cl", ".", "df_query", "(", "query", ")", "# get all Characters that have movie appearances", "query", "=", "session", ".", "query", "(", "models", ".", "Character", ".", "id", ",", "models", ".", "Character", ".", "url", ",", "models", ".", "Character", ".", "name", ")", ".", "filter", "(", "models", ".", "Character", ".", "id", ".", "in_", "(", "[", "int", "(", "i", ")", "for", "i", "in", "appearances", "[", "'character_id'", "]", ".", "unique", "(", ")", "]", ")", ")", "characters", "=", "cl", ".", "df_query", "(", "query", ")", "#", "# transform to network graph", "#", "graph", "=", "dict", "(", "nodes", "=", "[", "]", ",", "graph", "=", "[", "]", ",", "# this stays empty", "links", "=", "[", "]", ",", "directed", "=", "False", ",", "multigraph", "=", "True", ")", "# containers for lookups from movie/character IDs to node IDs", "movie_node_id", "=", "dict", "(", ")", "character_node_id", "=", "dict", "(", ")", "# normalization for movie node size: 100 = max budget", "movie_size_factor", "=", "100.", "/", "movies", "[", "'budget_inflation_adjusted'", "]", ".", "max", "(", ")", "# nodes for movies", "for", "_", ",", "data", "in", "movies", ".", "iterrows", "(", ")", ":", "movie_node_id", "[", "data", "[", "'id'", "]", "]", "=", "len", "(", "graph", "[", "'nodes'", "]", ")", "# noinspection PyTypeChecker", "graph", "[", "'nodes'", "]", ".", "append", "(", "dict", "(", "id", "=", "data", "[", "'name'", "]", ",", "size", "=", "max", "(", "5.", ",", "data", "[", "'budget_inflation_adjusted'", "]", "*", "movie_size_factor", ")", ",", "score", "=", "data", "[", "'imdb_rating'", "]", "/", "10.", ",", "type", "=", "'square'", ",", "url", "=", "\"http://marvel.wikia.com\"", "+", "data", "[", "'url'", "]", ")", ")", "# nodes for characters", "for", "_", ",", "data", "in", "characters", ".", "iterrows", "(", ")", ":", "character_node_id", "[", "data", "[", "'id'", "]", "]", "=", "len", "(", "graph", "[", "'nodes'", "]", ")", "# noinspection PyTypeChecker", "graph", "[", "'nodes'", "]", ".", "append", "(", "dict", "(", "id", "=", "data", "[", "'name'", "]", ",", "size", "=", "10", ",", "type", "=", "'circle'", ",", "url", "=", "\"http://marvel.wikia.com\"", "+", "data", "[", "'url'", "]", ")", ")", "# links: movie appearances", "for", "_", ",", "data", "in", "appearances", ".", "iterrows", "(", ")", ":", "# noinspection PyTypeChecker", "graph", "[", "'links'", "]", ".", "append", "(", "dict", "(", "source", "=", "movie_node_id", "[", "data", "[", "'movie_id'", "]", "]", ",", "target", "=", "character_node_id", "[", "data", "[", "'character_id'", "]", "]", 
")", ")", "context", "[", "'graph'", "]", "=", "json", ".", "dumps", "(", "graph", ",", "indent", "=", "4", ")", "#", "# render template", "#", "out_file", "=", "path", ".", "join", "(", "out_dir", ",", "\"movie_network.html\"", ")", "html_content", "=", "template", ".", "render", "(", "*", "*", "context", ")", "with", "open", "(", "out_file", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "html_content", ")", "# done, clean up", "plt", ".", "close", "(", "'all'", ")", "session", ".", "close", "(", ")" ]
Generate interactive network graph of movie appearances Output is an html page, rendered to 'movie_network.html' in the output directory.
[ "Generate", "interactive", "network", "graph", "of", "movie", "appearances" ]
948675e02eb6fca940450f5cb814f53e97159e5b
https://github.com/ehansis/ozelot/blob/948675e02eb6fca940450f5cb814f53e97159e5b/examples/superheroes/superheroes/analysis.py#L204-L298
train
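The graph dict above follows the node-link JSON layout: a nodes list plus links that reference node indices. A toy reconstruction with fake data, independent of the database models:

.. code-block:: python

    import json

    graph = dict(nodes=[], graph=[], links=[], directed=False,
                 multigraph=True)
    node_id = {}

    # Register two nodes and remember their positional indices.
    for name, kind in [('Movie A', 'square'), ('Hero', 'circle')]:
        node_id[name] = len(graph['nodes'])
        graph['nodes'].append(dict(id=name, type=kind))

    # Links refer to nodes by index, as in the record above.
    graph['links'].append(dict(source=node_id['Movie A'],
                               target=node_id['Hero']))
    print(json.dumps(graph, indent=4))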
maljovec/topopy
docs/_static/logo_generator.py
unpack2D
def unpack2D(_x): """ Helper function for splitting 2D data into x and y component to make equations simpler """ _x = np.atleast_2d(_x) x = _x[:, 0] y = _x[:, 1] return x, y
python
def unpack2D(_x): """ Helper function for splitting 2D data into x and y component to make equations simpler """ _x = np.atleast_2d(_x) x = _x[:, 0] y = _x[:, 1] return x, y
[ "def", "unpack2D", "(", "_x", ")", ":", "_x", "=", "np", ".", "atleast_2d", "(", "_x", ")", "x", "=", "_x", "[", ":", ",", "0", "]", "y", "=", "_x", "[", ":", ",", "1", "]", "return", "x", ",", "y" ]
Helper function for splitting 2D data into x and y component to make equations simpler
[ "Helper", "function", "for", "splitting", "2D", "data", "into", "x", "and", "y", "component", "to", "make", "equations", "simpler" ]
4be598d51c4e4043b73d4ad44beed6d289e2f088
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/docs/_static/logo_generator.py#L48-L56
train
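Usage is straightforward; `np.atleast_2d` is what lets a single (x, y) pair pass through the same code path as an (n, 2) array:

.. code-block:: python

    import numpy as np

    def unpack2D(_x):
        _x = np.atleast_2d(_x)
        return _x[:, 0], _x[:, 1]

    x, y = unpack2D([0.0, 1.0])          # single point -> shape (1,) arrays
    xs, ys = unpack2D(np.zeros((5, 2)))  # batch of 5 points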
albertz/py_better_exchook
better_exchook.py
is_at_exit
def is_at_exit(): """ Some heuristics to figure out whether this is called at a stage where the Python interpreter is shutting down. :return: whether the Python interpreter is currently in the process of shutting down :rtype: bool """ if _threading_main_thread is not None: if not hasattr(threading, "main_thread"): return True if threading.main_thread() != _threading_main_thread: return True if not _threading_main_thread.is_alive(): return True return False
python
def is_at_exit(): """ Some heuristics to figure out whether this is called at a stage where the Python interpreter is shutting down. :return: whether the Python interpreter is currently in the process of shutting down :rtype: bool """ if _threading_main_thread is not None: if not hasattr(threading, "main_thread"): return True if threading.main_thread() != _threading_main_thread: return True if not _threading_main_thread.is_alive(): return True return False
[ "def", "is_at_exit", "(", ")", ":", "if", "_threading_main_thread", "is", "not", "None", ":", "if", "not", "hasattr", "(", "threading", ",", "\"main_thread\"", ")", ":", "return", "True", "if", "threading", ".", "main_thread", "(", ")", "!=", "_threading_main_thread", ":", "return", "True", "if", "not", "_threading_main_thread", ".", "is_alive", "(", ")", ":", "return", "True", "return", "False" ]
Some heuristics to figure out whether this is called at a stage where the Python interpreter is shutting down. :return: whether the Python interpreter is currently in the process of shutting down :rtype: bool
[ "Some", "heuristics", "to", "figure", "out", "whether", "this", "is", "called", "at", "a", "stage", "where", "the", "Python", "interpreter", "is", "shutting", "down", "." ]
3d524a027d7fc4e83e47e39a1978849561da69b3
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L836-L850
train
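The heuristic relies on a module-level snapshot of the main thread taken at import time; comparing that snapshot to the current main thread later reveals interpreter shutdown. A self-contained sketch of the same idea (the snapshot name mirrors the record; the function name is hypothetical):

.. code-block:: python

    import threading

    _threading_main_thread = threading.main_thread()  # snapshot at import

    def probably_shutting_down():
        # If the captured main thread was replaced or is no longer alive,
        # assume the interpreter is shutting down.
        main = threading.main_thread()
        return main is not _threading_main_thread or not main.is_alive()

    assert probably_shutting_down() is False  # during normal execution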
albertz/py_better_exchook
better_exchook.py
better_exchook
def better_exchook(etype, value, tb, debugshell=False, autodebugshell=True, file=None, with_color=None): """ Replacement for sys.excepthook. :param etype: exception type :param value: exception value :param tb: traceback :param bool debugshell: spawn a debug shell at the context of the exception :param bool autodebugshell: if env DEBUG is an integer != 0, it will spawn a debug shell :param io.TextIOBase|io.StringIO file: output stream where we will print the traceback and exception information. stderr by default. :param bool|None with_color: whether to use ANSI escape codes for colored output """ if file is None: file = sys.stderr def output(ln): """ :param str ln: :return: nothing, prints to ``file`` """ file.write(ln + "\n") color = Color(enable=with_color) output(color("EXCEPTION", color.fg_colors[1], bold=True)) all_locals, all_globals = {}, {} if tb is not None: print_tb(tb, allLocals=all_locals, allGlobals=all_globals, file=file, withTitle=True, with_color=color.enable) else: output(color("better_exchook: traceback unknown", color.fg_colors[1])) import types # noinspection PyShadowingNames def _some_str(value): """ :param object value: :rtype: str """ # noinspection PyBroadException try: return str(value) except Exception: return '<unprintable %s object>' % type(value).__name__ # noinspection PyShadowingNames def _format_final_exc_line(etype, value): value_str = _some_str(value) if value is None or not value_str: line = color("%s" % etype, color.fg_colors[1]) else: line = color("%s" % etype, color.fg_colors[1]) + ": %s" % (value_str,) return line # noinspection PyUnresolvedReferences if (isinstance(etype, BaseException) or (hasattr(types, "InstanceType") and isinstance(etype, types.InstanceType)) or etype is None or type(etype) is str): output(_format_final_exc_line(etype, value)) else: output(_format_final_exc_line(etype.__name__, value)) if autodebugshell: # noinspection PyBroadException try: debugshell = int(os.environ["DEBUG"]) != 0 except Exception: pass if debugshell: output("---------- DEBUG SHELL -----------") debug_shell(user_ns=all_locals, user_global_ns=all_globals, traceback=tb) file.flush()
python
def better_exchook(etype, value, tb, debugshell=False, autodebugshell=True, file=None, with_color=None): """ Replacement for sys.excepthook. :param etype: exception type :param value: exception value :param tb: traceback :param bool debugshell: spawn a debug shell at the context of the exception :param bool autodebugshell: if env DEBUG is an integer != 0, it will spawn a debug shell :param io.TextIOBase|io.StringIO file: output stream where we will print the traceback and exception information. stderr by default. :param bool|None with_color: whether to use ANSI escape codes for colored output """ if file is None: file = sys.stderr def output(ln): """ :param str ln: :return: nothing, prints to ``file`` """ file.write(ln + "\n") color = Color(enable=with_color) output(color("EXCEPTION", color.fg_colors[1], bold=True)) all_locals, all_globals = {}, {} if tb is not None: print_tb(tb, allLocals=all_locals, allGlobals=all_globals, file=file, withTitle=True, with_color=color.enable) else: output(color("better_exchook: traceback unknown", color.fg_colors[1])) import types # noinspection PyShadowingNames def _some_str(value): """ :param object value: :rtype: str """ # noinspection PyBroadException try: return str(value) except Exception: return '<unprintable %s object>' % type(value).__name__ # noinspection PyShadowingNames def _format_final_exc_line(etype, value): value_str = _some_str(value) if value is None or not value_str: line = color("%s" % etype, color.fg_colors[1]) else: line = color("%s" % etype, color.fg_colors[1]) + ": %s" % (value_str,) return line # noinspection PyUnresolvedReferences if (isinstance(etype, BaseException) or (hasattr(types, "InstanceType") and isinstance(etype, types.InstanceType)) or etype is None or type(etype) is str): output(_format_final_exc_line(etype, value)) else: output(_format_final_exc_line(etype.__name__, value)) if autodebugshell: # noinspection PyBroadException try: debugshell = int(os.environ["DEBUG"]) != 0 except Exception: pass if debugshell: output("---------- DEBUG SHELL -----------") debug_shell(user_ns=all_locals, user_global_ns=all_globals, traceback=tb) file.flush()
[ "def", "better_exchook", "(", "etype", ",", "value", ",", "tb", ",", "debugshell", "=", "False", ",", "autodebugshell", "=", "True", ",", "file", "=", "None", ",", "with_color", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "sys", ".", "stderr", "def", "output", "(", "ln", ")", ":", "\"\"\"\n :param str ln:\n :return: nothing, prints to ``file``\n \"\"\"", "file", ".", "write", "(", "ln", "+", "\"\\n\"", ")", "color", "=", "Color", "(", "enable", "=", "with_color", ")", "output", "(", "color", "(", "\"EXCEPTION\"", ",", "color", ".", "fg_colors", "[", "1", "]", ",", "bold", "=", "True", ")", ")", "all_locals", ",", "all_globals", "=", "{", "}", ",", "{", "}", "if", "tb", "is", "not", "None", ":", "print_tb", "(", "tb", ",", "allLocals", "=", "all_locals", ",", "allGlobals", "=", "all_globals", ",", "file", "=", "file", ",", "withTitle", "=", "True", ",", "with_color", "=", "color", ".", "enable", ")", "else", ":", "output", "(", "color", "(", "\"better_exchook: traceback unknown\"", ",", "color", ".", "fg_colors", "[", "1", "]", ")", ")", "import", "types", "# noinspection PyShadowingNames", "def", "_some_str", "(", "value", ")", ":", "\"\"\"\n :param object value:\n :rtype: str\n \"\"\"", "# noinspection PyBroadException", "try", ":", "return", "str", "(", "value", ")", "except", "Exception", ":", "return", "'<unprintable %s object>'", "%", "type", "(", "value", ")", ".", "__name__", "# noinspection PyShadowingNames", "def", "_format_final_exc_line", "(", "etype", ",", "value", ")", ":", "value_str", "=", "_some_str", "(", "value", ")", "if", "value", "is", "None", "or", "not", "value_str", ":", "line", "=", "color", "(", "\"%s\"", "%", "etype", ",", "color", ".", "fg_colors", "[", "1", "]", ")", "else", ":", "line", "=", "color", "(", "\"%s\"", "%", "etype", ",", "color", ".", "fg_colors", "[", "1", "]", ")", "+", "\": %s\"", "%", "(", "value_str", ",", ")", "return", "line", "# noinspection PyUnresolvedReferences", "if", "(", "isinstance", "(", "etype", ",", "BaseException", ")", "or", "(", "hasattr", "(", "types", ",", "\"InstanceType\"", ")", "and", "isinstance", "(", "etype", ",", "types", ".", "InstanceType", ")", ")", "or", "etype", "is", "None", "or", "type", "(", "etype", ")", "is", "str", ")", ":", "output", "(", "_format_final_exc_line", "(", "etype", ",", "value", ")", ")", "else", ":", "output", "(", "_format_final_exc_line", "(", "etype", ".", "__name__", ",", "value", ")", ")", "if", "autodebugshell", ":", "# noinspection PyBroadException", "try", ":", "debugshell", "=", "int", "(", "os", ".", "environ", "[", "\"DEBUG\"", "]", ")", "!=", "0", "except", "Exception", ":", "pass", "if", "debugshell", ":", "output", "(", "\"---------- DEBUG SHELL -----------\"", ")", "debug_shell", "(", "user_ns", "=", "all_locals", ",", "user_global_ns", "=", "all_globals", ",", "traceback", "=", "tb", ")", "file", ".", "flush", "(", ")" ]
Replacement for sys.excepthook. :param etype: exception type :param value: exception value :param tb: traceback :param bool debugshell: spawn a debug shell at the context of the exception :param bool autodebugshell: if env DEBUG is an integer != 0, it will spawn a debug shell :param io.TextIOBase|io.StringIO file: output stream where we will print the traceback and exception information. stderr by default. :param bool|None with_color: whether to use ANSI escape codes for colored output
[ "Replacement", "for", "sys", ".", "excepthook", "." ]
3d524a027d7fc4e83e47e39a1978849561da69b3
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1173-L1244
train
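A hedged usage sketch matching the signature documented above: call it with `sys.exc_info()` inside an except block to print the annotated traceback without re-raising (assumes the better_exchook module is importable):

.. code-block:: python

    import sys

    from better_exchook import better_exchook

    try:
        1 / 0
    except ZeroDivisionError:
        better_exchook(*sys.exc_info())  # prints to stderr by default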
albertz/py_better_exchook
better_exchook.py
dump_all_thread_tracebacks
def dump_all_thread_tracebacks(exclude_thread_ids=None, file=None): """ Prints the traceback of all threads. :param set[int]|list[int]|None exclude_thread_ids: threads to exclude :param io.TextIOBase|io.StringIO file: output stream """ if exclude_thread_ids is None: exclude_thread_ids = [] if not file: file = sys.stdout import threading if hasattr(sys, "_current_frames"): print("", file=file) threads = {t.ident: t for t in threading.enumerate()} # noinspection PyProtectedMember for tid, stack in sys._current_frames().items(): if tid in exclude_thread_ids: continue # This is a bug in earlier Python versions. # http://bugs.python.org/issue17094 # Note that this leaves out all threads not created via the threading module. if tid not in threads: continue tags = [] thread = threads.get(tid) if thread: assert isinstance(thread, threading.Thread) if thread is threading.currentThread(): tags += ["current"] # noinspection PyProtectedMember,PyUnresolvedReferences if isinstance(thread, threading._MainThread): tags += ["main"] tags += [str(thread)] else: tags += ["unknown with id %i" % tid] print("Thread %s:" % ", ".join(tags), file=file) print_tb(stack, file=file) print("", file=file) print("That were all threads.", file=file) else: print("Does not have sys._current_frames, cannot get thread tracebacks.", file=file)
python
def dump_all_thread_tracebacks(exclude_thread_ids=None, file=None): """ Prints the traceback of all threads. :param set[int]|list[int]|None exclude_thread_ids: threads to exclude :param io.TextIOBase|io.StringIO file: output stream """ if exclude_thread_ids is None: exclude_thread_ids = [] if not file: file = sys.stdout import threading if hasattr(sys, "_current_frames"): print("", file=file) threads = {t.ident: t for t in threading.enumerate()} # noinspection PyProtectedMember for tid, stack in sys._current_frames().items(): if tid in exclude_thread_ids: continue # This is a bug in earlier Python versions. # http://bugs.python.org/issue17094 # Note that this leaves out all threads not created via the threading module. if tid not in threads: continue tags = [] thread = threads.get(tid) if thread: assert isinstance(thread, threading.Thread) if thread is threading.currentThread(): tags += ["current"] # noinspection PyProtectedMember,PyUnresolvedReferences if isinstance(thread, threading._MainThread): tags += ["main"] tags += [str(thread)] else: tags += ["unknown with id %i" % tid] print("Thread %s:" % ", ".join(tags), file=file) print_tb(stack, file=file) print("", file=file) print("That were all threads.", file=file) else: print("Does not have sys._current_frames, cannot get thread tracebacks.", file=file)
[ "def", "dump_all_thread_tracebacks", "(", "exclude_thread_ids", "=", "None", ",", "file", "=", "None", ")", ":", "if", "exclude_thread_ids", "is", "None", ":", "exclude_thread_ids", "=", "[", "]", "if", "not", "file", ":", "file", "=", "sys", ".", "stdout", "import", "threading", "if", "hasattr", "(", "sys", ",", "\"_current_frames\"", ")", ":", "print", "(", "\"\"", ",", "file", "=", "file", ")", "threads", "=", "{", "t", ".", "ident", ":", "t", "for", "t", "in", "threading", ".", "enumerate", "(", ")", "}", "# noinspection PyProtectedMember", "for", "tid", ",", "stack", "in", "sys", ".", "_current_frames", "(", ")", ".", "items", "(", ")", ":", "if", "tid", "in", "exclude_thread_ids", ":", "continue", "# This is a bug in earlier Python versions.", "# http://bugs.python.org/issue17094", "# Note that this leaves out all threads not created via the threading module.", "if", "tid", "not", "in", "threads", ":", "continue", "tags", "=", "[", "]", "thread", "=", "threads", ".", "get", "(", "tid", ")", "if", "thread", ":", "assert", "isinstance", "(", "thread", ",", "threading", ".", "Thread", ")", "if", "thread", "is", "threading", ".", "currentThread", "(", ")", ":", "tags", "+=", "[", "\"current\"", "]", "# noinspection PyProtectedMember,PyUnresolvedReferences", "if", "isinstance", "(", "thread", ",", "threading", ".", "_MainThread", ")", ":", "tags", "+=", "[", "\"main\"", "]", "tags", "+=", "[", "str", "(", "thread", ")", "]", "else", ":", "tags", "+=", "[", "\"unknown with id %i\"", "%", "tid", "]", "print", "(", "\"Thread %s:\"", "%", "\", \"", ".", "join", "(", "tags", ")", ",", "file", "=", "file", ")", "print_tb", "(", "stack", ",", "file", "=", "file", ")", "print", "(", "\"\"", ",", "file", "=", "file", ")", "print", "(", "\"That were all threads.\"", ",", "file", "=", "file", ")", "else", ":", "print", "(", "\"Does not have sys._current_frames, cannot get thread tracebacks.\"", ",", "file", "=", "file", ")" ]
Prints the traceback of all threads. :param set[int]|list[int]|None exclude_thread_ids: threads to exclude :param io.TextIOBase|io.StringIO file: output stream
[ "Prints", "the", "traceback", "of", "all", "threads", "." ]
3d524a027d7fc4e83e47e39a1978849561da69b3
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1247-L1289
train
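The core pattern, mapping thread idents from `threading.enumerate()` onto `sys._current_frames()` and printing each stack, also works with the stdlib traceback module. A stripped-down variant:

.. code-block:: python

    import sys
    import threading
    import traceback

    def dump_threads(file=sys.stdout):
        # Map thread idents to Thread objects for readable labels.
        threads = {t.ident: t for t in threading.enumerate()}
        for tid, frame in sys._current_frames().items():
            print("Thread %r:" % threads.get(tid), file=file)
            traceback.print_stack(frame, file=file)

    dump_threads()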
albertz/py_better_exchook
better_exchook.py
_main
def _main(): """ Some demo. """ if sys.argv[1:] == ["test"]: for k, v in sorted(globals().items()): if not k.startswith("test_"): continue print("running: %s()" % k) v() print("ok.") sys.exit() elif sys.argv[1:] == ["debug_shell"]: debug_shell(locals(), globals()) sys.exit() elif sys.argv[1:] == ["debug_shell_exception"]: try: raise Exception("demo exception") except Exception: better_exchook(*sys.exc_info(), debugshell=True) sys.exit() elif sys.argv[1:]: print("Usage: %s (test|...)" % sys.argv[0]) sys.exit(1) # some examples # this code produces this output: https://gist.github.com/922622 try: x = {1: 2, "a": "b"} # noinspection PyMissingOrEmptyDocstring def f(): y = "foo" # noinspection PyUnresolvedReferences,PyStatementEffect x, 42, sys.stdin.__class__, sys.exc_info, y, z f() except Exception: better_exchook(*sys.exc_info()) try: # noinspection PyArgumentList (lambda _x: None)(__name__, 42) # multiline except Exception: better_exchook(*sys.exc_info()) try: class Obj: def __repr__(self): return ( "<Obj multi-\n" + " line repr>") obj = Obj() assert not obj except Exception: better_exchook(*sys.exc_info()) # noinspection PyMissingOrEmptyDocstring def f1(a): f2(a + 1, 2) # noinspection PyMissingOrEmptyDocstring def f2(a, b): f3(a + b) # noinspection PyMissingOrEmptyDocstring def f3(a): b = ("abc" * 100) + "-interesting" # some long demo str a(b) # error, not callable try: f1(13) except Exception: better_exchook(*sys.exc_info()) # use this to overwrite the global exception handler install() # and fail # noinspection PyUnresolvedReferences finalfail(sys)
python
def _main(): """ Some demo. """ if sys.argv[1:] == ["test"]: for k, v in sorted(globals().items()): if not k.startswith("test_"): continue print("running: %s()" % k) v() print("ok.") sys.exit() elif sys.argv[1:] == ["debug_shell"]: debug_shell(locals(), globals()) sys.exit() elif sys.argv[1:] == ["debug_shell_exception"]: try: raise Exception("demo exception") except Exception: better_exchook(*sys.exc_info(), debugshell=True) sys.exit() elif sys.argv[1:]: print("Usage: %s (test|...)" % sys.argv[0]) sys.exit(1) # some examples # this code produces this output: https://gist.github.com/922622 try: x = {1: 2, "a": "b"} # noinspection PyMissingOrEmptyDocstring def f(): y = "foo" # noinspection PyUnresolvedReferences,PyStatementEffect x, 42, sys.stdin.__class__, sys.exc_info, y, z f() except Exception: better_exchook(*sys.exc_info()) try: # noinspection PyArgumentList (lambda _x: None)(__name__, 42) # multiline except Exception: better_exchook(*sys.exc_info()) try: class Obj: def __repr__(self): return ( "<Obj multi-\n" + " line repr>") obj = Obj() assert not obj except Exception: better_exchook(*sys.exc_info()) # noinspection PyMissingOrEmptyDocstring def f1(a): f2(a + 1, 2) # noinspection PyMissingOrEmptyDocstring def f2(a, b): f3(a + b) # noinspection PyMissingOrEmptyDocstring def f3(a): b = ("abc" * 100) + "-interesting" # some long demo str a(b) # error, not callable try: f1(13) except Exception: better_exchook(*sys.exc_info()) # use this to overwrite the global exception handler install() # and fail # noinspection PyUnresolvedReferences finalfail(sys)
[ "def", "_main", "(", ")", ":", "if", "sys", ".", "argv", "[", "1", ":", "]", "==", "[", "\"test\"", "]", ":", "for", "k", ",", "v", "in", "sorted", "(", "globals", "(", ")", ".", "items", "(", ")", ")", ":", "if", "not", "k", ".", "startswith", "(", "\"test_\"", ")", ":", "continue", "print", "(", "\"running: %s()\"", "%", "k", ")", "v", "(", ")", "print", "(", "\"ok.\"", ")", "sys", ".", "exit", "(", ")", "elif", "sys", ".", "argv", "[", "1", ":", "]", "==", "[", "\"debug_shell\"", "]", ":", "debug_shell", "(", "locals", "(", ")", ",", "globals", "(", ")", ")", "sys", ".", "exit", "(", ")", "elif", "sys", ".", "argv", "[", "1", ":", "]", "==", "[", "\"debug_shell_exception\"", "]", ":", "try", ":", "raise", "Exception", "(", "\"demo exception\"", ")", "except", "Exception", ":", "better_exchook", "(", "*", "sys", ".", "exc_info", "(", ")", ",", "debugshell", "=", "True", ")", "sys", ".", "exit", "(", ")", "elif", "sys", ".", "argv", "[", "1", ":", "]", ":", "print", "(", "\"Usage: %s (test|...)\"", "%", "sys", ".", "argv", "[", "0", "]", ")", "sys", ".", "exit", "(", "1", ")", "# some examples", "# this code produces this output: https://gist.github.com/922622", "try", ":", "x", "=", "{", "1", ":", "2", ",", "\"a\"", ":", "\"b\"", "}", "# noinspection PyMissingOrEmptyDocstring", "def", "f", "(", ")", ":", "y", "=", "\"foo\"", "# noinspection PyUnresolvedReferences,PyStatementEffect", "x", ",", "42", ",", "sys", ".", "stdin", ".", "__class__", ",", "sys", ".", "exc_info", ",", "y", ",", "z", "f", "(", ")", "except", "Exception", ":", "better_exchook", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "try", ":", "# noinspection PyArgumentList", "(", "lambda", "_x", ":", "None", ")", "(", "__name__", ",", "42", ")", "# multiline", "except", "Exception", ":", "better_exchook", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "try", ":", "class", "Obj", ":", "def", "__repr__", "(", "self", ")", ":", "return", "(", "\"<Obj multi-\\n\"", "+", "\" line repr>\"", ")", "obj", "=", "Obj", "(", ")", "assert", "not", "obj", "except", "Exception", ":", "better_exchook", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "# noinspection PyMissingOrEmptyDocstring", "def", "f1", "(", "a", ")", ":", "f2", "(", "a", "+", "1", ",", "2", ")", "# noinspection PyMissingOrEmptyDocstring", "def", "f2", "(", "a", ",", "b", ")", ":", "f3", "(", "a", "+", "b", ")", "# noinspection PyMissingOrEmptyDocstring", "def", "f3", "(", "a", ")", ":", "b", "=", "(", "\"abc\"", "*", "100", ")", "+", "\"-interesting\"", "# some long demo str", "a", "(", "b", ")", "# error, not callable", "try", ":", "f1", "(", "13", ")", "except", "Exception", ":", "better_exchook", "(", "*", "sys", ".", "exc_info", "(", ")", ")", "# use this to overwrite the global exception handler", "install", "(", ")", "# and fail", "# noinspection PyUnresolvedReferences", "finalfail", "(", "sys", ")" ]
Some demo.
[ "Some", "demo", "." ]
3d524a027d7fc4e83e47e39a1978849561da69b3
https://github.com/albertz/py_better_exchook/blob/3d524a027d7fc4e83e47e39a1978849561da69b3/better_exchook.py#L1502-L1586
train
thorgate/django-esteid
esteid/signature.py
verify_mid_signature
def verify_mid_signature(certificate_data, sp_challenge, response_challenge, signature): """ Verify mobile id Authentication signature is valid :param certificate_data: binary certificate data, from 'CertificateData' field :param sp_challenge: binary challenge sent via 'SPChallenge' field :param response_challenge: response challenge, from 'Challenge' field :param signature: response signature :return: """ if not response_challenge.startswith(sp_challenge): return False try: key = RSA.importKey(certificate_data) verifier = PKCS1_v1_5.new(key) except ValueError: key = ECC.import_key(certificate_data) verifier = DSS.new(key, 'deterministic-rfc6979') digest = PrehashedMessageData(response_challenge) try: verifier.verify(digest, signature) return True except ValueError: return False
python
def verify_mid_signature(certificate_data, sp_challenge, response_challenge, signature): """ Verify mobile id Authentication signature is valid :param certificate_data: binary certificate data, from 'CertificateData' field :param sp_challenge: binary challenge sent via 'SPChallenge' field :param response_challenge: response challenge, from 'Challenge' field :param signature: response signature :return: """ if not response_challenge.startswith(sp_challenge): return False try: key = RSA.importKey(certificate_data) verifier = PKCS1_v1_5.new(key) except ValueError: key = ECC.import_key(certificate_data) verifier = DSS.new(key, 'deterministic-rfc6979') digest = PrehashedMessageData(response_challenge) try: verifier.verify(digest, signature) return True except ValueError: return False
[ "def", "verify_mid_signature", "(", "certificate_data", ",", "sp_challenge", ",", "response_challenge", ",", "signature", ")", ":", "if", "not", "response_challenge", ".", "startswith", "(", "sp_challenge", ")", ":", "return", "False", "try", ":", "key", "=", "RSA", ".", "importKey", "(", "certificate_data", ")", "verifier", "=", "PKCS1_v1_5", ".", "new", "(", "key", ")", "except", "ValueError", ":", "key", "=", "ECC", ".", "import_key", "(", "certificate_data", ")", "verifier", "=", "DSS", ".", "new", "(", "key", ",", "'deterministic-rfc6979'", ")", "digest", "=", "PrehashedMessageData", "(", "response_challenge", ")", "try", ":", "verifier", ".", "verify", "(", "digest", ",", "signature", ")", "return", "True", "except", "ValueError", ":", "return", "False" ]
Verify mobile id Authentication signature is valid :param certificate_data: binary certificate data, from 'CertificateData' field :param sp_challenge: binary challenge sent via 'SPChallenge' field :param response_challenge: response challenge, from 'Challenge' field :param signature: response signature :return:
[ "Verify", "mobile", "id", "Authentication", "signature", "is", "valid" ]
407ae513e357fedea0e3e42198df8eb9d9ff0646
https://github.com/thorgate/django-esteid/blob/407ae513e357fedea0e3e42198df8eb9d9ff0646/esteid/signature.py#L23-L52
train
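The RSA branch uses PyCryptodome's legacy PKCS#1 v1.5 verifier, whose `verify()` returns a boolean in that API. A self-contained round trip with a freshly generated key; the `PrehashedMessageData` wrapper from the record is project-specific and not reproduced here:

.. code-block:: python

    from Crypto.Hash import SHA384
    from Crypto.PublicKey import RSA
    from Crypto.Signature import PKCS1_v1_5

    key = RSA.generate(2048)
    digest = SHA384.new(b'challenge-bytes')

    # Sign with the private key, verify with the public key.
    signature = PKCS1_v1_5.new(key).sign(digest)
    assert PKCS1_v1_5.new(key.publickey()).verify(digest, signature)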
untwisted/untwisted
untwisted/dispatcher.py
Dispatcher.drive
def drive(self, event, *args): """ Used to dispatch events. """ maps = self.base.get(event, self.step) for handle, data in maps[:]: params = args + data try: handle(self, *params) except Stop: break except StopIteration: pass except Kill as Root: raise except Erase: maps.remove((handle, data)) except Exception as e: debug(event, params) for handle in self.pool: handle(self, event, args)
python
def drive(self, event, *args): """ Used to dispatch events. """ maps = self.base.get(event, self.step) for handle, data in maps[:]: params = args + data try: handle(self, *params) except Stop: break except StopIteration: pass except Kill as Root: raise except Erase: maps.remove((handle, data)) except Exception as e: debug(event, params) for handle in self.pool: handle(self, event, args)
[ "def", "drive", "(", "self", ",", "event", ",", "*", "args", ")", ":", "maps", "=", "self", ".", "base", ".", "get", "(", "event", ",", "self", ".", "step", ")", "for", "handle", ",", "data", "in", "maps", "[", ":", "]", ":", "params", "=", "args", "+", "data", "try", ":", "handle", "(", "self", ",", "*", "params", ")", "except", "Stop", ":", "break", "except", "StopIteration", ":", "pass", "except", "Kill", "as", "Root", ":", "raise", "except", "Erase", ":", "maps", ".", "remove", "(", "(", "handle", ",", "data", ")", ")", "except", "Exception", "as", "e", ":", "debug", "(", "event", ",", "params", ")", "for", "handle", "in", "self", ".", "pool", ":", "handle", "(", "self", ",", "event", ",", "args", ")" ]
Used to dispatch events.
[ "Used", "to", "dispatch", "events", "." ]
8a8d9c8a8d0f3452d5de67cd760297bb5759f637
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/dispatcher.py#L16-L38
train
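The handler convention (dispatcher first, then event args plus bound data, with `Stop` breaking the chain) can be shown with a toy loop; the names mirror the record, but the example is self-contained:

.. code-block:: python

    class Stop(Exception):
        pass

    def drive(dispatcher, base, event, *args):
        # Iterate over a copy so handlers may unbind themselves safely.
        for handle, data in base.get(event, [])[:]:
            try:
                handle(dispatcher, *(args + data))
            except Stop:
                break

    base = {'on_data': [(lambda disp, chunk, tag: print(tag, chunk),
                         ('recv',))]}
    drive(object(), base, 'on_data', b'hello')  # prints: recv b'hello'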
untwisted/untwisted
untwisted/expect.py
Expect.send
def send(self, data): """ Send data to the child process through its stdin. """ self.stdin.write(data) self.stdin.flush()
python
def send(self, data): """ Send data to the child process through its stdin. """ self.stdin.write(data) self.stdin.flush()
[ "def", "send", "(", "self", ",", "data", ")", ":", "self", ".", "stdin", ".", "write", "(", "data", ")", "self", ".", "stdin", ".", "flush", "(", ")" ]
Send data to the child process through its stdin.
[ "Send", "data", "to", "the", "child", "process", "through", "its", "stdin", "." ]
8a8d9c8a8d0f3452d5de67cd760297bb5759f637
https://github.com/untwisted/untwisted/blob/8a8d9c8a8d0f3452d5de67cd760297bb5759f637/untwisted/expect.py#L43-L48
train
potash/drain
drain/step.py
_simplify_arguments
def _simplify_arguments(arguments): """ If positional or keyword arguments are empty return only one or the other. """ if len(arguments.args) == 0: return arguments.kwargs elif len(arguments.kwargs) == 0: return arguments.args else: return arguments
python
def _simplify_arguments(arguments): """ If positional or keyword arguments are empty return only one or the other. """ if len(arguments.args) == 0: return arguments.kwargs elif len(arguments.kwargs) == 0: return arguments.args else: return arguments
[ "def", "_simplify_arguments", "(", "arguments", ")", ":", "if", "len", "(", "arguments", ".", "args", ")", "==", "0", ":", "return", "arguments", ".", "kwargs", "elif", "len", "(", "arguments", ".", "kwargs", ")", "==", "0", ":", "return", "arguments", ".", "args", "else", ":", "return", "arguments" ]
If positional or keyword arguments are empty return only one or the other.
[ "If", "positional", "or", "keyword", "arguments", "are", "empty", "return", "only", "one", "or", "the", "other", "." ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/step.py#L310-L319
train
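Behavior at a glance, using a namedtuple stand-in for the arguments object (the real type in drain is not shown in this record):

.. code-block:: python

    from collections import namedtuple

    Arguments = namedtuple('Arguments', ['args', 'kwargs'])

    def simplify(arguments):
        if len(arguments.args) == 0:
            return arguments.kwargs
        elif len(arguments.kwargs) == 0:
            return arguments.args
        return arguments

    assert simplify(Arguments((1, 2), {})) == (1, 2)
    assert simplify(Arguments((), {'a': 1})) == {'a': 1}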
potash/drain
drain/step.py
Step.load
def load(self): """ Load this step's result from its dump directory """ hdf_filename = os.path.join(self._dump_dirname, 'result.h5') if os.path.isfile(hdf_filename): store = pd.HDFStore(hdf_filename, mode='r') keys = store.keys() if keys == ['/df']: self.result = store['df'] else: if set(keys) == set(map(lambda i: '/%s' % i, range(len(keys)))): # keys are not necessarily ordered self.result = [store[str(k)] for k in range(len(keys))] else: self.result = {k[1:]: store[k] for k in keys} else: self.result = joblib.load( os.path.join(self._output_dirname, 'dump', 'result.pkl'))
python
def load(self): """ Load this step's result from its dump directory """ hdf_filename = os.path.join(self._dump_dirname, 'result.h5') if os.path.isfile(hdf_filename): store = pd.HDFStore(hdf_filename, mode='r') keys = store.keys() if keys == ['/df']: self.result = store['df'] else: if set(keys) == set(map(lambda i: '/%s' % i, range(len(keys)))): # keys are not necessarily ordered self.result = [store[str(k)] for k in range(len(keys))] else: self.result = {k[1:]: store[k] for k in keys} else: self.result = joblib.load( os.path.join(self._output_dirname, 'dump', 'result.pkl'))
[ "def", "load", "(", "self", ")", ":", "hdf_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_dump_dirname", ",", "'result.h5'", ")", "if", "os", ".", "path", ".", "isfile", "(", "hdf_filename", ")", ":", "store", "=", "pd", ".", "HDFStore", "(", "hdf_filename", ",", "mode", "=", "'r'", ")", "keys", "=", "store", ".", "keys", "(", ")", "if", "keys", "==", "[", "'/df'", "]", ":", "self", ".", "result", "=", "store", "[", "'df'", "]", "else", ":", "if", "set", "(", "keys", ")", "==", "set", "(", "map", "(", "lambda", "i", ":", "'/%s'", "%", "i", ",", "range", "(", "len", "(", "keys", ")", ")", ")", ")", ":", "# keys are not necessarily ordered", "self", ".", "result", "=", "[", "store", "[", "str", "(", "k", ")", "]", "for", "k", "in", "range", "(", "len", "(", "keys", ")", ")", "]", "else", ":", "self", ".", "result", "=", "{", "k", "[", "1", ":", "]", ":", "store", "[", "k", "]", "for", "k", "in", "keys", "}", "else", ":", "self", ".", "result", "=", "joblib", ".", "load", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_output_dirname", ",", "'dump'", ",", "'result.pkl'", ")", ")" ]
Load this step's result from its dump directory
[ "Load", "this", "step", "s", "result", "from", "its", "dump", "directory" ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/step.py#L211-L230
train
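The loader distinguishes three HDF5 layouts by key shape: a single '/df' key (one DataFrame), consecutive integer keys (a list), or arbitrary keys (a dict). A hedged round-trip sketch for the single-frame case, assuming the PyTables dependency is installed:

.. code-block:: python

    import pandas as pd

    with pd.HDFStore('result.h5', mode='w') as store:
        store['df'] = pd.DataFrame({'x': [1, 2]})

    with pd.HDFStore('result.h5', mode='r') as store:
        keys = store.keys()
        if keys == ['/df']:
            result = store['df']           # single-DataFrame layout
        else:
            result = {k[1:]: store[k] for k in keys}  # dict layout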
potash/drain
drain/step.py
Step.setup_dump
def setup_dump(self): """ Set up dump, creating directories and writing step.yaml file containing yaml dump of this step. {drain.PATH}/{self._digest}/ step.yaml dump/ """ dumpdir = self._dump_dirname if not os.path.isdir(dumpdir): os.makedirs(dumpdir) dump = False yaml_filename = self._yaml_filename if not os.path.isfile(yaml_filename): dump = True else: with open(yaml_filename) as f: if f.read() != yaml.dump(self): logging.warning('Existing step.yaml does not match hash, regenerating') dump = True if dump: with open(yaml_filename, 'w') as f: yaml.dump(self, f)
python
def setup_dump(self): """ Set up dump, creating directories and writing step.yaml file containing yaml dump of this step. {drain.PATH}/{self._digest}/ step.yaml dump/ """ dumpdir = self._dump_dirname if not os.path.isdir(dumpdir): os.makedirs(dumpdir) dump = False yaml_filename = self._yaml_filename if not os.path.isfile(yaml_filename): dump = True else: with open(yaml_filename) as f: if f.read() != yaml.dump(self): logging.warning('Existing step.yaml does not match hash, regenerating') dump = True if dump: with open(yaml_filename, 'w') as f: yaml.dump(self, f)
[ "def", "setup_dump", "(", "self", ")", ":", "dumpdir", "=", "self", ".", "_dump_dirname", "if", "not", "os", ".", "path", ".", "isdir", "(", "dumpdir", ")", ":", "os", ".", "makedirs", "(", "dumpdir", ")", "dump", "=", "False", "yaml_filename", "=", "self", ".", "_yaml_filename", "if", "not", "os", ".", "path", ".", "isfile", "(", "yaml_filename", ")", ":", "dump", "=", "True", "else", ":", "with", "open", "(", "yaml_filename", ")", "as", "f", ":", "if", "f", ".", "read", "(", ")", "!=", "yaml", ".", "dump", "(", "self", ")", ":", "logging", ".", "warning", "(", "'Existing step.yaml does not match hash, regenerating'", ")", "dump", "=", "True", "if", "dump", ":", "with", "open", "(", "yaml_filename", ",", "'w'", ")", "as", "f", ":", "yaml", ".", "dump", "(", "self", ",", "f", ")" ]
Set up dump, creating directories and writing step.yaml file containing yaml dump of this step. {drain.PATH}/{self._digest}/ step.yaml dump/
[ "Set", "up", "dump", "creating", "directories", "and", "writing", "step", ".", "yaml", "file", "containing", "yaml", "dump", "of", "this", "step", "." ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/step.py#L232-L258
train
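The idempotence check, rewriting step.yaml only when the stored text no longer matches a fresh dump, generalizes to a small helper. A sketch with a plain dict instead of a Step:

.. code-block:: python

    import os

    import yaml

    def write_if_changed(path, obj):
        text = yaml.dump(obj)
        if os.path.isfile(path):
            with open(path) as f:
                if f.read() == text:
                    return False  # on-disk copy already matches
        with open(path, 'w') as f:
            f.write(text)
        return True

    write_if_changed('step.yaml', {'name': 'demo'})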
lsst-sqre/documenteer
documenteer/stackdocs/packagecli.py
main
def main(ctx, root_dir, verbose): """package-docs is a CLI for building single-package previews of documentation in the LSST Stack. Use package-docs during development to quickly preview your documentation and docstrings. .. warning:: Using package-docs to compile standalone documentation for a single package will generate warnings related to missing references. This is normal because the full documentation set is not built in this mode. Before shipping revised documentation for a package, always make sure cross-package references work by doing a full-site build either locally with the stack-docs CLI or the site's Jenkins job. The key commands provided by package-docs are: - ``package-docs build``: compile the package's documentation. - ``package-docs clean``: removes documentation build products from a package. """ root_dir = discover_package_doc_dir(root_dir) # Subcommands should use the click.pass_obj decorator to get this # ctx.obj object as the first argument. ctx.obj = {'root_dir': root_dir, 'verbose': verbose} # Set up application logging. This ensures that only documenteer's # logger is activated. If necessary, we can add other app's loggers too. if verbose: log_level = logging.DEBUG else: log_level = logging.INFO logger = logging.getLogger('documenteer') logger.addHandler(logging.StreamHandler()) logger.setLevel(log_level)
python
def main(ctx, root_dir, verbose): """package-docs is a CLI for building single-package previews of documentation in the LSST Stack. Use package-docs during development to quickly preview your documentation and docstrings. .. warning:: Using package-docs to compile standalone documentation for a single package will generate warnings related to missing references. This is normal because the full documentation set is not built in this mode. Before shipping revised documentation for a package, always make sure cross-package references work by doing a full-site build either locally with the stack-docs CLI or the site's Jenkins job. The key commands provided by package-docs are: - ``package-docs build``: compile the package's documentation. - ``package-docs clean``: removes documentation build products from a package. """ root_dir = discover_package_doc_dir(root_dir) # Subcommands should use the click.pass_obj decorator to get this # ctx.obj object as the first argument. ctx.obj = {'root_dir': root_dir, 'verbose': verbose} # Set up application logging. This ensures that only documenteer's # logger is activated. If necessary, we can add other app's loggers too. if verbose: log_level = logging.DEBUG else: log_level = logging.INFO logger = logging.getLogger('documenteer') logger.addHandler(logging.StreamHandler()) logger.setLevel(log_level)
[ "def", "main", "(", "ctx", ",", "root_dir", ",", "verbose", ")", ":", "root_dir", "=", "discover_package_doc_dir", "(", "root_dir", ")", "# Subcommands should use the click.pass_obj decorator to get this", "# ctx.obj object as the first argument.", "ctx", ".", "obj", "=", "{", "'root_dir'", ":", "root_dir", ",", "'verbose'", ":", "verbose", "}", "# Set up application logging. This ensures that only documenteer's", "# logger is activated. If necessary, we can add other app's loggers too.", "if", "verbose", ":", "log_level", "=", "logging", ".", "DEBUG", "else", ":", "log_level", "=", "logging", ".", "INFO", "logger", "=", "logging", ".", "getLogger", "(", "'documenteer'", ")", "logger", ".", "addHandler", "(", "logging", ".", "StreamHandler", "(", ")", ")", "logger", ".", "setLevel", "(", "log_level", ")" ]
package-docs is a CLI for building single-package previews of documentation in the LSST Stack. Use package-docs during development to quickly preview your documentation and docstrings. .. warning:: Using package-docs to compile standalone documentation for a single package will generate warnings related to missing references. This is normal because the full documentation set is not built in this mode. Before shipping revised documentation for a package, always make sure cross-package references work by doing a full-site build either locally with the stack-docs CLI or the site's Jenkins job. The key commands provided by package-docs are: - ``package-docs build``: compile the package's documentation. - ``package-docs clean``: removes documentation build products from a package.
[ "package", "-", "docs", "is", "a", "CLI", "for", "building", "single", "-", "package", "previews", "of", "documentation", "in", "the", "LSST", "Stack", "." ]
75f02901a80042b28d074df1cc1dca32eb8e38c8
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/stackdocs/packagecli.py#L41-L78
train
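A minimal sketch of how a click group like ``main`` above is typically declared and exercised. The decorators, the ``build`` subcommand body, and the ``doc`` path are illustrative assumptions, not code from packagecli.py (which also resolves the directory via discover_package_doc_dir).

.. code-block:: python

    import click
    from click.testing import CliRunner

    @click.group()
    @click.option('--root-dir', default='.', help='Package documentation directory.')
    @click.option('--verbose', is_flag=True)
    @click.pass_context
    def main(ctx, root_dir, verbose):
        # Subcommands read this dict through click.pass_obj.
        ctx.obj = {'root_dir': root_dir, 'verbose': verbose}

    @main.command()
    @click.pass_obj
    def build(obj):
        # Stand-in for the real build command.
        click.echo('building docs in {0}'.format(obj['root_dir']))

    runner = CliRunner()
    result = runner.invoke(main, ['--root-dir', 'doc', 'build'])
    print(result.output)  # -> building docs in doc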
potash/drain
drain/aggregate.py
ColumnFunction.apply_and_name
def apply_and_name(self, aggregator): """Fetches the row-aggregated input columns for this ColumnFunction. Args: aggregator (Aggregator) Returns: pd.DataFrame: The dataframe has columns with names self.names that were created by this ColumnFunction, and is indexed by the index that was passed to aggregator.aggregate(index). """ reduced_df = self._apply(aggregator) if len(self.names) != len(reduced_df.columns): raise IndexError("ColumnFunction creates a different number of columns than it has names for.") reduced_df.columns = self.names return reduced_df
python
def apply_and_name(self, aggregator): """Fetches the row-aggregated input columns for this ColumnFunction. Args: aggregator (Aggregator) Returns: pd.DataFrame: The dataframe has columns with names self.names that were created by this ColumnFunction, and is indexed by the index that was passed to aggregator.aggregate(index). """ reduced_df = self._apply(aggregator) if len(self.names) != len(reduced_df.columns): raise IndexError("ColumnFunction creates a different number of columns than it has names for.") reduced_df.columns = self.names return reduced_df
[ "def", "apply_and_name", "(", "self", ",", "aggregator", ")", ":", "reduced_df", "=", "self", ".", "_apply", "(", "aggregator", ")", "if", "len", "(", "self", ".", "names", ")", "!=", "len", "(", "reduced_df", ".", "columns", ")", ":", "raise", "IndexError", "(", "\"ColumnFunction creates more columns than it has names for.\"", ")", "reduced_df", ".", "columns", "=", "self", ".", "names", "return", "reduced_df" ]
Fetches the row-aggregated input columns for this ColumnFunction. Args: aggregator (Aggregator) Returns: pd.DataFrame: The dataframe has columns with names self.names that were created by this ColumnFunction, and is indexed by the index that was passed to aggregator.aggregate(index).
[ "Fetches", "the", "row", "-", "aggregated", "input", "columns", "for", "this", "ColumnFunction", "." ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregate.py#L106-L122
train
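A self-contained sketch of the naming contract that ``apply_and_name`` enforces, with a plain DataFrame standing in for the result of ``self._apply(aggregator)``; the column and name values are invented.

.. code-block:: python

    import pandas as pd

    names = ['arrests_count', 'arrests_mean']
    reduced_df = pd.DataFrame({'a': [3, 7], 'b': [1.5, 3.5]})

    # Same check as above: one name per produced column, then rename.
    if len(names) != len(reduced_df.columns):
        raise IndexError('number of columns does not match number of names')
    reduced_df.columns = names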
potash/drain
drain/aggregate.py
Aggregator.aggregate
def aggregate(self, index): """Performs a groupby of the unique Columns by index, as constructed from self.df. Args: index (str or pd.Index): Index or column name of self.df. Returns: pd.DataFrame: A dataframe, aggregated by index, that contains the result of the various ColumnFunctions, and named accordingly. """ # deal with index as a string vs index as an index/MultiIndex if isinstance(index, string_types): col_df_grouped = self.col_df.groupby(self.df[index]) else: self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index]) col_df_grouped = self.col_df.groupby(level=index) self.col_df.index = self.df.index # perform the actual aggregation self.reduced_df = pd.DataFrame({ colred: col_df_grouped[colred.column].agg(colred.agg_func) for colred in self.column_reductions }) # then apply the functions to produce the final dataframe reduced_dfs = [] for cf in self.column_functions: # each apply_and_name() calls get_reduced() with the column reductions it wants reduced_dfs.append(cf.apply_and_name(self)) return pd.concat(reduced_dfs, axis=1)
python
def aggregate(self, index): """Performs a groupby of the unique Columns by index, as constructed from self.df. Args: index (str or pd.Index): Index or column name of self.df. Returns: pd.DataFrame: A dataframe, aggregated by index, that contains the result of the various ColumnFunctions, and named accordingly. """ # deal with index as a string vs index as an index/MultiIndex if isinstance(index, string_types): col_df_grouped = self.col_df.groupby(self.df[index]) else: self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index]) col_df_grouped = self.col_df.groupby(level=index) self.col_df.index = self.df.index # perform the actual aggregation self.reduced_df = pd.DataFrame({ colred: col_df_grouped[colred.column].agg(colred.agg_func) for colred in self.column_reductions }) # then apply the functions to produce the final dataframe reduced_dfs = [] for cf in self.column_functions: # each apply_and_name() calls get_reduced() with the column reductions it wants reduced_dfs.append(cf.apply_and_name(self)) return pd.concat(reduced_dfs, axis=1)
[ "def", "aggregate", "(", "self", ",", "index", ")", ":", "# deal with index as a string vs index as a index/MultiIndex", "if", "isinstance", "(", "index", ",", "string_types", ")", ":", "col_df_grouped", "=", "self", ".", "col_df", ".", "groupby", "(", "self", ".", "df", "[", "index", "]", ")", "else", ":", "self", ".", "col_df", ".", "index", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "[", "self", ".", "df", "[", "i", "]", "for", "i", "in", "index", "]", ")", "col_df_grouped", "=", "self", ".", "col_df", ".", "groupby", "(", "level", "=", "index", ")", "self", ".", "col_df", ".", "index", "=", "self", ".", "df", ".", "index", "# perform the actual aggregation", "self", ".", "reduced_df", "=", "pd", ".", "DataFrame", "(", "{", "colred", ":", "col_df_grouped", "[", "colred", ".", "column", "]", ".", "agg", "(", "colred", ".", "agg_func", ")", "for", "colred", "in", "self", ".", "column_reductions", "}", ")", "# then apply the functions to produce the final dataframe", "reduced_dfs", "=", "[", "]", "for", "cf", "in", "self", ".", "column_functions", ":", "# each apply_and_name() calls get_reduced() with the column reductions it wants", "reduced_dfs", ".", "append", "(", "cf", ".", "apply_and_name", "(", "self", ")", ")", "return", "pd", ".", "concat", "(", "reduced_dfs", ",", "axis", "=", "1", ")" ]
Performs a groupby of the unique Columns by index, as constructed from self.df. Args: index (str or pd.Index): Index or column name of self.df. Returns: pd.DataFrame: A dataframe, aggregated by index, that contains the result of the various ColumnFunctions, and named accordingly.
[ "Performs", "a", "groupby", "of", "the", "unique", "Columns", "by", "index", "as", "constructed", "from", "self", ".", "df", "." ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregate.py#L260-L291
train
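A toy illustration of the two grouping branches in ``aggregate`` above, one for a single column name and one for a list of names routed through a MultiIndex; the data is invented.

.. code-block:: python

    import pandas as pd

    df = pd.DataFrame({'city': ['a', 'a', 'b'],
                       'year': [1, 2, 2],
                       'x': [1.0, 2.0, 3.0]})

    # index given as a single column name
    by_city = df.groupby(df['city'])['x'].agg('sum')

    # index given as a list of names: build a MultiIndex, group by its levels
    mi = pd.MultiIndex.from_arrays([df[i] for i in ['city', 'year']])
    by_city_year = df.set_index(mi).groupby(level=['city', 'year'])['x'].agg('sum')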
potash/drain
drain/aggregate.py
Fraction._apply
def _apply(self, aggregator): """Returns a dataframe with the requested ColumnReductions. """ reduced_dfs = [] if self.include_fraction: n_df = self.numerator.apply_and_name(aggregator) d_df = self.denominator.apply_and_name(aggregator) reduced_dfs.extend([n_df[cn]/d_df[cd] for cn, cd in product(n_df.columns, d_df.columns)]) if self.include_numerator: reduced_dfs.append(self.numerator.apply_and_name(aggregator)) if self.include_denominator: reduced_dfs.append(self.denominator.apply_and_name(aggregator)) return pd.concat(reduced_dfs, axis=1)
python
def _apply(self, aggregator): """Returns a dataframe with the requested ColumnReductions. """ reduced_dfs = [] if self.include_fraction: n_df = self.numerator.apply_and_name(aggregator) d_df = self.denominator.apply_and_name(aggregator) reduced_dfs.extend([n_df[cn]/d_df[cd] for cn, cd in product(n_df.columns, d_df.columns)]) if self.include_numerator: reduced_dfs.append(self.numerator.apply_and_name(aggregator)) if self.include_denominator: reduced_dfs.append(self.denominator.apply_and_name(aggregator)) return pd.concat(reduced_dfs, axis=1)
[ "def", "_apply", "(", "self", ",", "aggregator", ")", ":", "reduced_dfs", "=", "[", "]", "if", "self", ".", "include_fraction", ":", "n_df", "=", "self", ".", "numerator", ".", "apply_and_name", "(", "aggregator", ")", "d_df", "=", "self", ".", "denominator", ".", "apply_and_name", "(", "aggregator", ")", "reduced_dfs", ".", "extend", "(", "[", "n_df", "[", "cn", "]", "/", "d_df", "[", "cd", "]", "for", "cn", ",", "cd", "in", "product", "(", "n_df", ".", "columns", ",", "d_df", ".", "columns", ")", "]", ")", "if", "self", ".", "include_numerator", ":", "reduced_dfs", ".", "append", "(", "self", ".", "numerator", ".", "apply_and_name", "(", "aggregator", ")", ")", "if", "self", ".", "include_denominator", ":", "reduced_dfs", ".", "append", "(", "self", ".", "denominator", ".", "apply_and_name", "(", "aggregator", ")", ")", "return", "pd", ".", "concat", "(", "reduced_dfs", ",", "axis", "=", "1", ")" ]
Returns a dataframe with the requested ColumnReductions.
[ "Returns", "a", "dataframe", "with", "the", "requested", "ColumnReductions", "." ]
ddd62081cb9317beb5d21f86c8b4bb196ca3d222
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregate.py#L350-L367
train
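The ``include_fraction`` branch above pairs every numerator column with every denominator column via itertools.product; a stand-in sketch with invented column names.

.. code-block:: python

    from itertools import product

    import pandas as pd

    n_df = pd.DataFrame({'arrests': [2, 4]})
    d_df = pd.DataFrame({'stops': [10, 20], 'searches': [5, 8]})

    # One ratio Series per (numerator, denominator) column pair.
    fractions = pd.concat(
        [(n_df[cn] / d_df[cd]).rename('%s_per_%s' % (cn, cd))
         for cn, cd in product(n_df.columns, d_df.columns)],
        axis=1)
    # columns: arrests_per_stops, arrests_per_searches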
fkarb/xltable
xltable/table.py
Table.clone
def clone(self, **kwargs): """Create a clone of the Table, optionally with some properties changed""" init_kwargs = { "name": self.__name, "dataframe": self.__df, "include_columns": self.__include_columns, "include_index": self.__include_index, "style": self.__style, "column_styles": self.__col_styles, "column_widths": self.__column_widths, "row_styles": self.__row_styles, "header_style": self.header_style, "index_style": self.index_style } init_kwargs.update(kwargs) return self.__class__(**init_kwargs)
python
def clone(self, **kwargs): """Create a clone of the Table, optionally with some properties changed""" init_kwargs = { "name": self.__name, "dataframe": self.__df, "include_columns": self.__include_columns, "include_index": self.__include_index, "style": self.__style, "column_styles": self.__col_styles, "column_widths": self.__column_widths, "row_styles": self.__row_styles, "header_style": self.header_style, "index_style": self.index_style } init_kwargs.update(kwargs) return self.__class__(**init_kwargs)
[ "def", "clone", "(", "self", ",", "*", "*", "kwargs", ")", ":", "init_kwargs", "=", "{", "\"name\"", ":", "self", ".", "__name", ",", "\"dataframe\"", ":", "self", ".", "__df", ",", "\"include_columns\"", ":", "self", ".", "__include_columns", ",", "\"include_index\"", ":", "self", ".", "__include_index", ",", "\"style\"", ":", "self", ".", "__style", ",", "\"column_styles\"", ":", "self", ".", "__col_styles", ",", "\"column_widths\"", ":", "self", ".", "__column_widths", ",", "\"row_styles\"", ":", "self", ".", "__row_styles", ",", "\"header_style\"", ":", "self", ".", "header_style", ",", "\"index_style\"", ":", "self", ".", "index_style", "}", "init_kwargs", ".", "update", "(", "kwargs", ")", "return", "self", ".", "__class__", "(", "*", "*", "init_kwargs", ")" ]
Create a clone of the Table, optionally with some properties changed
[ "Create", "a", "clone", "of", "the", "Table", "optionally", "with", "some", "properties", "changed" ]
7a592642d27ad5ee90d2aa8c26338abaa9d84bea
https://github.com/fkarb/xltable/blob/7a592642d27ad5ee90d2aa8c26338abaa9d84bea/xltable/table.py#L105-L120
train
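The clone-with-overrides pattern used by ``Table.clone``, shown in isolation; the ``Config`` class and its two fields are illustrative, not xltable's real signature.

.. code-block:: python

    class Config(object):
        def __init__(self, name, style='default'):
            self.name = name
            self.style = style

        def clone(self, **kwargs):
            # Snapshot the current state, let kwargs override, rebuild.
            init_kwargs = {'name': self.name, 'style': self.style}
            init_kwargs.update(kwargs)
            return self.__class__(**init_kwargs)

    base = Config('summary')
    bold = base.clone(style='bold')  # same name, overridden style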
mojaie/chorus
chorus/v2000reader.py
inspect
def inspect(lines): """Inspect an SDFile given as a list of strings Returns: tuple: (data label list, number of records) """ labels = set() count = 0 exp = re.compile(r">.*?<([\w ]+)>") # Space should be accepted valid = False for line in lines: if line.startswith("M  END\n"): valid = True elif line.startswith("$$$$"): count += 1 valid = False else: result = exp.match(line) if result: labels.add(result.group(1)) if valid: count += 1 return list(labels), count
python
def inspect(lines): """Inspect an SDFile given as a list of strings Returns: tuple: (data label list, number of records) """ labels = set() count = 0 exp = re.compile(r">.*?<([\w ]+)>") # Space should be accepted valid = False for line in lines: if line.startswith("M  END\n"): valid = True elif line.startswith("$$$$"): count += 1 valid = False else: result = exp.match(line) if result: labels.add(result.group(1)) if valid: count += 1 return list(labels), count
[ "def", "inspect", "(", "lines", ")", ":", "labels", "=", "set", "(", ")", "count", "=", "0", "exp", "=", "re", ".", "compile", "(", "r\">.*?<([\\w ]+)>\"", ")", "# Space should be accepted", "valid", "=", "False", "for", "line", "in", "lines", ":", "if", "line", ".", "startswith", "(", "\"M END\\n\"", ")", ":", "valid", "=", "True", "elif", "line", ".", "startswith", "(", "\"$$$$\"", ")", ":", "count", "+=", "1", "valid", "=", "False", "else", ":", "result", "=", "exp", ".", "match", "(", "line", ")", "if", "result", ":", "labels", ".", "add", "(", "result", ".", "group", "(", "1", ")", ")", "if", "valid", ":", "count", "+=", "1", "return", "list", "(", "labels", ")", ",", "count" ]
Inspect an SDFile given as a list of strings Returns: tuple: (data label list, number of records)
[ "Inspect", "SDFile", "list", "of", "string" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L17-L39
train
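Feeding ``inspect`` an in-memory SDF fragment; the molblock lines are elided and the ``LogP`` label is invented. Note the two spaces in the standard ``M  END`` delimiter.

.. code-block:: python

    lines = [
        'M  END\n',    # end of first (elided) molblock
        '>  <LogP>\n',
        '1.23\n',
        '$$$$\n',      # record delimiter
        'M  END\n',    # second record with no data items
        '$$$$\n',
    ]
    labels, count = inspect(lines)  # -> (['LogP'], 2)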
mojaie/chorus
chorus/v2000reader.py
inspect_file
def inspect_file(path): """Inspect SDFile structure Returns: tuple: (data label list, number of records) """ with open(path, 'rb') as f: labels, count = inspect(tx.decode(line) for line in f) return labels, count
python
def inspect_file(path): """Inspect SDFile structure Returns: tuple: (data label list, number of records) """ with open(path, 'rb') as f: labels, count = inspect(tx.decode(line) for line in f) return labels, count
[ "def", "inspect_file", "(", "path", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "labels", ",", "count", "=", "inspect", "(", "tx", ".", "decode", "(", "line", ")", "for", "line", "in", "f", ")", "return", "labels", ",", "count" ]
Inspect SDFile structure Returns: tuple: (data label list, number of records)
[ "Inspect", "SDFile", "structure" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L49-L57
train
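An assumed round trip for ``inspect_file``: write a tiny one-record SDF, then inspect it. The file name and contents are made up for illustration.

.. code-block:: python

    with open('tiny.sdf', 'w') as f:
        f.write('M  END\n>  <LogP>\n1.23\n$$$$\n')

    labels, count = inspect_file('tiny.sdf')  # -> (['LogP'], 1)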
mojaie/chorus
chorus/v2000reader.py
optional_data
def optional_data(lines): """Parse SDFile data part into dict""" data = {} exp = re.compile(r">.*?<([\w ]+)>") # Space should be accepted for i, line in enumerate(lines): result = exp.match(line) if result: data[result.group(1)] = lines[i + 1] return data
python
def optional_data(lines): """Parse SDFile data part into dict""" data = {} exp = re.compile(r">.*?<([\w ]+)>") # Space should be accepted for i, line in enumerate(lines): result = exp.match(line) if result: data[result.group(1)] = lines[i + 1] return data
[ "def", "optional_data", "(", "lines", ")", ":", "data", "=", "{", "}", "exp", "=", "re", ".", "compile", "(", "r\">.*?<([\\w ]+)>\"", ")", "# Space should be accepted", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "result", "=", "exp", ".", "match", "(", "line", ")", "if", "result", ":", "data", "[", "result", ".", "group", "(", "1", ")", "]", "=", "lines", "[", "i", "+", "1", "]", "return", "data" ]
Parse SDFile data part into dict
[ "Parse", "SDFile", "data", "part", "into", "dict" ]
fc7fe23a0272554c67671645ab07830b315eeb1b
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/v2000reader.py#L60-L68
train
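``optional_data`` pairs each ``> <label>`` header with the single line that follows it; note that a header on the very last line would raise IndexError on ``lines[i + 1]``. A toy input:

.. code-block:: python

    lines = ['>  <LogP>', '1.23', '', '>  <MW>', '180.16']
    data = optional_data(lines)  # -> {'LogP': '1.23', 'MW': '180.16'}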