Dataset columns (type, value range):

  Unnamed: 0                   int64          0 - 10k
  repository_name              stringlengths  7 - 54
  func_path_in_repository      stringlengths  5 - 223
  func_name                    stringlengths  1 - 134
  whole_func_string            stringlengths  100 - 30.3k
  language                     stringclasses  1 value
  func_code_string             stringlengths  100 - 30.3k
  func_code_tokens             stringlengths  138 - 33.2k
  func_documentation_string    stringlengths  1 - 15k
  func_documentation_tokens    stringlengths  5 - 5.14k
  split_name                   stringclasses  1 value
  func_code_url                stringlengths  91 - 315
1,300
hectane/python-hectane
pyhectane/connection.py
Connection.send
def send(self, from_, to, subject, text='', html='', cc=[], bcc=[], headers={}, attachments=[]): """ Send an email. """ if isinstance(to, string_types): raise TypeError('"to" parameter must be enumerable') if text == '' and html == '': raise ValueError('"text" and "html" must not both be empty') return self._session.post('{}/send'.format(self._url), json={ 'from': from_, 'to': to, 'cc': cc, 'bcc': bcc, 'subject': subject, 'headers': headers, 'text': text, 'html': html, 'attachments': list(self._process_attachments(attachments)), }).json()
python
def send(self, from_, to, subject, text='', html='', cc=[], bcc=[], headers={}, attachments=[]): """ Send an email. """ if isinstance(to, string_types): raise TypeError('"to" parameter must be enumerable') if text == '' and html == '': raise ValueError('"text" and "html" must not both be empty') return self._session.post('{}/send'.format(self._url), json={ 'from': from_, 'to': to, 'cc': cc, 'bcc': bcc, 'subject': subject, 'headers': headers, 'text': text, 'html': html, 'attachments': list(self._process_attachments(attachments)), }).json()
['def', 'send', '(', 'self', ',', 'from_', ',', 'to', ',', 'subject', ',', 'text', '=', "''", ',', 'html', '=', "''", ',', 'cc', '=', '[', ']', ',', 'bcc', '=', '[', ']', ',', 'headers', '=', '{', '}', ',', 'attachments', '=', '[', ']', ')', ':', 'if', 'isinstance', '(', 'to', ',', 'string_types', ')', ':', 'raise', 'TypeError', '(', '\'"to" parameter must be enumerable\'', ')', 'if', 'text', '==', "''", 'and', 'html', '==', "''", ':', 'raise', 'ValueError', '(', '\'"text" and "html" must not both be empty\'', ')', 'return', 'self', '.', '_session', '.', 'post', '(', "'{}/send'", '.', 'format', '(', 'self', '.', '_url', ')', ',', 'json', '=', '{', "'from'", ':', 'from_', ',', "'to'", ':', 'to', ',', "'cc'", ':', 'cc', ',', "'bcc'", ':', 'bcc', ',', "'subject'", ':', 'subject', ',', "'headers'", ':', 'headers', ',', "'text'", ':', 'text', ',', "'html'", ':', 'html', ',', "'attachments'", ':', 'list', '(', 'self', '.', '_process_attachments', '(', 'attachments', ')', ')', ',', '}', ')', '.', 'json', '(', ')']
Send an email.
['Send', 'an', 'email', '.']
train
https://github.com/hectane/python-hectane/blob/e0fe1df576f776566e813f71782f8adf60146383/pyhectane/connection.py#L64-L83
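A minimal usage sketch for the Connection.send method above. Only the send signature comes from the row; the construction of the pyhectane Connection instance (here assumed to already exist as conn) and all argument values are illustrative.

    # `conn` is an already-constructed pyhectane Connection (constructor not shown in this row).
    result = conn.send(
        from_='alerts@example.com',
        to=['ops@example.com'],            # must be a list, not a bare string
        subject='Disk usage warning',
        text='Disk usage is above 90%.',
    )
    # The method returns the decoded JSON response from the Hectane /send endpoint.
    print(result)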
1,301
raphaelvallat/pingouin
pingouin/plotting.py
plot_paired
def plot_paired(data=None, dv=None, within=None, subject=None, order=None, boxplot=True, figsize=(4, 4), dpi=100, ax=None, colors=['green', 'grey', 'indianred'], pointplot_kwargs={'scale': .6, 'markers': '.'}, boxplot_kwargs={'color': 'lightslategrey', 'width': .2}): """ Paired plot. Parameters ---------- data : pandas DataFrame Long-format dataFrame. dv : string Name of column containing the dependant variable. within : string Name of column containing the within-subject factor. Note that ``within`` must have exactly two within-subject levels (= two unique values). subject : string Name of column containing the subject identifier. order : list of str List of values in ``within`` that define the order of elements on the x-axis of the plot. If None, uses alphabetical order. boxplot : boolean If True, add a boxplot to the paired lines using the :py:func:`seaborn.boxplot` function. figsize : tuple Figsize in inches dpi : int Resolution of the figure in dots per inches. ax : matplotlib axes Axis on which to draw the plot. colors : list of str Line colors names. Default is green when value increases from A to B, indianred when value decreases from A to B and grey when the value is the same in both measurements. pointplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.pointplot` function. boxplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.boxplot` function. Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- Data must be a long-format pandas DataFrame. Examples -------- Default paired plot: .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova') >>> df = df.query("Group == 'Meditation' and Subject > 40") >>> df = df.query("Time == 'August' or Time == 'June'") >>> import pingouin as pg >>> ax = pg.plot_paired(data=df, dv='Scores', within='Time', ... subject='Subject', dpi=150) Paired plot on an existing axis (no boxplot and uniform color): .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova').query("Time != 'January'") >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4)) >>> pg.plot_paired(data=df[df['Group'] == 'Meditation'], ... dv='Scores', within='Time', subject='Subject', ... ax=ax1, boxplot=False, ... colors=['grey', 'grey', 'grey']) # doctest: +SKIP """ from pingouin.utils import _check_dataframe, remove_rm_na # Validate args _check_dataframe(data=data, dv=dv, within=within, subject=subject, effects='within') # Remove NaN values data = remove_rm_na(dv=dv, within=within, subject=subject, data=data) # Extract subjects subj = data[subject].unique() # Extract within-subject level (alphabetical order) x_cat = np.unique(data[within]) assert len(x_cat) == 2, 'Within must have exactly two unique levels.' if order is None: order = x_cat else: assert len(order) == 2, 'Order must have exactly two elements.' 
# Start the plot if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) for idx, s in enumerate(subj): tmp = data.loc[data[subject] == s, [dv, within, subject]] x_val = tmp[tmp[within] == order[0]][dv].values[0] y_val = tmp[tmp[within] == order[1]][dv].values[0] if x_val < y_val: color = colors[0] elif x_val > y_val: color = colors[2] elif x_val == y_val: color = colors[1] # Plot individual lines using Seaborn sns.pointplot(data=tmp, x=within, y=dv, order=order, color=color, ax=ax, **pointplot_kwargs) if boxplot: sns.boxplot(data=data, x=within, y=dv, order=order, ax=ax, **boxplot_kwargs) # Despine and trim sns.despine(trim=True, ax=ax) return ax
python
def plot_paired(data=None, dv=None, within=None, subject=None, order=None, boxplot=True, figsize=(4, 4), dpi=100, ax=None, colors=['green', 'grey', 'indianred'], pointplot_kwargs={'scale': .6, 'markers': '.'}, boxplot_kwargs={'color': 'lightslategrey', 'width': .2}): """ Paired plot. Parameters ---------- data : pandas DataFrame Long-format dataFrame. dv : string Name of column containing the dependant variable. within : string Name of column containing the within-subject factor. Note that ``within`` must have exactly two within-subject levels (= two unique values). subject : string Name of column containing the subject identifier. order : list of str List of values in ``within`` that define the order of elements on the x-axis of the plot. If None, uses alphabetical order. boxplot : boolean If True, add a boxplot to the paired lines using the :py:func:`seaborn.boxplot` function. figsize : tuple Figsize in inches dpi : int Resolution of the figure in dots per inches. ax : matplotlib axes Axis on which to draw the plot. colors : list of str Line colors names. Default is green when value increases from A to B, indianred when value decreases from A to B and grey when the value is the same in both measurements. pointplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.pointplot` function. boxplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.boxplot` function. Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- Data must be a long-format pandas DataFrame. Examples -------- Default paired plot: .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova') >>> df = df.query("Group == 'Meditation' and Subject > 40") >>> df = df.query("Time == 'August' or Time == 'June'") >>> import pingouin as pg >>> ax = pg.plot_paired(data=df, dv='Scores', within='Time', ... subject='Subject', dpi=150) Paired plot on an existing axis (no boxplot and uniform color): .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova').query("Time != 'January'") >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4)) >>> pg.plot_paired(data=df[df['Group'] == 'Meditation'], ... dv='Scores', within='Time', subject='Subject', ... ax=ax1, boxplot=False, ... colors=['grey', 'grey', 'grey']) # doctest: +SKIP """ from pingouin.utils import _check_dataframe, remove_rm_na # Validate args _check_dataframe(data=data, dv=dv, within=within, subject=subject, effects='within') # Remove NaN values data = remove_rm_na(dv=dv, within=within, subject=subject, data=data) # Extract subjects subj = data[subject].unique() # Extract within-subject level (alphabetical order) x_cat = np.unique(data[within]) assert len(x_cat) == 2, 'Within must have exactly two unique levels.' if order is None: order = x_cat else: assert len(order) == 2, 'Order must have exactly two elements.' 
# Start the plot if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi) for idx, s in enumerate(subj): tmp = data.loc[data[subject] == s, [dv, within, subject]] x_val = tmp[tmp[within] == order[0]][dv].values[0] y_val = tmp[tmp[within] == order[1]][dv].values[0] if x_val < y_val: color = colors[0] elif x_val > y_val: color = colors[2] elif x_val == y_val: color = colors[1] # Plot individual lines using Seaborn sns.pointplot(data=tmp, x=within, y=dv, order=order, color=color, ax=ax, **pointplot_kwargs) if boxplot: sns.boxplot(data=data, x=within, y=dv, order=order, ax=ax, **boxplot_kwargs) # Despine and trim sns.despine(trim=True, ax=ax) return ax
['def', 'plot_paired', '(', 'data', '=', 'None', ',', 'dv', '=', 'None', ',', 'within', '=', 'None', ',', 'subject', '=', 'None', ',', 'order', '=', 'None', ',', 'boxplot', '=', 'True', ',', 'figsize', '=', '(', '4', ',', '4', ')', ',', 'dpi', '=', '100', ',', 'ax', '=', 'None', ',', 'colors', '=', '[', "'green'", ',', "'grey'", ',', "'indianred'", ']', ',', 'pointplot_kwargs', '=', '{', "'scale'", ':', '.6', ',', "'markers'", ':', "'.'", '}', ',', 'boxplot_kwargs', '=', '{', "'color'", ':', "'lightslategrey'", ',', "'width'", ':', '.2', '}', ')', ':', 'from', 'pingouin', '.', 'utils', 'import', '_check_dataframe', ',', 'remove_rm_na', '# Validate args', '_check_dataframe', '(', 'data', '=', 'data', ',', 'dv', '=', 'dv', ',', 'within', '=', 'within', ',', 'subject', '=', 'subject', ',', 'effects', '=', "'within'", ')', '# Remove NaN values', 'data', '=', 'remove_rm_na', '(', 'dv', '=', 'dv', ',', 'within', '=', 'within', ',', 'subject', '=', 'subject', ',', 'data', '=', 'data', ')', '# Extract subjects', 'subj', '=', 'data', '[', 'subject', ']', '.', 'unique', '(', ')', '# Extract within-subject level (alphabetical order)', 'x_cat', '=', 'np', '.', 'unique', '(', 'data', '[', 'within', ']', ')', 'assert', 'len', '(', 'x_cat', ')', '==', '2', ',', "'Within must have exactly two unique levels.'", 'if', 'order', 'is', 'None', ':', 'order', '=', 'x_cat', 'else', ':', 'assert', 'len', '(', 'order', ')', '==', '2', ',', "'Order must have exactly two elements.'", '# Start the plot', 'if', 'ax', 'is', 'None', ':', 'fig', ',', 'ax', '=', 'plt', '.', 'subplots', '(', '1', ',', '1', ',', 'figsize', '=', 'figsize', ',', 'dpi', '=', 'dpi', ')', 'for', 'idx', ',', 's', 'in', 'enumerate', '(', 'subj', ')', ':', 'tmp', '=', 'data', '.', 'loc', '[', 'data', '[', 'subject', ']', '==', 's', ',', '[', 'dv', ',', 'within', ',', 'subject', ']', ']', 'x_val', '=', 'tmp', '[', 'tmp', '[', 'within', ']', '==', 'order', '[', '0', ']', ']', '[', 'dv', ']', '.', 'values', '[', '0', ']', 'y_val', '=', 'tmp', '[', 'tmp', '[', 'within', ']', '==', 'order', '[', '1', ']', ']', '[', 'dv', ']', '.', 'values', '[', '0', ']', 'if', 'x_val', '<', 'y_val', ':', 'color', '=', 'colors', '[', '0', ']', 'elif', 'x_val', '>', 'y_val', ':', 'color', '=', 'colors', '[', '2', ']', 'elif', 'x_val', '==', 'y_val', ':', 'color', '=', 'colors', '[', '1', ']', '# Plot individual lines using Seaborn', 'sns', '.', 'pointplot', '(', 'data', '=', 'tmp', ',', 'x', '=', 'within', ',', 'y', '=', 'dv', ',', 'order', '=', 'order', ',', 'color', '=', 'color', ',', 'ax', '=', 'ax', ',', '*', '*', 'pointplot_kwargs', ')', 'if', 'boxplot', ':', 'sns', '.', 'boxplot', '(', 'data', '=', 'data', ',', 'x', '=', 'within', ',', 'y', '=', 'dv', ',', 'order', '=', 'order', ',', 'ax', '=', 'ax', ',', '*', '*', 'boxplot_kwargs', ')', '# Despine and trim', 'sns', '.', 'despine', '(', 'trim', '=', 'True', ',', 'ax', '=', 'ax', ')', 'return', 'ax']
Paired plot. Parameters ---------- data : pandas DataFrame Long-format dataFrame. dv : string Name of column containing the dependant variable. within : string Name of column containing the within-subject factor. Note that ``within`` must have exactly two within-subject levels (= two unique values). subject : string Name of column containing the subject identifier. order : list of str List of values in ``within`` that define the order of elements on the x-axis of the plot. If None, uses alphabetical order. boxplot : boolean If True, add a boxplot to the paired lines using the :py:func:`seaborn.boxplot` function. figsize : tuple Figsize in inches dpi : int Resolution of the figure in dots per inches. ax : matplotlib axes Axis on which to draw the plot. colors : list of str Line colors names. Default is green when value increases from A to B, indianred when value decreases from A to B and grey when the value is the same in both measurements. pointplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.pointplot` function. boxplot_kwargs : dict Dictionnary of optional arguments that are passed to the :py:func:`seaborn.boxplot` function. Returns ------- ax : Matplotlib Axes instance Returns the Axes object with the plot for further tweaking. Notes ----- Data must be a long-format pandas DataFrame. Examples -------- Default paired plot: .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova') >>> df = df.query("Group == 'Meditation' and Subject > 40") >>> df = df.query("Time == 'August' or Time == 'June'") >>> import pingouin as pg >>> ax = pg.plot_paired(data=df, dv='Scores', within='Time', ... subject='Subject', dpi=150) Paired plot on an existing axis (no boxplot and uniform color): .. plot:: >>> from pingouin import read_dataset >>> df = read_dataset('mixed_anova').query("Time != 'January'") >>> import pingouin as pg >>> import matplotlib.pyplot as plt >>> fig, ax1 = plt.subplots(1, 1, figsize=(5, 4)) >>> pg.plot_paired(data=df[df['Group'] == 'Meditation'], ... dv='Scores', within='Time', subject='Subject', ... ax=ax1, boxplot=False, ... colors=['grey', 'grey', 'grey']) # doctest: +SKIP
['Paired', 'plot', '.']
train
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/plotting.py#L489-L617
1,302
buguroo/pyknow
pyknow/matchers/rete/abstract.py
TwoInputNode.activate_right
def activate_right(self, token): """Make a copy of the received token and call `_activate_right`.""" watchers.MATCHER.debug( "Node <%s> activated right with token %r", self, token) return self._activate_right(token.copy())
python
def activate_right(self, token): """Make a copy of the received token and call `_activate_right`.""" watchers.MATCHER.debug( "Node <%s> activated right with token %r", self, token) return self._activate_right(token.copy())
['def', 'activate_right', '(', 'self', ',', 'token', ')', ':', 'watchers', '.', 'MATCHER', '.', 'debug', '(', '"Node <%s> activated right with token %r"', ',', 'self', ',', 'token', ')', 'return', 'self', '.', '_activate_right', '(', 'token', '.', 'copy', '(', ')', ')']
Make a copy of the received token and call `_activate_right`.
['Make', 'a', 'copy', 'of', 'the', 'received', 'token', 'and', 'call', '_activate_right', '.']
train
https://github.com/buguroo/pyknow/blob/48818336f2e9a126f1964f2d8dc22d37ff800fe8/pyknow/matchers/rete/abstract.py#L68-L72
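To illustrate the copy-before-delegate pattern used by activate_right above, here is a small standalone sketch. It is not pyknow's actual class hierarchy; the Token and node classes below are simplified stand-ins written only to show the structure.

    import copy
    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("matcher")

    class Token:
        def __init__(self, data):
            self.data = data
        def copy(self):
            return Token(copy.deepcopy(self.data))

    class TwoInputNode:
        def activate_right(self, token):
            # Log, copy the token, then delegate to the subclass hook,
            # mirroring the structure of the method in the row above.
            log.debug("Node <%s> activated right with token %r", self, token)
            return self._activate_right(token.copy())
        def _activate_right(self, token):
            raise NotImplementedError()

    class PrintNode(TwoInputNode):
        def _activate_right(self, token):
            print("got", token.data)

    PrintNode().activate_right(Token({"fact": 1}))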
1,303
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
dskstl
def dskstl(keywrd, dpval): """ Set the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html :param keywrd: Code specifying parameter to set. :type keywrd: int :param dpval: Value of parameter. :type dpval: float :return: """ keywrd = ctypes.c_int(keywrd) dpval = ctypes.c_double(dpval) libspice.dskstl_c(keywrd, dpval)
python
def dskstl(keywrd, dpval): """ Set the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html :param keywrd: Code specifying parameter to set. :type keywrd: int :param dpval: Value of parameter. :type dpval: float :return: """ keywrd = ctypes.c_int(keywrd) dpval = ctypes.c_double(dpval) libspice.dskstl_c(keywrd, dpval)
['def', 'dskstl', '(', 'keywrd', ',', 'dpval', ')', ':', 'keywrd', '=', 'ctypes', '.', 'c_int', '(', 'keywrd', ')', 'dpval', '=', 'ctypes', '.', 'c_double', '(', 'dpval', ')', 'libspice', '.', 'dskstl_c', '(', 'keywrd', ',', 'dpval', ')']
Set the value of a specified DSK tolerance or margin parameter. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskstl_c.html :param keywrd: Code specifying parameter to set. :type keywrd: int :param dpval: Value of parameter. :type dpval: float :return:
['Set', 'the', 'value', 'of', 'a', 'specified', 'DSK', 'tolerance', 'or', 'margin', 'parameter', '.', 'https', ':', '//', 'naif', '.', 'jpl', '.', 'nasa', '.', 'gov', '/', 'pub', '/', 'naif', '/', 'toolkit_docs', '/', 'C', '/', 'cspice', '/', 'dskstl_c', '.', 'html', ':', 'param', 'keywrd', ':', 'Code', 'specifying', 'parameter', 'to', 'set', '.', ':', 'type', 'keywrd', ':', 'int', ':', 'param', 'dpval', ':', 'Value', 'of', 'parameter', '.', ':', 'type', 'dpval', ':', 'float', ':', 'return', ':']
train
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L3164-L3178
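A hedged usage sketch for dskstl. The integer keyword code below is a placeholder, not a real SPICE constant; the valid DSK tolerance/margin codes are defined by the CSPICE documentation linked in the docstring and should be looked up there.

    import spiceypy as spice

    # Placeholder DSK parameter code and value: dskstl takes an int keyword code
    # and a float value, per the signature shown above.
    KEYWRD_EXAMPLE = 1        # assumption, not a documented constant
    spice.dskstl(KEYWRD_EXAMPLE, 1.0e-10)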
1,304
ssato/python-anyconfig
src/anyconfig/backend/xml.py
container_to_etree
def container_to_etree(obj, parent=None, to_str=None, **options): """ Convert a dict-like object to XML ElementTree. :param obj: Container instance to convert to :param parent: XML ElementTree parent node object or None :param to_str: Callable to convert value to string or None :param options: Keyword options, - tags: Dict of tags for special nodes to keep XML info, attributes, text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"} """ if to_str is None: to_str = _to_str_fn(**options) if not anyconfig.utils.is_dict_like(obj): if parent is not None and obj: parent.text = to_str(obj) # Parent is a leaf text node. return parent # All attributes and text should be set already. options = _complement_tag_options(options) (attrs, text, children) = operator.itemgetter(*_ATC)(options) for key, val in anyconfig.compat.iteritems(obj): if key == attrs: _elem_set_attrs(val, parent, to_str) elif key == text: parent.text = to_str(val) elif key == children: for celem in _elem_from_descendants(val, **options): parent.append(celem) else: parent = _get_or_update_parent(key, val, to_str, parent=parent, **options) return ET.ElementTree(parent)
python
def container_to_etree(obj, parent=None, to_str=None, **options): """ Convert a dict-like object to XML ElementTree. :param obj: Container instance to convert to :param parent: XML ElementTree parent node object or None :param to_str: Callable to convert value to string or None :param options: Keyword options, - tags: Dict of tags for special nodes to keep XML info, attributes, text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"} """ if to_str is None: to_str = _to_str_fn(**options) if not anyconfig.utils.is_dict_like(obj): if parent is not None and obj: parent.text = to_str(obj) # Parent is a leaf text node. return parent # All attributes and text should be set already. options = _complement_tag_options(options) (attrs, text, children) = operator.itemgetter(*_ATC)(options) for key, val in anyconfig.compat.iteritems(obj): if key == attrs: _elem_set_attrs(val, parent, to_str) elif key == text: parent.text = to_str(val) elif key == children: for celem in _elem_from_descendants(val, **options): parent.append(celem) else: parent = _get_or_update_parent(key, val, to_str, parent=parent, **options) return ET.ElementTree(parent)
['def', 'container_to_etree', '(', 'obj', ',', 'parent', '=', 'None', ',', 'to_str', '=', 'None', ',', '*', '*', 'options', ')', ':', 'if', 'to_str', 'is', 'None', ':', 'to_str', '=', '_to_str_fn', '(', '*', '*', 'options', ')', 'if', 'not', 'anyconfig', '.', 'utils', '.', 'is_dict_like', '(', 'obj', ')', ':', 'if', 'parent', 'is', 'not', 'None', 'and', 'obj', ':', 'parent', '.', 'text', '=', 'to_str', '(', 'obj', ')', '# Parent is a leaf text node.', 'return', 'parent', '# All attributes and text should be set already.', 'options', '=', '_complement_tag_options', '(', 'options', ')', '(', 'attrs', ',', 'text', ',', 'children', ')', '=', 'operator', '.', 'itemgetter', '(', '*', '_ATC', ')', '(', 'options', ')', 'for', 'key', ',', 'val', 'in', 'anyconfig', '.', 'compat', '.', 'iteritems', '(', 'obj', ')', ':', 'if', 'key', '==', 'attrs', ':', '_elem_set_attrs', '(', 'val', ',', 'parent', ',', 'to_str', ')', 'elif', 'key', '==', 'text', ':', 'parent', '.', 'text', '=', 'to_str', '(', 'val', ')', 'elif', 'key', '==', 'children', ':', 'for', 'celem', 'in', '_elem_from_descendants', '(', 'val', ',', '*', '*', 'options', ')', ':', 'parent', '.', 'append', '(', 'celem', ')', 'else', ':', 'parent', '=', '_get_or_update_parent', '(', 'key', ',', 'val', ',', 'to_str', ',', 'parent', '=', 'parent', ',', '*', '*', 'options', ')', 'return', 'ET', '.', 'ElementTree', '(', 'parent', ')']
Convert a dict-like object to XML ElementTree. :param obj: Container instance to convert to :param parent: XML ElementTree parent node object or None :param to_str: Callable to convert value to string or None :param options: Keyword options, - tags: Dict of tags for special nodes to keep XML info, attributes, text and children nodes, e.g. {"attrs": "@attrs", "text": "#text"}
['Convert', 'a', 'dict', '-', 'like', 'object', 'to', 'XML', 'ElementTree', '.']
train
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/backend/xml.py#L410-L445
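A sketch of how the converter above might be driven, assuming container_to_etree is importable from anyconfig.backend.xml in this release and that a plain nested dict (no special attribute/text tags) is acceptable input; both assumptions should be checked against the installed version.

    import xml.etree.ElementTree as ET
    from anyconfig.backend.xml import container_to_etree   # assumed import path

    data = {"config": {"timeout": "30", "verbose": "true"}}
    tree = container_to_etree(data)
    print(ET.tostring(tree.getroot()))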
1,305
coin-or/GiMPy
src/gimpy/graph.py
Graph.set_node_attr
def set_node_attr(self, name, attr, value): ''' API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated. ''' self.get_node(name).set_attr(attr, value)
python
def set_node_attr(self, name, attr, value): ''' API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated. ''' self.get_node(name).set_attr(attr, value)
['def', 'set_node_attr', '(', 'self', ',', 'name', ',', 'attr', ',', 'value', ')', ':', 'self', '.', 'get_node', '(', 'name', ')', '.', 'set_attr', '(', 'attr', ',', 'value', ')']
API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated.
['API', ':', 'set_node_attr', '(', 'self', 'name', 'attr', ')', 'Description', ':', 'Sets', 'attr', 'attribute', 'of', 'node', 'named', 'name', 'to', 'value', '.', 'Input', ':', 'name', ':', 'Name', 'of', 'node', '.', 'attr', ':', 'Attribute', 'of', 'node', 'to', 'set', '.', 'Pre', ':', 'Graph', 'should', 'have', 'this', 'node', '.', 'Post', ':', 'Node', 'attribute', 'will', 'be', 'updated', '.']
train
https://github.com/coin-or/GiMPy/blob/51853122a50eb6019d06bbdedbfc396a833b5a22/src/gimpy/graph.py#L523-L536
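A short usage sketch for set_node_attr. The Graph constructor, add_node, and get_attr calls are assumed from GiMPy's usual API and should be verified; only set_node_attr itself comes from the row.

    from gimpy import Graph      # assumed import

    g = Graph()
    g.add_node('a')                        # assumed node-creation API
    g.set_node_attr('a', 'color', 'red')   # sets the 'color' attribute of node 'a'
    print(g.get_node('a').get_attr('color'))   # get_attr likewise assumed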
1,306
praekeltfoundation/molo
molo/core/api/importers.py
ContentImporter.recreate_article_body
def recreate_article_body(self): ''' Handles case where article body contained page or image. Assumes all articles and images have been created. ''' for foreign_id, body in iteritems(self.record_keeper.article_bodies): try: local_page_id = self.record_keeper.get_local_page(foreign_id) page = Page.objects.get(id=local_page_id).specific # iterate through the body new_body = [] for item in body: if not item['value']: continue if item['type'] == 'page': new_page_id = self.record_keeper.get_local_page( item['value']) item['value'] = new_page_id elif item['type'] == 'image': new_image_id = self.record_keeper.get_local_image( item['value']) item['value'] = new_image_id new_body.append(item) setattr(page, 'body', json.dumps(new_body)) page.save_revision().publish() except Exception as e: self.log(ERROR, "recreating article body", { "exception": e, "foreign_id": foreign_id, "body": body, }, depth=1)
python
def recreate_article_body(self): ''' Handles case where article body contained page or image. Assumes all articles and images have been created. ''' for foreign_id, body in iteritems(self.record_keeper.article_bodies): try: local_page_id = self.record_keeper.get_local_page(foreign_id) page = Page.objects.get(id=local_page_id).specific # iterate through the body new_body = [] for item in body: if not item['value']: continue if item['type'] == 'page': new_page_id = self.record_keeper.get_local_page( item['value']) item['value'] = new_page_id elif item['type'] == 'image': new_image_id = self.record_keeper.get_local_image( item['value']) item['value'] = new_image_id new_body.append(item) setattr(page, 'body', json.dumps(new_body)) page.save_revision().publish() except Exception as e: self.log(ERROR, "recreating article body", { "exception": e, "foreign_id": foreign_id, "body": body, }, depth=1)
['def', 'recreate_article_body', '(', 'self', ')', ':', 'for', 'foreign_id', ',', 'body', 'in', 'iteritems', '(', 'self', '.', 'record_keeper', '.', 'article_bodies', ')', ':', 'try', ':', 'local_page_id', '=', 'self', '.', 'record_keeper', '.', 'get_local_page', '(', 'foreign_id', ')', 'page', '=', 'Page', '.', 'objects', '.', 'get', '(', 'id', '=', 'local_page_id', ')', '.', 'specific', '# iterate through the body', 'new_body', '=', '[', ']', 'for', 'item', 'in', 'body', ':', 'if', 'not', 'item', '[', "'value'", ']', ':', 'continue', 'if', 'item', '[', "'type'", ']', '==', "'page'", ':', 'new_page_id', '=', 'self', '.', 'record_keeper', '.', 'get_local_page', '(', 'item', '[', "'value'", ']', ')', 'item', '[', "'value'", ']', '=', 'new_page_id', 'elif', 'item', '[', "'type'", ']', '==', "'image'", ':', 'new_image_id', '=', 'self', '.', 'record_keeper', '.', 'get_local_image', '(', 'item', '[', "'value'", ']', ')', 'item', '[', "'value'", ']', '=', 'new_image_id', 'new_body', '.', 'append', '(', 'item', ')', 'setattr', '(', 'page', ',', "'body'", ',', 'json', '.', 'dumps', '(', 'new_body', ')', ')', 'page', '.', 'save_revision', '(', ')', '.', 'publish', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'self', '.', 'log', '(', 'ERROR', ',', '"recreating article body"', ',', '{', '"exception"', ':', 'e', ',', '"foreign_id"', ':', 'foreign_id', ',', '"body"', ':', 'body', ',', '}', ',', 'depth', '=', '1', ')']
Handles case where article body contained page or image. Assumes all articles and images have been created.
['Handles', 'case', 'where', 'article', 'body', 'contained', 'page', 'or', 'image', '.']
train
https://github.com/praekeltfoundation/molo/blob/57702fda4fab261d67591415f7d46bc98fa38525/molo/core/api/importers.py#L718-L755
1,307
jobovy/galpy
galpy/df/diskdf.py
surfacemass
def surfacemass(self,R,romberg=False,nsigma=None,relative=False): """ NAME: surfacemass PURPOSE: calculate the surface-mass at R by marginalizing over velocity INPUT: R - radius at which to calculate the surfacemass density (can be Quantity) OPTIONAL INPUT: nsigma - number of sigma to integrate the velocities over KEYWORDS: romberg - if True, use a romberg integrator (default: False) OUTPUT: surface mass at R HISTORY: 2010-03-XX - Written - Bovy (NYU) """ if nsigma == None: nsigma= _NSIGMA logSigmaR= self.targetSurfacemass(R,log=True,use_physical=False) sigmaR2= self.targetSigma2(R,use_physical=False) sigmaR1= sc.sqrt(sigmaR2) logsigmaR2= sc.log(sigmaR2) if relative: norm= 1. else: norm= sc.exp(logSigmaR) #Use the asymmetric drift equation to estimate va va= sigmaR2/2./R**self._beta*(1./self._gamma**2.-1. -R*self._surfaceSigmaProfile.surfacemassDerivative(R,log=True) -R*self._surfaceSigmaProfile.sigma2Derivative(R,log=True)) if math.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center if romberg: return sc.real(bovy_dblquad(_surfaceIntegrand, self._gamma*(R**self._beta-va)/sigmaR1-nsigma, self._gamma*(R**self._beta-va)/sigmaR1+nsigma, lambda x: 0., lambda x: nsigma, [R,self,logSigmaR,logsigmaR2,sigmaR1, self._gamma], tol=10.**-8)/sc.pi*norm) else: return integrate.dblquad(_surfaceIntegrand, self._gamma*(R**self._beta-va)/sigmaR1-nsigma, self._gamma*(R**self._beta-va)/sigmaR1+nsigma, lambda x: 0., lambda x: nsigma, (R,self,logSigmaR,logsigmaR2,sigmaR1, self._gamma), epsrel=_EPSREL)[0]/sc.pi*norm
python
def surfacemass(self,R,romberg=False,nsigma=None,relative=False): """ NAME: surfacemass PURPOSE: calculate the surface-mass at R by marginalizing over velocity INPUT: R - radius at which to calculate the surfacemass density (can be Quantity) OPTIONAL INPUT: nsigma - number of sigma to integrate the velocities over KEYWORDS: romberg - if True, use a romberg integrator (default: False) OUTPUT: surface mass at R HISTORY: 2010-03-XX - Written - Bovy (NYU) """ if nsigma == None: nsigma= _NSIGMA logSigmaR= self.targetSurfacemass(R,log=True,use_physical=False) sigmaR2= self.targetSigma2(R,use_physical=False) sigmaR1= sc.sqrt(sigmaR2) logsigmaR2= sc.log(sigmaR2) if relative: norm= 1. else: norm= sc.exp(logSigmaR) #Use the asymmetric drift equation to estimate va va= sigmaR2/2./R**self._beta*(1./self._gamma**2.-1. -R*self._surfaceSigmaProfile.surfacemassDerivative(R,log=True) -R*self._surfaceSigmaProfile.sigma2Derivative(R,log=True)) if math.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center if romberg: return sc.real(bovy_dblquad(_surfaceIntegrand, self._gamma*(R**self._beta-va)/sigmaR1-nsigma, self._gamma*(R**self._beta-va)/sigmaR1+nsigma, lambda x: 0., lambda x: nsigma, [R,self,logSigmaR,logsigmaR2,sigmaR1, self._gamma], tol=10.**-8)/sc.pi*norm) else: return integrate.dblquad(_surfaceIntegrand, self._gamma*(R**self._beta-va)/sigmaR1-nsigma, self._gamma*(R**self._beta-va)/sigmaR1+nsigma, lambda x: 0., lambda x: nsigma, (R,self,logSigmaR,logsigmaR2,sigmaR1, self._gamma), epsrel=_EPSREL)[0]/sc.pi*norm
['def', 'surfacemass', '(', 'self', ',', 'R', ',', 'romberg', '=', 'False', ',', 'nsigma', '=', 'None', ',', 'relative', '=', 'False', ')', ':', 'if', 'nsigma', '==', 'None', ':', 'nsigma', '=', '_NSIGMA', 'logSigmaR', '=', 'self', '.', 'targetSurfacemass', '(', 'R', ',', 'log', '=', 'True', ',', 'use_physical', '=', 'False', ')', 'sigmaR2', '=', 'self', '.', 'targetSigma2', '(', 'R', ',', 'use_physical', '=', 'False', ')', 'sigmaR1', '=', 'sc', '.', 'sqrt', '(', 'sigmaR2', ')', 'logsigmaR2', '=', 'sc', '.', 'log', '(', 'sigmaR2', ')', 'if', 'relative', ':', 'norm', '=', '1.', 'else', ':', 'norm', '=', 'sc', '.', 'exp', '(', 'logSigmaR', ')', '#Use the asymmetric drift equation to estimate va', 'va', '=', 'sigmaR2', '/', '2.', '/', 'R', '**', 'self', '.', '_beta', '*', '(', '1.', '/', 'self', '.', '_gamma', '**', '2.', '-', '1.', '-', 'R', '*', 'self', '.', '_surfaceSigmaProfile', '.', 'surfacemassDerivative', '(', 'R', ',', 'log', '=', 'True', ')', '-', 'R', '*', 'self', '.', '_surfaceSigmaProfile', '.', 'sigma2Derivative', '(', 'R', ',', 'log', '=', 'True', ')', ')', 'if', 'math', '.', 'fabs', '(', 'va', ')', '>', 'sigmaR1', ':', 'va', '=', '0.', '#To avoid craziness near the center', 'if', 'romberg', ':', 'return', 'sc', '.', 'real', '(', 'bovy_dblquad', '(', '_surfaceIntegrand', ',', 'self', '.', '_gamma', '*', '(', 'R', '**', 'self', '.', '_beta', '-', 'va', ')', '/', 'sigmaR1', '-', 'nsigma', ',', 'self', '.', '_gamma', '*', '(', 'R', '**', 'self', '.', '_beta', '-', 'va', ')', '/', 'sigmaR1', '+', 'nsigma', ',', 'lambda', 'x', ':', '0.', ',', 'lambda', 'x', ':', 'nsigma', ',', '[', 'R', ',', 'self', ',', 'logSigmaR', ',', 'logsigmaR2', ',', 'sigmaR1', ',', 'self', '.', '_gamma', ']', ',', 'tol', '=', '10.', '**', '-', '8', ')', '/', 'sc', '.', 'pi', '*', 'norm', ')', 'else', ':', 'return', 'integrate', '.', 'dblquad', '(', '_surfaceIntegrand', ',', 'self', '.', '_gamma', '*', '(', 'R', '**', 'self', '.', '_beta', '-', 'va', ')', '/', 'sigmaR1', '-', 'nsigma', ',', 'self', '.', '_gamma', '*', '(', 'R', '**', 'self', '.', '_beta', '-', 'va', ')', '/', 'sigmaR1', '+', 'nsigma', ',', 'lambda', 'x', ':', '0.', ',', 'lambda', 'x', ':', 'nsigma', ',', '(', 'R', ',', 'self', ',', 'logSigmaR', ',', 'logsigmaR2', ',', 'sigmaR1', ',', 'self', '.', '_gamma', ')', ',', 'epsrel', '=', '_EPSREL', ')', '[', '0', ']', '/', 'sc', '.', 'pi', '*', 'norm']
NAME: surfacemass PURPOSE: calculate the surface-mass at R by marginalizing over velocity INPUT: R - radius at which to calculate the surfacemass density (can be Quantity) OPTIONAL INPUT: nsigma - number of sigma to integrate the velocities over KEYWORDS: romberg - if True, use a romberg integrator (default: False) OUTPUT: surface mass at R HISTORY: 2010-03-XX - Written - Bovy (NYU)
['NAME', ':']
train
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/diskdf.py#L664-L725
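A usage sketch for surfacemass, assuming galpy's dehnendf disk distribution function (a diskdf subclass) as the concrete object; the numeric values are illustrative and in galpy's natural units.

    from galpy.df import dehnendf    # a concrete diskdf subclass

    df = dehnendf(beta=0.)            # disk DF for a flat rotation curve
    # Surface mass at R = 0.9, marginalized over velocity.
    print(df.surfacemass(0.9))
    # Relative value (normalized by the target surface-mass profile):
    print(df.surfacemass(0.9, relative=True))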
1,308
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/msvs.py
msvs_parse_version
def msvs_parse_version(s): """ Split a Visual Studio version, which may in fact be something like '7.0Exp', into is version number (returned as a float) and trailing "suite" portion. """ num, suite = version_re.match(s).groups() return float(num), suite
python
def msvs_parse_version(s): """ Split a Visual Studio version, which may in fact be something like '7.0Exp', into is version number (returned as a float) and trailing "suite" portion. """ num, suite = version_re.match(s).groups() return float(num), suite
['def', 'msvs_parse_version', '(', 's', ')', ':', 'num', ',', 'suite', '=', 'version_re', '.', 'match', '(', 's', ')', '.', 'groups', '(', ')', 'return', 'float', '(', 'num', ')', ',', 'suite']
Split a Visual Studio version, which may in fact be something like '7.0Exp', into is version number (returned as a float) and trailing "suite" portion.
['Split', 'a', 'Visual', 'Studio', 'version', 'which', 'may', 'in', 'fact', 'be', 'something', 'like', '7', '.', '0Exp', 'into', 'is', 'version', 'number', '(', 'returned', 'as', 'a', 'float', ')', 'and', 'trailing', 'suite', 'portion', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/msvs.py#L97-L104
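The helper above relies on a module-level version_re that is not shown in the row. As a standalone illustration, a plausible stand-in pattern and the expected splitting behaviour are sketched below; the exact regex in SCons may differ.

    import re

    # Plausible stand-in for SCons' module-level version_re (assumption).
    version_re = re.compile(r'^([0-9]+\.[0-9]*)(.*)$')

    def msvs_parse_version(s):
        num, suite = version_re.match(s).groups()
        return float(num), suite

    print(msvs_parse_version('7.0Exp'))   # -> (7.0, 'Exp')
    print(msvs_parse_version('14.0'))     # -> (14.0, '')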
1,309
linode/linode_api4-python
linode_api4/objects/tag.py
Tag._get_raw_objects
def _get_raw_objects(self): """ Helper function to populate the first page of raw objects for this tag. This has the side effect of creating the ``_raw_objects`` attribute of this object. """ if not hasattr(self, '_raw_objects'): result = self._client.get(type(self).api_endpoint, model=self) # I want to cache this to avoid making duplicate requests, but I don't # want it in the __init__ self._raw_objects = result # pylint: disable=attribute-defined-outside-init return self._raw_objects
python
def _get_raw_objects(self): """ Helper function to populate the first page of raw objects for this tag. This has the side effect of creating the ``_raw_objects`` attribute of this object. """ if not hasattr(self, '_raw_objects'): result = self._client.get(type(self).api_endpoint, model=self) # I want to cache this to avoid making duplicate requests, but I don't # want it in the __init__ self._raw_objects = result # pylint: disable=attribute-defined-outside-init return self._raw_objects
['def', '_get_raw_objects', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'_raw_objects'", ')', ':', 'result', '=', 'self', '.', '_client', '.', 'get', '(', 'type', '(', 'self', ')', '.', 'api_endpoint', ',', 'model', '=', 'self', ')', "# I want to cache this to avoid making duplicate requests, but I don't", '# want it in the __init__', 'self', '.', '_raw_objects', '=', 'result', '# pylint: disable=attribute-defined-outside-init', 'return', 'self', '.', '_raw_objects']
Helper function to populate the first page of raw objects for this tag. This has the side effect of creating the ``_raw_objects`` attribute of this object.
['Helper', 'function', 'to', 'populate', 'the', 'first', 'page', 'of', 'raw', 'objects', 'for', 'this', 'tag', '.', 'This', 'has', 'the', 'side', 'effect', 'of', 'creating', 'the', '_raw_objects', 'attribute', 'of', 'this', 'object', '.']
train
https://github.com/linode/linode_api4-python/blob/1dd7318d2aed014c746d48c7957464c57af883ca/linode_api4/objects/tag.py#L32-L45
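The method above is a lazy-caching idiom (fetch once, stash the result on the instance, reuse it afterwards). A generic standalone sketch of the same pattern, independent of the Linode client, is shown below.

    class CachedFetcher:
        def _fetch(self):
            print("expensive request happens once")
            return {"page": 1, "data": []}

        def _get_raw_objects(self):
            # Only hit the backend the first time; afterwards reuse the cached value.
            if not hasattr(self, '_raw_objects'):
                self._raw_objects = self._fetch()
            return self._raw_objects

    f = CachedFetcher()
    f._get_raw_objects()   # triggers the fetch
    f._get_raw_objects()   # served from the cached attribute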
1,310
marazmiki/django-ulogin
django_ulogin/views.py
PostBackView.handle_authenticated_user
def handle_authenticated_user(self, response): """ Handles the ULogin response if user is already authenticated """ current_user = get_user(self.request) ulogin, registered = ULoginUser.objects.get_or_create( uid=response['uid'], network=response['network'], defaults={'identity': response['identity'], 'user': current_user}) if not registered: ulogin_user = ulogin.user logger.debug('uLogin user already exists') if current_user != ulogin_user: logger.debug( "Mismatch: %s is not a %s. Take over it!" % (current_user, ulogin_user) ) ulogin.user = current_user ulogin.save() return get_user(self.request), ulogin, registered
python
def handle_authenticated_user(self, response): """ Handles the ULogin response if user is already authenticated """ current_user = get_user(self.request) ulogin, registered = ULoginUser.objects.get_or_create( uid=response['uid'], network=response['network'], defaults={'identity': response['identity'], 'user': current_user}) if not registered: ulogin_user = ulogin.user logger.debug('uLogin user already exists') if current_user != ulogin_user: logger.debug( "Mismatch: %s is not a %s. Take over it!" % (current_user, ulogin_user) ) ulogin.user = current_user ulogin.save() return get_user(self.request), ulogin, registered
['def', 'handle_authenticated_user', '(', 'self', ',', 'response', ')', ':', 'current_user', '=', 'get_user', '(', 'self', '.', 'request', ')', 'ulogin', ',', 'registered', '=', 'ULoginUser', '.', 'objects', '.', 'get_or_create', '(', 'uid', '=', 'response', '[', "'uid'", ']', ',', 'network', '=', 'response', '[', "'network'", ']', ',', 'defaults', '=', '{', "'identity'", ':', 'response', '[', "'identity'", ']', ',', "'user'", ':', 'current_user', '}', ')', 'if', 'not', 'registered', ':', 'ulogin_user', '=', 'ulogin', '.', 'user', 'logger', '.', 'debug', '(', "'uLogin user already exists'", ')', 'if', 'current_user', '!=', 'ulogin_user', ':', 'logger', '.', 'debug', '(', '"Mismatch: %s is not a %s. Take over it!"', '%', '(', 'current_user', ',', 'ulogin_user', ')', ')', 'ulogin', '.', 'user', '=', 'current_user', 'ulogin', '.', 'save', '(', ')', 'return', 'get_user', '(', 'self', '.', 'request', ')', ',', 'ulogin', ',', 'registered']
Handles the ULogin response if user is already authenticated
['Handles', 'the', 'ULogin', 'response', 'if', 'user', 'is', 'already', 'authenticated']
train
https://github.com/marazmiki/django-ulogin/blob/f41ad4b4ca130ad8af25be72ad882c8cf94a80dc/django_ulogin/views.py#L82-L107
1,311
bfontaine/term2048
term2048/ui.py
parse_cli_args
def parse_cli_args(): """parse args from the CLI and return a dict""" parser = argparse.ArgumentParser(description='2048 in your terminal') parser.add_argument('--mode', dest='mode', type=str, default=None, help='colors mode (dark or light)') parser.add_argument('--az', dest='azmode', action='store_true', help='Use the letters a-z instead of numbers') parser.add_argument('--resume', dest='resume', action='store_true', help='restart the game from where you left') parser.add_argument('-v', '--version', action='store_true') parser.add_argument('-r', '--rules', action='store_true') return vars(parser.parse_args())
python
def parse_cli_args(): """parse args from the CLI and return a dict""" parser = argparse.ArgumentParser(description='2048 in your terminal') parser.add_argument('--mode', dest='mode', type=str, default=None, help='colors mode (dark or light)') parser.add_argument('--az', dest='azmode', action='store_true', help='Use the letters a-z instead of numbers') parser.add_argument('--resume', dest='resume', action='store_true', help='restart the game from where you left') parser.add_argument('-v', '--version', action='store_true') parser.add_argument('-r', '--rules', action='store_true') return vars(parser.parse_args())
['def', 'parse_cli_args', '(', ')', ':', 'parser', '=', 'argparse', '.', 'ArgumentParser', '(', 'description', '=', "'2048 in your terminal'", ')', 'parser', '.', 'add_argument', '(', "'--mode'", ',', 'dest', '=', "'mode'", ',', 'type', '=', 'str', ',', 'default', '=', 'None', ',', 'help', '=', "'colors mode (dark or light)'", ')', 'parser', '.', 'add_argument', '(', "'--az'", ',', 'dest', '=', "'azmode'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'Use the letters a-z instead of numbers'", ')', 'parser', '.', 'add_argument', '(', "'--resume'", ',', 'dest', '=', "'resume'", ',', 'action', '=', "'store_true'", ',', 'help', '=', "'restart the game from where you left'", ')', 'parser', '.', 'add_argument', '(', "'-v'", ',', "'--version'", ',', 'action', '=', "'store_true'", ')', 'parser', '.', 'add_argument', '(', "'-r'", ',', "'--rules'", ',', 'action', '=', "'store_true'", ')', 'return', 'vars', '(', 'parser', '.', 'parse_args', '(', ')', ')']
parse args from the CLI and return a dict
['parse', 'args', 'from', 'the', 'CLI', 'and', 'return', 'a', 'dict']
train
https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/ui.py#L30-L41
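Since parse_cli_args reads sys.argv through argparse, a usage sketch looks like the following; the simulated argv values are illustrative, and the import path follows the file path shown in the row.

    import sys
    from term2048.ui import parse_cli_args   # module path from the row above

    # Simulate `term2048 --az --mode dark` for the sake of the example.
    sys.argv = ['term2048', '--az', '--mode', 'dark']
    args = parse_cli_args()
    # args is a plain dict with keys: mode, azmode, resume, version, rules.
    if args['azmode']:
        print('letters instead of numbers')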
1,312
pecan/pecan
pecan/commands/serve.py
ServeCommand.serve
def serve(self, app, conf): """ A very simple approach for a WSGI server. """ if self.args.reload: try: self.watch_and_spawn(conf) except ImportError: print('The `--reload` option requires `watchdog` to be ' 'installed.') print(' $ pip install watchdog') else: self._serve(app, conf)
python
def serve(self, app, conf): """ A very simple approach for a WSGI server. """ if self.args.reload: try: self.watch_and_spawn(conf) except ImportError: print('The `--reload` option requires `watchdog` to be ' 'installed.') print(' $ pip install watchdog') else: self._serve(app, conf)
['def', 'serve', '(', 'self', ',', 'app', ',', 'conf', ')', ':', 'if', 'self', '.', 'args', '.', 'reload', ':', 'try', ':', 'self', '.', 'watch_and_spawn', '(', 'conf', ')', 'except', 'ImportError', ':', 'print', '(', "'The `--reload` option requires `watchdog` to be '", "'installed.'", ')', 'print', '(', "' $ pip install watchdog'", ')', 'else', ':', 'self', '.', '_serve', '(', 'app', ',', 'conf', ')']
A very simple approach for a WSGI server.
['A', 'very', 'simple', 'approach', 'for', 'a', 'WSGI', 'server', '.']
train
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/commands/serve.py#L139-L152
1,313
clld/pycdstar
src/pycdstar/commands.py
c_metadata
def c_metadata(api, args, verbose=False): """ Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal. """ obj = api.get_object(args['<URL>'].split('/')[-1]) if not set_metadata(args['<JSON>'], obj): return json.dumps(obj.metadata.read(), indent=4)
python
def c_metadata(api, args, verbose=False): """ Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal. """ obj = api.get_object(args['<URL>'].split('/')[-1]) if not set_metadata(args['<JSON>'], obj): return json.dumps(obj.metadata.read(), indent=4)
['def', 'c_metadata', '(', 'api', ',', 'args', ',', 'verbose', '=', 'False', ')', ':', 'obj', '=', 'api', '.', 'get_object', '(', 'args', '[', "'<URL>'", ']', '.', 'split', '(', "'/'", ')', '[', '-', '1', ']', ')', 'if', 'not', 'set_metadata', '(', 'args', '[', "'<JSON>'", ']', ',', 'obj', ')', ':', 'return', 'json', '.', 'dumps', '(', 'obj', '.', 'metadata', '.', 'read', '(', ')', ',', 'indent', '=', '4', ')']
Set or get metadata associated with an object:: usage: cdstar metadata <URL> [<JSON>] <JSON> Path to metadata in JSON, or JSON literal.
['Set', 'or', 'get', 'metadata', 'associated', 'with', 'an', 'object', '::']
train
https://github.com/clld/pycdstar/blob/1a225b472c4e6bf9b8078fa3198f939395c53d22/src/pycdstar/commands.py#L24-L34
1,314
TissueMAPS/TmDeploy
tmdeploy/config.py
CloudSection.key_file_public
def key_file_public(self): '''str: path to the public key that will be uploaded to the cloud provider (by default looks for a ``.pub`` file with name :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh`` directory) ''' if not hasattr(self, '_key_file_public'): self.key_file_public = '~/.ssh/{key}.pub'.format(key=self.key_name) return self._key_file_public
python
def key_file_public(self): '''str: path to the public key that will be uploaded to the cloud provider (by default looks for a ``.pub`` file with name :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh`` directory) ''' if not hasattr(self, '_key_file_public'): self.key_file_public = '~/.ssh/{key}.pub'.format(key=self.key_name) return self._key_file_public
['def', 'key_file_public', '(', 'self', ')', ':', 'if', 'not', 'hasattr', '(', 'self', ',', "'_key_file_public'", ')', ':', 'self', '.', 'key_file_public', '=', "'~/.ssh/{key}.pub'", '.', 'format', '(', 'key', '=', 'self', '.', 'key_name', ')', 'return', 'self', '.', '_key_file_public']
str: path to the public key that will be uploaded to the cloud provider (by default looks for a ``.pub`` file with name :attr:`key_name <tmdeploy.config.CloudSection.key_name>` in ``~/.ssh`` directory)
['str', ':', 'path', 'to', 'the', 'public', 'key', 'that', 'will', 'be', 'uploaded', 'to', 'the', 'cloud', 'provider', '(', 'by', 'default', 'looks', 'for', 'a', '.', 'pub', 'file', 'with', 'name', ':', 'attr', ':', 'key_name', '<tmdeploy', '.', 'config', '.', 'CloudSection', '.', 'key_name', '>', 'in', '~', '/', '.', 'ssh', 'directory', ')']
train
https://github.com/TissueMAPS/TmDeploy/blob/f891b4ffb21431988bc4a063ae871da3bf284a45/tmdeploy/config.py#L310-L318
1,315
log2timeline/dfvfs
dfvfs/file_io/gzip_file_io.py
GzipFile.read
def read(self, size=None): """Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed. """ data = b'' while ((size and len(data) < size) and self._current_offset < self.uncompressed_data_size): member = self._GetMemberForOffset(self._current_offset) member_offset = self._current_offset - member.uncompressed_data_offset data_read = member.ReadAtOffset(member_offset, size) if data_read: self._current_offset += len(data_read) data = b''.join([data, data_read]) return data
python
def read(self, size=None): """Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed. """ data = b'' while ((size and len(data) < size) and self._current_offset < self.uncompressed_data_size): member = self._GetMemberForOffset(self._current_offset) member_offset = self._current_offset - member.uncompressed_data_offset data_read = member.ReadAtOffset(member_offset, size) if data_read: self._current_offset += len(data_read) data = b''.join([data, data_read]) return data
['def', 'read', '(', 'self', ',', 'size', '=', 'None', ')', ':', 'data', '=', "b''", 'while', '(', '(', 'size', 'and', 'len', '(', 'data', ')', '<', 'size', ')', 'and', 'self', '.', '_current_offset', '<', 'self', '.', 'uncompressed_data_size', ')', ':', 'member', '=', 'self', '.', '_GetMemberForOffset', '(', 'self', '.', '_current_offset', ')', 'member_offset', '=', 'self', '.', '_current_offset', '-', 'member', '.', 'uncompressed_data_offset', 'data_read', '=', 'member', '.', 'ReadAtOffset', '(', 'member_offset', ',', 'size', ')', 'if', 'data_read', ':', 'self', '.', '_current_offset', '+=', 'len', '(', 'data_read', ')', 'data', '=', "b''", '.', 'join', '(', '[', 'data', ',', 'data_read', ']', ')', 'return', 'data']
Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
['Reads', 'a', 'byte', 'string', 'from', 'the', 'gzip', 'file', 'at', 'the', 'current', 'offset', '.']
train
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/file_io/gzip_file_io.py#L117-L144
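A hedged sketch of reading through dfvfs's gzip file-like object. The path-spec and resolver calls follow dfvfs's usual pattern, but the file location is a placeholder and the exact API should be checked against the installed dfvfs release.

    from dfvfs.lib import definitions
    from dfvfs.path import factory as path_spec_factory
    from dfvfs.resolver import resolver

    # Placeholder path to a gzip file on the local OS file system.
    os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location='/tmp/example.gz')
    gzip_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)

    file_object = resolver.Resolver.OpenFileObject(gzip_path_spec)
    # read() with a size returns up to that many decompressed bytes from the
    # current offset; with no size it returns the remaining decompressed data.
    print(file_object.read(16))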
1,316
Esri/ArcREST
src/arcrest/manageags/administration.py
AGSAdministration.services
def services(self): """ Gets the services object which will provide the ArcGIS Server's admin information about services and folders. """ if self._resources is None: self.__init() if "services" in self._resources: url = self._url + "/services" return _services.Services(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True) else: return None
python
def services(self): """ Gets the services object which will provide the ArcGIS Server's admin information about services and folders. """ if self._resources is None: self.__init() if "services" in self._resources: url = self._url + "/services" return _services.Services(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port, initialize=True) else: return None
['def', 'services', '(', 'self', ')', ':', 'if', 'self', '.', '_resources', 'is', 'None', ':', 'self', '.', '__init', '(', ')', 'if', '"services"', 'in', 'self', '.', '_resources', ':', 'url', '=', 'self', '.', '_url', '+', '"/services"', 'return', '_services', '.', 'Services', '(', 'url', '=', 'url', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ',', 'initialize', '=', 'True', ')', 'else', ':', 'return', 'None']
Gets the services object which will provide the ArcGIS Server's admin information about services and folders.
['Gets', 'the', 'services', 'object', 'which', 'will', 'provide', 'the', 'ArcGIS', 'Server', 's', 'admin', 'information', 'about', 'services', 'and', 'folders', '.']
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/administration.py#L382-L397
1,317
saltstack/salt
salt/netapi/__init__.py
NetapiClient.ssh
def ssh(self, *args, **kwargs): ''' Run salt-ssh commands synchronously Wraps :py:meth:`salt.client.ssh.client.SSHClient.cmd_sync`. :return: Returns the result from the salt-ssh command ''' ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts, disable_custom_roster=True) return ssh_client.cmd_sync(kwargs)
python
def ssh(self, *args, **kwargs): ''' Run salt-ssh commands synchronously Wraps :py:meth:`salt.client.ssh.client.SSHClient.cmd_sync`. :return: Returns the result from the salt-ssh command ''' ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts, disable_custom_roster=True) return ssh_client.cmd_sync(kwargs)
['def', 'ssh', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'ssh_client', '=', 'salt', '.', 'client', '.', 'ssh', '.', 'client', '.', 'SSHClient', '(', 'mopts', '=', 'self', '.', 'opts', ',', 'disable_custom_roster', '=', 'True', ')', 'return', 'ssh_client', '.', 'cmd_sync', '(', 'kwargs', ')']
Run salt-ssh commands synchronously Wraps :py:meth:`salt.client.ssh.client.SSHClient.cmd_sync`. :return: Returns the result from the salt-ssh command
['Run', 'salt', '-', 'ssh', 'commands', 'synchronously']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/__init__.py#L136-L146
1,318
boriel/zxbasic
zxbparser.py
p_let_arr_substr_in_args3
def p_let_arr_substr_in_args3(p): """ statement : LET ARRAY_ID LP arguments COMMA TO RP EQ expr | ARRAY_ID LP arguments COMMA TO RP EQ expr """ i = 2 if p[1].upper() == 'LET' else 1 id_ = p[i] arg_list = p[i + 2] substr = (make_number(0, lineno=p.lineno(i + 4)), make_number(gl.MAX_STRSLICE_IDX, lineno=p.lineno(i + 3))) expr_ = p[i + 7] p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, substr, expr_)
python
def p_let_arr_substr_in_args3(p): """ statement : LET ARRAY_ID LP arguments COMMA TO RP EQ expr | ARRAY_ID LP arguments COMMA TO RP EQ expr """ i = 2 if p[1].upper() == 'LET' else 1 id_ = p[i] arg_list = p[i + 2] substr = (make_number(0, lineno=p.lineno(i + 4)), make_number(gl.MAX_STRSLICE_IDX, lineno=p.lineno(i + 3))) expr_ = p[i + 7] p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, substr, expr_)
['def', 'p_let_arr_substr_in_args3', '(', 'p', ')', ':', 'i', '=', '2', 'if', 'p', '[', '1', ']', '.', 'upper', '(', ')', '==', "'LET'", 'else', '1', 'id_', '=', 'p', '[', 'i', ']', 'arg_list', '=', 'p', '[', 'i', '+', '2', ']', 'substr', '=', '(', 'make_number', '(', '0', ',', 'lineno', '=', 'p', '.', 'lineno', '(', 'i', '+', '4', ')', ')', ',', 'make_number', '(', 'gl', '.', 'MAX_STRSLICE_IDX', ',', 'lineno', '=', 'p', '.', 'lineno', '(', 'i', '+', '3', ')', ')', ')', 'expr_', '=', 'p', '[', 'i', '+', '7', ']', 'p', '[', '0', ']', '=', 'make_array_substr_assign', '(', 'p', '.', 'lineno', '(', 'i', ')', ',', 'id_', ',', 'arg_list', ',', 'substr', ',', 'expr_', ')']
statement : LET ARRAY_ID LP arguments COMMA TO RP EQ expr | ARRAY_ID LP arguments COMMA TO RP EQ expr
['statement', ':', 'LET', 'ARRAY_ID', 'LP', 'arguments', 'COMMA', 'TO', 'RP', 'EQ', 'expr', '|', 'ARRAY_ID', 'LP', 'arguments', 'COMMA', 'TO', 'RP', 'EQ', 'expr']
train
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/zxbparser.py#L2646-L2657
1,319
andycasey/sick
sick/models/model.py
Model.infer
def infer(self, data, initial_proposal=None, full_output=False,**kwargs): """ Infer the model parameters, given the data. auto_convergence=True, walkers=100, burn=2000, sample=2000, minimum_sample=2000, convergence_check_frequency=1000, a=2.0, threads=1, """ # Apply data masks now so we don't have to do it on the fly. data, pixels_affected = self._apply_data_mask(data) # Any channels / parameters to ignore? matched_channels, missing_channels, ignore_parameters \ = self._match_channels_to_data(data) parameters = [p for p in self.parameters if p not in ignore_parameters] #parameters = list(set(self.parameters).difference(ignore_parameters)) logger.debug("Inferring {0} parameters: {1}".format(len(parameters), ", ".join(parameters))) # What sampling behaviour will we have? # - Auto-convergence: # + Sample for `minimum_sample` (default 2000, 200 walkers) # + Calculate the maximum exponential autocorrelation time for # all parameters # + For the rest of the chain, calculate the autocorrelation time # + Ensure that the number of samples we have is more than # `effectively_independent_samples` (default 100) times. # - Specified convergence: # + Burn for `burn` (default 2000) steps # + Sample for `sample` (default 2000) steps kwd = { "auto_convergence": False, # TODO CHANGE ME "walkers": 100, "burn": 2000, "sample": 2000, # The minimum_sample, n_tau_exp_as_burn_in, minimum_eis are only # used if auto_convergence is turned on. "minimum_sample": 2000, "maximum_sample": 100000, "n_tau_exp_as_burn_in": 3, "minimum_effective_independent_samples": 100, "check_convergence_frequency": 1000, "a": 2.0, "threads": 1 } # Update from the model, then update from any keyword arguments given. kwd.update(self._configuration.get("infer", {}).copy()) kwd.update(**kwargs) # Make some checks. if kwd["walkers"] % 2 > 0 or kwd["walkers"] < 2 * len(parameters): raise ValueError("the number of walkers must be an even number and " "be at least twice the number of model parameters") check_keywords = ["threads", "a"] if kwd["auto_convergence"]: logger.info("Convergence will be estimated automatically.") check_keywords += ["minimum_sample", "check_convergence_frequency", "minimum_effective_independent_samples", "n_tau_exp_as_burn_in", "maximum_sample"] else: check_keywords += ["burn", "sample"] logger.warn("No convergence checks will be done!") logger.info("Burning for {0} steps and sampling for {1} with {2} "\ "walkers".format(kwd["burn"], kwd["sample"], kwd["walkers"])) for keyword in check_keywords: if kwd[keyword] < 1: raise ValueError("keyword {} must be a positive value".format( keyword)) # Check for non-standard proposal scales. if kwd["a"] != 2.0: logger.warn("Using proposal scale of {0:.2f}".format(kwd["a"])) # If no initial proposal given, estimate the model parameters. 
if initial_proposal is None: initial_proposal = self.estimate(data) # Initial proposal could be: # - an array (N_walkers, N_dimensions) # - a dictionary containing key/value pairs for the dimensions if isinstance(initial_proposal, dict): wavelengths_required = [] for channel, spectrum in zip(matched_channels, data): if channel is None: continue z = initial_proposal.get("z", initial_proposal.get("z_{}".format(channel), 0)) wavelengths_required.append( [spectrum.disp[0] * (1 - z), spectrum.disp[-1] * (1 - z)]) closest_point = [initial_proposal[p] \ for p in self.grid_points.dtype.names] subset_bounds = self._initialise_approximator( closest_point=closest_point, wavelengths_required=wavelengths_required, force=True, **kwargs) initial_proposal = self._initial_proposal_distribution( parameters, initial_proposal, kwd["walkers"]) elif isinstance(initial_proposal, np.ndarray): initial_proposal = np.atleast_2d(initial_proposal) if initial_proposal.shape != (kwd["walkers"], len(parameters)): raise ValueError("initial proposal must be an array of shape "\ "(N_parameters, N_walkers) ({0}, {1})".format(kwd["walkers"], len(parameters))) # Prepare the convolution functions. self._create_convolution_functions(matched_channels, data, parameters) # Create the sampler. logger.info("Creating sampler with {0} walkers and {1} threads".format( kwd["walkers"], kwd["threads"])) debug = kwargs.get("debug", False) sampler = emcee.EnsembleSampler(kwd["walkers"], len(parameters), inference.ln_probability, a=kwd["a"], threads=kwd["threads"], args=(parameters, self, data, debug), kwargs={"matched_channels": matched_channels}) # Regardless of whether we automatically check for convergence or not, # we will still need to burn in for some minimum amount of time. if kwd["auto_convergence"]: # Sample for `minimum_sample` period. descr, iterations = "", kwd["minimum_sample"] else: # Sample for `burn` period descr, iterations = "burn-in", kwd["burn"] # Start sampling. t_init = time() acceptance_fractions = [] progress_bar = kwargs.get("__show_progress_bar", True) sampler, init_acceptance_fractions, pos, lnprob, rstate, init_elapsed \ = self._sample(sampler, initial_proposal, iterations, descr=descr, parameters=parameters, __show_progress_bar=progress_bar) acceptance_fractions.append(init_acceptance_fractions) # If we don't have to check for convergence, it's easy: if not kwd["auto_convergence"]: # Save the chain and log probabilities before we reset the chain. burn, sample = kwd["burn"], kwd["sample"] converged = None # we don't know! burn_chains = sampler.chain burn_ln_probabilities = sampler.lnprobability # Reset the chain. logger.debug("Resetting chain...") sampler.reset() # Sample the posterior. sampler, prod_acceptance_fractions, pos, lnprob, rstate, t_elapsed \ = self._sample(sampler, pos, kwd["sample"], lnprob0=lnprob, rstate0=rstate, descr="production", parameters=parameters, __show_progress_bar=progress_bar) production_chains = sampler.chain production_ln_probabilities = sampler.lnprobability acceptance_fractions.append(prod_acceptance_fractions) else: # Start checking for convergence at a frequency # of check_convergence_frequency last_state = [pos, lnprob, rstate] converged, total_steps = False, 0 + iterations min_eis_required = kwd["minimum_effective_independent_samples"] while not converged and kwd["maximum_sample"] > total_steps: # Check for convergence. # Estimate the exponential autocorrelation time. 
try: tau_exp, rho, rho_max_fit \ = utils.estimate_tau_exp(sampler.chain) except: logger.exception("Exception occurred when trying to " "estimate the exponential autocorrelation time:") logger.info("To recover, we are temporarily setting tau_exp" " to {0}".format(total_steps)) tau_exp = total_steps logger.info("Estimated tau_exp at {0} is {1:.0f}".format( total_steps, tau_exp)) # Grab everything n_tau_exp_as_burn_in times that. burn = int(np.ceil(tau_exp)) * kwd["n_tau_exp_as_burn_in"] sample = sampler.chain.shape[1] - burn if 1 > sample: logger.info("Sampler has not converged because {0}x the " "estimated exponential autocorrelation time of {1:.0f}" " is step {2}, and we are only at step {3}".format( kwd["n_tau_exp_as_burn_in"], tau_exp, burn, total_steps)) else: # Calculate the integrated autocorrelation time in the # remaining sample, for every parameter. tau_int = utils.estimate_tau_int(sampler.chain[:, burn:]) # Calculate the effective number of independent samples in # each parameter. num_effective = (kwd["walkers"] * sample)/(2*tau_int) logger.info("Effective number of independent samples in " "each parameter:") for parameter, n_eis in zip(parameters, num_effective): logger.info("\t{0}: {1:.0f}".format(parameter, n_eis)) if num_effective.min() > min_eis_required: # Converged. converged = True logger.info("Convergence achieved ({0:.0f} > {1:.0f})"\ .format(num_effective.min() > min_eis_required)) # Separate the samples into burn and production.. burn_chains = sampler.chain[:, :burn, :] burn_ln_probabilities = sampler.lnprobability[:burn] production_chains = sampler.chain[:, burn:, :] production_ln_probabilities = sampler.lnprobability[burn:] break else: # Nope. logger.info("Sampler has not converged because it did " "not meet the minimum number of effective " "independent samples ({0:.0f})".format(kwd["n"])) # Keep sampling. iterations = kwd["check_convergence_frequency"] logger.info("Trying for another {0} steps".format(iterations)) pos, lnprob, rstate = last_state sampler, af, pos, lnprob, rstate, t_elapsed = self._sample( sampler, pos, iterations, lnprob0=lnprob, rstate0=rstate, descr="", parameters=parameters, __show_progress_bar=progress_bar) total_steps += iterations acceptance_fractions.append(af) last_state.extend(pos, lnprob, rstate) del last_state[:3] if not converged: logger.warn("Maximum number of samples ({:.0f}) reached without" "convergence!".format(kwd["maximum_sample"])) logger.info("Total time elapsed: {0} seconds".format(time() - t_init)) if sampler.pool: sampler.pool.close() sampler.pool.join() # Stack burn and production information together. chains = np.hstack([burn_chains, production_chains]) lnprobability = np.hstack([ burn_ln_probabilities, production_ln_probabilities]) acceptance_fractions = np.hstack(acceptance_fractions) chi_sq, dof, model_fluxes = self._chi_sq(dict(zip(parameters, [np.percentile(chains[:, burn:, i], 50) for i in range(len(parameters))])), data) # Convert velocity scales. symbol, scale, units = self._preferred_redshift_scale labels = [] + parameters scales = np.ones(len(parameters)) if symbol != "z": for i, parameter in enumerate(parameters): if parameter == "z" or parameter.startswith("z_"): chains[:, :, i] *= scale scales[i] = scale if "_" in parameter: labels[i] = "_".join([symbol, parameter.split("_")[1:]]) else: labels[i] = symbol logger.debug("Scaled {0} (now {1}) to units of {2}".format( parameter, labels[i], units)) # Calculate MAP values and associated uncertainties. 
theta = OrderedDict() for i, label in enumerate(labels): l, c, u = np.percentile(chains[:, burn:, i], [16, 50, 84]) theta[label] = (c, u-c, l-c) # Re-arrange the chains to be in the same order as the model parameters. indices = np.array([parameters.index(p) \ for p in self.parameters if p in parameters]) chains = chains[:, :, indices] # Remove the convolution functions. if not kwargs.get("__keep_convolution_functions", False): self._destroy_convolution_functions() if full_output: metadata = { "burn": burn, "walkers": kwd["walkers"], "sample": sample, "parameters": labels, "scales": scales, "chi_sq": chi_sq, "dof": dof } return (theta, chains, lnprobability, acceptance_fractions, sampler, metadata) return theta
python
def infer(self, data, initial_proposal=None, full_output=False,**kwargs): """ Infer the model parameters, given the data. auto_convergence=True, walkers=100, burn=2000, sample=2000, minimum_sample=2000, convergence_check_frequency=1000, a=2.0, threads=1, """ # Apply data masks now so we don't have to do it on the fly. data, pixels_affected = self._apply_data_mask(data) # Any channels / parameters to ignore? matched_channels, missing_channels, ignore_parameters \ = self._match_channels_to_data(data) parameters = [p for p in self.parameters if p not in ignore_parameters] #parameters = list(set(self.parameters).difference(ignore_parameters)) logger.debug("Inferring {0} parameters: {1}".format(len(parameters), ", ".join(parameters))) # What sampling behaviour will we have? # - Auto-convergence: # + Sample for `minimum_sample` (default 2000, 200 walkers) # + Calculate the maximum exponential autocorrelation time for # all parameters # + For the rest of the chain, calculate the autocorrelation time # + Ensure that the number of samples we have is more than # `effectively_independent_samples` (default 100) times. # - Specified convergence: # + Burn for `burn` (default 2000) steps # + Sample for `sample` (default 2000) steps kwd = { "auto_convergence": False, # TODO CHANGE ME "walkers": 100, "burn": 2000, "sample": 2000, # The minimum_sample, n_tau_exp_as_burn_in, minimum_eis are only # used if auto_convergence is turned on. "minimum_sample": 2000, "maximum_sample": 100000, "n_tau_exp_as_burn_in": 3, "minimum_effective_independent_samples": 100, "check_convergence_frequency": 1000, "a": 2.0, "threads": 1 } # Update from the model, then update from any keyword arguments given. kwd.update(self._configuration.get("infer", {}).copy()) kwd.update(**kwargs) # Make some checks. if kwd["walkers"] % 2 > 0 or kwd["walkers"] < 2 * len(parameters): raise ValueError("the number of walkers must be an even number and " "be at least twice the number of model parameters") check_keywords = ["threads", "a"] if kwd["auto_convergence"]: logger.info("Convergence will be estimated automatically.") check_keywords += ["minimum_sample", "check_convergence_frequency", "minimum_effective_independent_samples", "n_tau_exp_as_burn_in", "maximum_sample"] else: check_keywords += ["burn", "sample"] logger.warn("No convergence checks will be done!") logger.info("Burning for {0} steps and sampling for {1} with {2} "\ "walkers".format(kwd["burn"], kwd["sample"], kwd["walkers"])) for keyword in check_keywords: if kwd[keyword] < 1: raise ValueError("keyword {} must be a positive value".format( keyword)) # Check for non-standard proposal scales. if kwd["a"] != 2.0: logger.warn("Using proposal scale of {0:.2f}".format(kwd["a"])) # If no initial proposal given, estimate the model parameters. 
if initial_proposal is None: initial_proposal = self.estimate(data) # Initial proposal could be: # - an array (N_walkers, N_dimensions) # - a dictionary containing key/value pairs for the dimensions if isinstance(initial_proposal, dict): wavelengths_required = [] for channel, spectrum in zip(matched_channels, data): if channel is None: continue z = initial_proposal.get("z", initial_proposal.get("z_{}".format(channel), 0)) wavelengths_required.append( [spectrum.disp[0] * (1 - z), spectrum.disp[-1] * (1 - z)]) closest_point = [initial_proposal[p] \ for p in self.grid_points.dtype.names] subset_bounds = self._initialise_approximator( closest_point=closest_point, wavelengths_required=wavelengths_required, force=True, **kwargs) initial_proposal = self._initial_proposal_distribution( parameters, initial_proposal, kwd["walkers"]) elif isinstance(initial_proposal, np.ndarray): initial_proposal = np.atleast_2d(initial_proposal) if initial_proposal.shape != (kwd["walkers"], len(parameters)): raise ValueError("initial proposal must be an array of shape "\ "(N_parameters, N_walkers) ({0}, {1})".format(kwd["walkers"], len(parameters))) # Prepare the convolution functions. self._create_convolution_functions(matched_channels, data, parameters) # Create the sampler. logger.info("Creating sampler with {0} walkers and {1} threads".format( kwd["walkers"], kwd["threads"])) debug = kwargs.get("debug", False) sampler = emcee.EnsembleSampler(kwd["walkers"], len(parameters), inference.ln_probability, a=kwd["a"], threads=kwd["threads"], args=(parameters, self, data, debug), kwargs={"matched_channels": matched_channels}) # Regardless of whether we automatically check for convergence or not, # we will still need to burn in for some minimum amount of time. if kwd["auto_convergence"]: # Sample for `minimum_sample` period. descr, iterations = "", kwd["minimum_sample"] else: # Sample for `burn` period descr, iterations = "burn-in", kwd["burn"] # Start sampling. t_init = time() acceptance_fractions = [] progress_bar = kwargs.get("__show_progress_bar", True) sampler, init_acceptance_fractions, pos, lnprob, rstate, init_elapsed \ = self._sample(sampler, initial_proposal, iterations, descr=descr, parameters=parameters, __show_progress_bar=progress_bar) acceptance_fractions.append(init_acceptance_fractions) # If we don't have to check for convergence, it's easy: if not kwd["auto_convergence"]: # Save the chain and log probabilities before we reset the chain. burn, sample = kwd["burn"], kwd["sample"] converged = None # we don't know! burn_chains = sampler.chain burn_ln_probabilities = sampler.lnprobability # Reset the chain. logger.debug("Resetting chain...") sampler.reset() # Sample the posterior. sampler, prod_acceptance_fractions, pos, lnprob, rstate, t_elapsed \ = self._sample(sampler, pos, kwd["sample"], lnprob0=lnprob, rstate0=rstate, descr="production", parameters=parameters, __show_progress_bar=progress_bar) production_chains = sampler.chain production_ln_probabilities = sampler.lnprobability acceptance_fractions.append(prod_acceptance_fractions) else: # Start checking for convergence at a frequency # of check_convergence_frequency last_state = [pos, lnprob, rstate] converged, total_steps = False, 0 + iterations min_eis_required = kwd["minimum_effective_independent_samples"] while not converged and kwd["maximum_sample"] > total_steps: # Check for convergence. # Estimate the exponential autocorrelation time. 
try: tau_exp, rho, rho_max_fit \ = utils.estimate_tau_exp(sampler.chain) except: logger.exception("Exception occurred when trying to " "estimate the exponential autocorrelation time:") logger.info("To recover, we are temporarily setting tau_exp" " to {0}".format(total_steps)) tau_exp = total_steps logger.info("Estimated tau_exp at {0} is {1:.0f}".format( total_steps, tau_exp)) # Grab everything n_tau_exp_as_burn_in times that. burn = int(np.ceil(tau_exp)) * kwd["n_tau_exp_as_burn_in"] sample = sampler.chain.shape[1] - burn if 1 > sample: logger.info("Sampler has not converged because {0}x the " "estimated exponential autocorrelation time of {1:.0f}" " is step {2}, and we are only at step {3}".format( kwd["n_tau_exp_as_burn_in"], tau_exp, burn, total_steps)) else: # Calculate the integrated autocorrelation time in the # remaining sample, for every parameter. tau_int = utils.estimate_tau_int(sampler.chain[:, burn:]) # Calculate the effective number of independent samples in # each parameter. num_effective = (kwd["walkers"] * sample)/(2*tau_int) logger.info("Effective number of independent samples in " "each parameter:") for parameter, n_eis in zip(parameters, num_effective): logger.info("\t{0}: {1:.0f}".format(parameter, n_eis)) if num_effective.min() > min_eis_required: # Converged. converged = True logger.info("Convergence achieved ({0:.0f} > {1:.0f})"\ .format(num_effective.min() > min_eis_required)) # Separate the samples into burn and production.. burn_chains = sampler.chain[:, :burn, :] burn_ln_probabilities = sampler.lnprobability[:burn] production_chains = sampler.chain[:, burn:, :] production_ln_probabilities = sampler.lnprobability[burn:] break else: # Nope. logger.info("Sampler has not converged because it did " "not meet the minimum number of effective " "independent samples ({0:.0f})".format(kwd["n"])) # Keep sampling. iterations = kwd["check_convergence_frequency"] logger.info("Trying for another {0} steps".format(iterations)) pos, lnprob, rstate = last_state sampler, af, pos, lnprob, rstate, t_elapsed = self._sample( sampler, pos, iterations, lnprob0=lnprob, rstate0=rstate, descr="", parameters=parameters, __show_progress_bar=progress_bar) total_steps += iterations acceptance_fractions.append(af) last_state.extend(pos, lnprob, rstate) del last_state[:3] if not converged: logger.warn("Maximum number of samples ({:.0f}) reached without" "convergence!".format(kwd["maximum_sample"])) logger.info("Total time elapsed: {0} seconds".format(time() - t_init)) if sampler.pool: sampler.pool.close() sampler.pool.join() # Stack burn and production information together. chains = np.hstack([burn_chains, production_chains]) lnprobability = np.hstack([ burn_ln_probabilities, production_ln_probabilities]) acceptance_fractions = np.hstack(acceptance_fractions) chi_sq, dof, model_fluxes = self._chi_sq(dict(zip(parameters, [np.percentile(chains[:, burn:, i], 50) for i in range(len(parameters))])), data) # Convert velocity scales. symbol, scale, units = self._preferred_redshift_scale labels = [] + parameters scales = np.ones(len(parameters)) if symbol != "z": for i, parameter in enumerate(parameters): if parameter == "z" or parameter.startswith("z_"): chains[:, :, i] *= scale scales[i] = scale if "_" in parameter: labels[i] = "_".join([symbol, parameter.split("_")[1:]]) else: labels[i] = symbol logger.debug("Scaled {0} (now {1}) to units of {2}".format( parameter, labels[i], units)) # Calculate MAP values and associated uncertainties. 
theta = OrderedDict() for i, label in enumerate(labels): l, c, u = np.percentile(chains[:, burn:, i], [16, 50, 84]) theta[label] = (c, u-c, l-c) # Re-arrange the chains to be in the same order as the model parameters. indices = np.array([parameters.index(p) \ for p in self.parameters if p in parameters]) chains = chains[:, :, indices] # Remove the convolution functions. if not kwargs.get("__keep_convolution_functions", False): self._destroy_convolution_functions() if full_output: metadata = { "burn": burn, "walkers": kwd["walkers"], "sample": sample, "parameters": labels, "scales": scales, "chi_sq": chi_sq, "dof": dof } return (theta, chains, lnprobability, acceptance_fractions, sampler, metadata) return theta
['def', 'infer', '(', 'self', ',', 'data', ',', 'initial_proposal', '=', 'None', ',', 'full_output', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', "# Apply data masks now so we don't have to do it on the fly.", 'data', ',', 'pixels_affected', '=', 'self', '.', '_apply_data_mask', '(', 'data', ')', '# Any channels / parameters to ignore?', 'matched_channels', ',', 'missing_channels', ',', 'ignore_parameters', '=', 'self', '.', '_match_channels_to_data', '(', 'data', ')', 'parameters', '=', '[', 'p', 'for', 'p', 'in', 'self', '.', 'parameters', 'if', 'p', 'not', 'in', 'ignore_parameters', ']', '#parameters = list(set(self.parameters).difference(ignore_parameters))', 'logger', '.', 'debug', '(', '"Inferring {0} parameters: {1}"', '.', 'format', '(', 'len', '(', 'parameters', ')', ',', '", "', '.', 'join', '(', 'parameters', ')', ')', ')', '# What sampling behaviour will we have?', '# - Auto-convergence:', '# + Sample for `minimum_sample` (default 2000, 200 walkers)', '# + Calculate the maximum exponential autocorrelation time for', '# all parameters', '# + For the rest of the chain, calculate the autocorrelation time', '# + Ensure that the number of samples we have is more than ', '# `effectively_independent_samples` (default 100) times.', '# - Specified convergence:', '# + Burn for `burn` (default 2000) steps', '# + Sample for `sample` (default 2000) steps', 'kwd', '=', '{', '"auto_convergence"', ':', 'False', ',', '# TODO CHANGE ME', '"walkers"', ':', '100', ',', '"burn"', ':', '2000', ',', '"sample"', ':', '2000', ',', '# The minimum_sample, n_tau_exp_as_burn_in, minimum_eis are only', '# used if auto_convergence is turned on.', '"minimum_sample"', ':', '2000', ',', '"maximum_sample"', ':', '100000', ',', '"n_tau_exp_as_burn_in"', ':', '3', ',', '"minimum_effective_independent_samples"', ':', '100', ',', '"check_convergence_frequency"', ':', '1000', ',', '"a"', ':', '2.0', ',', '"threads"', ':', '1', '}', '# Update from the model, then update from any keyword arguments given.', 'kwd', '.', 'update', '(', 'self', '.', '_configuration', '.', 'get', '(', '"infer"', ',', '{', '}', ')', '.', 'copy', '(', ')', ')', 'kwd', '.', 'update', '(', '*', '*', 'kwargs', ')', '# Make some checks.', 'if', 'kwd', '[', '"walkers"', ']', '%', '2', '>', '0', 'or', 'kwd', '[', '"walkers"', ']', '<', '2', '*', 'len', '(', 'parameters', ')', ':', 'raise', 'ValueError', '(', '"the number of walkers must be an even number and "', '"be at least twice the number of model parameters"', ')', 'check_keywords', '=', '[', '"threads"', ',', '"a"', ']', 'if', 'kwd', '[', '"auto_convergence"', ']', ':', 'logger', '.', 'info', '(', '"Convergence will be estimated automatically."', ')', 'check_keywords', '+=', '[', '"minimum_sample"', ',', '"check_convergence_frequency"', ',', '"minimum_effective_independent_samples"', ',', '"n_tau_exp_as_burn_in"', ',', '"maximum_sample"', ']', 'else', ':', 'check_keywords', '+=', '[', '"burn"', ',', '"sample"', ']', 'logger', '.', 'warn', '(', '"No convergence checks will be done!"', ')', 'logger', '.', 'info', '(', '"Burning for {0} steps and sampling for {1} with {2} "', '"walkers"', '.', 'format', '(', 'kwd', '[', '"burn"', ']', ',', 'kwd', '[', '"sample"', ']', ',', 'kwd', '[', '"walkers"', ']', ')', ')', 'for', 'keyword', 'in', 'check_keywords', ':', 'if', 'kwd', '[', 'keyword', ']', '<', '1', ':', 'raise', 'ValueError', '(', '"keyword {} must be a positive value"', '.', 'format', '(', 'keyword', ')', ')', '# Check for non-standard proposal scales.', 'if', 'kwd', '[', '"a"', ']', 
'!=', '2.0', ':', 'logger', '.', 'warn', '(', '"Using proposal scale of {0:.2f}"', '.', 'format', '(', 'kwd', '[', '"a"', ']', ')', ')', '# If no initial proposal given, estimate the model parameters.', 'if', 'initial_proposal', 'is', 'None', ':', 'initial_proposal', '=', 'self', '.', 'estimate', '(', 'data', ')', '# Initial proposal could be:', '# - an array (N_walkers, N_dimensions)', '# - a dictionary containing key/value pairs for the dimensions', 'if', 'isinstance', '(', 'initial_proposal', ',', 'dict', ')', ':', 'wavelengths_required', '=', '[', ']', 'for', 'channel', ',', 'spectrum', 'in', 'zip', '(', 'matched_channels', ',', 'data', ')', ':', 'if', 'channel', 'is', 'None', ':', 'continue', 'z', '=', 'initial_proposal', '.', 'get', '(', '"z"', ',', 'initial_proposal', '.', 'get', '(', '"z_{}"', '.', 'format', '(', 'channel', ')', ',', '0', ')', ')', 'wavelengths_required', '.', 'append', '(', '[', 'spectrum', '.', 'disp', '[', '0', ']', '*', '(', '1', '-', 'z', ')', ',', 'spectrum', '.', 'disp', '[', '-', '1', ']', '*', '(', '1', '-', 'z', ')', ']', ')', 'closest_point', '=', '[', 'initial_proposal', '[', 'p', ']', 'for', 'p', 'in', 'self', '.', 'grid_points', '.', 'dtype', '.', 'names', ']', 'subset_bounds', '=', 'self', '.', '_initialise_approximator', '(', 'closest_point', '=', 'closest_point', ',', 'wavelengths_required', '=', 'wavelengths_required', ',', 'force', '=', 'True', ',', '*', '*', 'kwargs', ')', 'initial_proposal', '=', 'self', '.', '_initial_proposal_distribution', '(', 'parameters', ',', 'initial_proposal', ',', 'kwd', '[', '"walkers"', ']', ')', 'elif', 'isinstance', '(', 'initial_proposal', ',', 'np', '.', 'ndarray', ')', ':', 'initial_proposal', '=', 'np', '.', 'atleast_2d', '(', 'initial_proposal', ')', 'if', 'initial_proposal', '.', 'shape', '!=', '(', 'kwd', '[', '"walkers"', ']', ',', 'len', '(', 'parameters', ')', ')', ':', 'raise', 'ValueError', '(', '"initial proposal must be an array of shape "', '"(N_parameters, N_walkers) ({0}, {1})"', '.', 'format', '(', 'kwd', '[', '"walkers"', ']', ',', 'len', '(', 'parameters', ')', ')', ')', '# Prepare the convolution functions.', 'self', '.', '_create_convolution_functions', '(', 'matched_channels', ',', 'data', ',', 'parameters', ')', '# Create the sampler.', 'logger', '.', 'info', '(', '"Creating sampler with {0} walkers and {1} threads"', '.', 'format', '(', 'kwd', '[', '"walkers"', ']', ',', 'kwd', '[', '"threads"', ']', ')', ')', 'debug', '=', 'kwargs', '.', 'get', '(', '"debug"', ',', 'False', ')', 'sampler', '=', 'emcee', '.', 'EnsembleSampler', '(', 'kwd', '[', '"walkers"', ']', ',', 'len', '(', 'parameters', ')', ',', 'inference', '.', 'ln_probability', ',', 'a', '=', 'kwd', '[', '"a"', ']', ',', 'threads', '=', 'kwd', '[', '"threads"', ']', ',', 'args', '=', '(', 'parameters', ',', 'self', ',', 'data', ',', 'debug', ')', ',', 'kwargs', '=', '{', '"matched_channels"', ':', 'matched_channels', '}', ')', '# Regardless of whether we automatically check for convergence or not,', '# we will still need to burn in for some minimum amount of time.', 'if', 'kwd', '[', '"auto_convergence"', ']', ':', '# Sample for `minimum_sample` period.', 'descr', ',', 'iterations', '=', '""', ',', 'kwd', '[', '"minimum_sample"', ']', 'else', ':', '# Sample for `burn` period', 'descr', ',', 'iterations', '=', '"burn-in"', ',', 'kwd', '[', '"burn"', ']', '# Start sampling.', 't_init', '=', 'time', '(', ')', 'acceptance_fractions', '=', '[', ']', 'progress_bar', '=', 'kwargs', '.', 'get', '(', '"__show_progress_bar"', ',', 'True', 
')', 'sampler', ',', 'init_acceptance_fractions', ',', 'pos', ',', 'lnprob', ',', 'rstate', ',', 'init_elapsed', '=', 'self', '.', '_sample', '(', 'sampler', ',', 'initial_proposal', ',', 'iterations', ',', 'descr', '=', 'descr', ',', 'parameters', '=', 'parameters', ',', '__show_progress_bar', '=', 'progress_bar', ')', 'acceptance_fractions', '.', 'append', '(', 'init_acceptance_fractions', ')', "# If we don't have to check for convergence, it's easy:", 'if', 'not', 'kwd', '[', '"auto_convergence"', ']', ':', '# Save the chain and log probabilities before we reset the chain.', 'burn', ',', 'sample', '=', 'kwd', '[', '"burn"', ']', ',', 'kwd', '[', '"sample"', ']', 'converged', '=', 'None', "# we don't know!", 'burn_chains', '=', 'sampler', '.', 'chain', 'burn_ln_probabilities', '=', 'sampler', '.', 'lnprobability', '# Reset the chain.', 'logger', '.', 'debug', '(', '"Resetting chain..."', ')', 'sampler', '.', 'reset', '(', ')', '# Sample the posterior.', 'sampler', ',', 'prod_acceptance_fractions', ',', 'pos', ',', 'lnprob', ',', 'rstate', ',', 't_elapsed', '=', 'self', '.', '_sample', '(', 'sampler', ',', 'pos', ',', 'kwd', '[', '"sample"', ']', ',', 'lnprob0', '=', 'lnprob', ',', 'rstate0', '=', 'rstate', ',', 'descr', '=', '"production"', ',', 'parameters', '=', 'parameters', ',', '__show_progress_bar', '=', 'progress_bar', ')', 'production_chains', '=', 'sampler', '.', 'chain', 'production_ln_probabilities', '=', 'sampler', '.', 'lnprobability', 'acceptance_fractions', '.', 'append', '(', 'prod_acceptance_fractions', ')', 'else', ':', '# Start checking for convergence at a frequency', '# of check_convergence_frequency', 'last_state', '=', '[', 'pos', ',', 'lnprob', ',', 'rstate', ']', 'converged', ',', 'total_steps', '=', 'False', ',', '0', '+', 'iterations', 'min_eis_required', '=', 'kwd', '[', '"minimum_effective_independent_samples"', ']', 'while', 'not', 'converged', 'and', 'kwd', '[', '"maximum_sample"', ']', '>', 'total_steps', ':', '# Check for convergence.', '# Estimate the exponential autocorrelation time.', 'try', ':', 'tau_exp', ',', 'rho', ',', 'rho_max_fit', '=', 'utils', '.', 'estimate_tau_exp', '(', 'sampler', '.', 'chain', ')', 'except', ':', 'logger', '.', 'exception', '(', '"Exception occurred when trying to "', '"estimate the exponential autocorrelation time:"', ')', 'logger', '.', 'info', '(', '"To recover, we are temporarily setting tau_exp"', '" to {0}"', '.', 'format', '(', 'total_steps', ')', ')', 'tau_exp', '=', 'total_steps', 'logger', '.', 'info', '(', '"Estimated tau_exp at {0} is {1:.0f}"', '.', 'format', '(', 'total_steps', ',', 'tau_exp', ')', ')', '# Grab everything n_tau_exp_as_burn_in times that.', 'burn', '=', 'int', '(', 'np', '.', 'ceil', '(', 'tau_exp', ')', ')', '*', 'kwd', '[', '"n_tau_exp_as_burn_in"', ']', 'sample', '=', 'sampler', '.', 'chain', '.', 'shape', '[', '1', ']', '-', 'burn', 'if', '1', '>', 'sample', ':', 'logger', '.', 'info', '(', '"Sampler has not converged because {0}x the "', '"estimated exponential autocorrelation time of {1:.0f}"', '" is step {2}, and we are only at step {3}"', '.', 'format', '(', 'kwd', '[', '"n_tau_exp_as_burn_in"', ']', ',', 'tau_exp', ',', 'burn', ',', 'total_steps', ')', ')', 'else', ':', '# Calculate the integrated autocorrelation time in the ', '# remaining sample, for every parameter.', 'tau_int', '=', 'utils', '.', 'estimate_tau_int', '(', 'sampler', '.', 'chain', '[', ':', ',', 'burn', ':', ']', ')', '# Calculate the effective number of independent samples in ', '# each parameter.', 
'num_effective', '=', '(', 'kwd', '[', '"walkers"', ']', '*', 'sample', ')', '/', '(', '2', '*', 'tau_int', ')', 'logger', '.', 'info', '(', '"Effective number of independent samples in "', '"each parameter:"', ')', 'for', 'parameter', ',', 'n_eis', 'in', 'zip', '(', 'parameters', ',', 'num_effective', ')', ':', 'logger', '.', 'info', '(', '"\\t{0}: {1:.0f}"', '.', 'format', '(', 'parameter', ',', 'n_eis', ')', ')', 'if', 'num_effective', '.', 'min', '(', ')', '>', 'min_eis_required', ':', '# Converged.', 'converged', '=', 'True', 'logger', '.', 'info', '(', '"Convergence achieved ({0:.0f} > {1:.0f})"', '.', 'format', '(', 'num_effective', '.', 'min', '(', ')', '>', 'min_eis_required', ')', ')', '# Separate the samples into burn and production..', 'burn_chains', '=', 'sampler', '.', 'chain', '[', ':', ',', ':', 'burn', ',', ':', ']', 'burn_ln_probabilities', '=', 'sampler', '.', 'lnprobability', '[', ':', 'burn', ']', 'production_chains', '=', 'sampler', '.', 'chain', '[', ':', ',', 'burn', ':', ',', ':', ']', 'production_ln_probabilities', '=', 'sampler', '.', 'lnprobability', '[', 'burn', ':', ']', 'break', 'else', ':', '# Nope.', 'logger', '.', 'info', '(', '"Sampler has not converged because it did "', '"not meet the minimum number of effective "', '"independent samples ({0:.0f})"', '.', 'format', '(', 'kwd', '[', '"n"', ']', ')', ')', '# Keep sampling.', 'iterations', '=', 'kwd', '[', '"check_convergence_frequency"', ']', 'logger', '.', 'info', '(', '"Trying for another {0} steps"', '.', 'format', '(', 'iterations', ')', ')', 'pos', ',', 'lnprob', ',', 'rstate', '=', 'last_state', 'sampler', ',', 'af', ',', 'pos', ',', 'lnprob', ',', 'rstate', ',', 't_elapsed', '=', 'self', '.', '_sample', '(', 'sampler', ',', 'pos', ',', 'iterations', ',', 'lnprob0', '=', 'lnprob', ',', 'rstate0', '=', 'rstate', ',', 'descr', '=', '""', ',', 'parameters', '=', 'parameters', ',', '__show_progress_bar', '=', 'progress_bar', ')', 'total_steps', '+=', 'iterations', 'acceptance_fractions', '.', 'append', '(', 'af', ')', 'last_state', '.', 'extend', '(', 'pos', ',', 'lnprob', ',', 'rstate', ')', 'del', 'last_state', '[', ':', '3', ']', 'if', 'not', 'converged', ':', 'logger', '.', 'warn', '(', '"Maximum number of samples ({:.0f}) reached without"', '"convergence!"', '.', 'format', '(', 'kwd', '[', '"maximum_sample"', ']', ')', ')', 'logger', '.', 'info', '(', '"Total time elapsed: {0} seconds"', '.', 'format', '(', 'time', '(', ')', '-', 't_init', ')', ')', 'if', 'sampler', '.', 'pool', ':', 'sampler', '.', 'pool', '.', 'close', '(', ')', 'sampler', '.', 'pool', '.', 'join', '(', ')', '# Stack burn and production information together.', 'chains', '=', 'np', '.', 'hstack', '(', '[', 'burn_chains', ',', 'production_chains', ']', ')', 'lnprobability', '=', 'np', '.', 'hstack', '(', '[', 'burn_ln_probabilities', ',', 'production_ln_probabilities', ']', ')', 'acceptance_fractions', '=', 'np', '.', 'hstack', '(', 'acceptance_fractions', ')', 'chi_sq', ',', 'dof', ',', 'model_fluxes', '=', 'self', '.', '_chi_sq', '(', 'dict', '(', 'zip', '(', 'parameters', ',', '[', 'np', '.', 'percentile', '(', 'chains', '[', ':', ',', 'burn', ':', ',', 'i', ']', ',', '50', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'parameters', ')', ')', ']', ')', ')', ',', 'data', ')', '# Convert velocity scales.', 'symbol', ',', 'scale', ',', 'units', '=', 'self', '.', '_preferred_redshift_scale', 'labels', '=', '[', ']', '+', 'parameters', 'scales', '=', 'np', '.', 'ones', '(', 'len', '(', 'parameters', ')', ')', 'if', 'symbol', '!=', 
'"z"', ':', 'for', 'i', ',', 'parameter', 'in', 'enumerate', '(', 'parameters', ')', ':', 'if', 'parameter', '==', '"z"', 'or', 'parameter', '.', 'startswith', '(', '"z_"', ')', ':', 'chains', '[', ':', ',', ':', ',', 'i', ']', '*=', 'scale', 'scales', '[', 'i', ']', '=', 'scale', 'if', '"_"', 'in', 'parameter', ':', 'labels', '[', 'i', ']', '=', '"_"', '.', 'join', '(', '[', 'symbol', ',', 'parameter', '.', 'split', '(', '"_"', ')', '[', '1', ':', ']', ']', ')', 'else', ':', 'labels', '[', 'i', ']', '=', 'symbol', 'logger', '.', 'debug', '(', '"Scaled {0} (now {1}) to units of {2}"', '.', 'format', '(', 'parameter', ',', 'labels', '[', 'i', ']', ',', 'units', ')', ')', '# Calculate MAP values and associated uncertainties.', 'theta', '=', 'OrderedDict', '(', ')', 'for', 'i', ',', 'label', 'in', 'enumerate', '(', 'labels', ')', ':', 'l', ',', 'c', ',', 'u', '=', 'np', '.', 'percentile', '(', 'chains', '[', ':', ',', 'burn', ':', ',', 'i', ']', ',', '[', '16', ',', '50', ',', '84', ']', ')', 'theta', '[', 'label', ']', '=', '(', 'c', ',', 'u', '-', 'c', ',', 'l', '-', 'c', ')', '# Re-arrange the chains to be in the same order as the model parameters.', 'indices', '=', 'np', '.', 'array', '(', '[', 'parameters', '.', 'index', '(', 'p', ')', 'for', 'p', 'in', 'self', '.', 'parameters', 'if', 'p', 'in', 'parameters', ']', ')', 'chains', '=', 'chains', '[', ':', ',', ':', ',', 'indices', ']', '# Remove the convolution functions.', 'if', 'not', 'kwargs', '.', 'get', '(', '"__keep_convolution_functions"', ',', 'False', ')', ':', 'self', '.', '_destroy_convolution_functions', '(', ')', 'if', 'full_output', ':', 'metadata', '=', '{', '"burn"', ':', 'burn', ',', '"walkers"', ':', 'kwd', '[', '"walkers"', ']', ',', '"sample"', ':', 'sample', ',', '"parameters"', ':', 'labels', ',', '"scales"', ':', 'scales', ',', '"chi_sq"', ':', 'chi_sq', ',', '"dof"', ':', 'dof', '}', 'return', '(', 'theta', ',', 'chains', ',', 'lnprobability', ',', 'acceptance_fractions', ',', 'sampler', ',', 'metadata', ')', 'return', 'theta']
Infer the model parameters, given the data. auto_convergence=True, walkers=100, burn=2000, sample=2000, minimum_sample=2000, convergence_check_frequency=1000, a=2.0, threads=1,
['Infer', 'the', 'model', 'parameters', 'given', 'the', 'data', '.', 'auto_convergence', '=', 'True', 'walkers', '=', '100', 'burn', '=', '2000', 'sample', '=', '2000', 'minimum_sample', '=', '2000', 'convergence_check_frequency', '=', '1000', 'a', '=', '2', '.', '0', 'threads', '=', '1']
train
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/models/model.py#L261-L578
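The infer() record above follows the standard emcee burn-in/production pattern: sample, reset the chain, sample again, then summarise the flattened chain with percentiles. Below is a minimal standalone sketch of that pattern; it uses the current emcee 3 API and a toy Gaussian log-probability, so the names log_prob and ndim and the step counts are illustrative assumptions, not sick's actual inference code.

import numpy as np
import emcee

def log_prob(theta):
    # Toy Gaussian posterior standing in for sick's inference.ln_probability.
    return -0.5 * np.sum(theta ** 2)

ndim, nwalkers = 3, 100                        # 100 walkers is the record's default
p0 = 1e-3 * np.random.randn(nwalkers, ndim)    # initial proposal around a point estimate

sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)

state = sampler.run_mcmc(p0, 200)              # burn-in (the record's `burn`, shortened here)
sampler.reset()                                # discard burn-in, as infer() does before production
sampler.run_mcmc(state, 500)                   # production (the record's `sample`)

chain = sampler.get_chain(flat=True)           # shape (nwalkers * nsteps, ndim)
p16, p50, p84 = np.percentile(chain[:, 0], [16, 50, 84])
print("param0 = {:.3f} (+{:.3f} / -{:.3f})".format(p50, p84 - p50, p50 - p16))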
1,320
saltstack/salt
salt/daemons/masterapi.py
access_keys
def access_keys(opts):
    '''
    A key needs to be placed in the filesystem with permissions 0400 so
    clients are required to run as root.
    '''
    # TODO: Need a way to get all available users for systems not supported by pwd module.
    #       For now users pattern matching will not work for publisher_acl.
    keys = {}
    publisher_acl = opts['publisher_acl']
    acl_users = set(publisher_acl.keys())
    if opts.get('user'):
        acl_users.add(opts['user'])
    acl_users.add(salt.utils.user.get_user())
    for user in acl_users:
        log.info('Preparing the %s key for local communication', user)
        key = mk_key(opts, user)
        if key is not None:
            keys[user] = key

    # Check other users matching ACL patterns
    if opts['client_acl_verify'] and HAS_PWD:
        log.profile('Beginning pwd.getpwall() call in masterapi access_keys function')
        for user in pwd.getpwall():
            user = user.pw_name
            if user not in keys and salt.utils.stringutils.check_whitelist_blacklist(user, whitelist=acl_users):
                keys[user] = mk_key(opts, user)
        log.profile('End pwd.getpwall() call in masterapi access_keys function')

    return keys
python
def access_keys(opts):
    '''
    A key needs to be placed in the filesystem with permissions 0400 so
    clients are required to run as root.
    '''
    # TODO: Need a way to get all available users for systems not supported by pwd module.
    #       For now users pattern matching will not work for publisher_acl.
    keys = {}
    publisher_acl = opts['publisher_acl']
    acl_users = set(publisher_acl.keys())
    if opts.get('user'):
        acl_users.add(opts['user'])
    acl_users.add(salt.utils.user.get_user())
    for user in acl_users:
        log.info('Preparing the %s key for local communication', user)
        key = mk_key(opts, user)
        if key is not None:
            keys[user] = key

    # Check other users matching ACL patterns
    if opts['client_acl_verify'] and HAS_PWD:
        log.profile('Beginning pwd.getpwall() call in masterapi access_keys function')
        for user in pwd.getpwall():
            user = user.pw_name
            if user not in keys and salt.utils.stringutils.check_whitelist_blacklist(user, whitelist=acl_users):
                keys[user] = mk_key(opts, user)
        log.profile('End pwd.getpwall() call in masterapi access_keys function')

    return keys
['def', 'access_keys', '(', 'opts', ')', ':', '# TODO: Need a way to get all available users for systems not supported by pwd module.', '# For now users pattern matching will not work for publisher_acl.', 'keys', '=', '{', '}', 'publisher_acl', '=', 'opts', '[', "'publisher_acl'", ']', 'acl_users', '=', 'set', '(', 'publisher_acl', '.', 'keys', '(', ')', ')', 'if', 'opts', '.', 'get', '(', "'user'", ')', ':', 'acl_users', '.', 'add', '(', 'opts', '[', "'user'", ']', ')', 'acl_users', '.', 'add', '(', 'salt', '.', 'utils', '.', 'user', '.', 'get_user', '(', ')', ')', 'for', 'user', 'in', 'acl_users', ':', 'log', '.', 'info', '(', "'Preparing the %s key for local communication'", ',', 'user', ')', 'key', '=', 'mk_key', '(', 'opts', ',', 'user', ')', 'if', 'key', 'is', 'not', 'None', ':', 'keys', '[', 'user', ']', '=', 'key', '# Check other users matching ACL patterns', 'if', 'opts', '[', "'client_acl_verify'", ']', 'and', 'HAS_PWD', ':', 'log', '.', 'profile', '(', "'Beginning pwd.getpwall() call in masterapi access_keys function'", ')', 'for', 'user', 'in', 'pwd', '.', 'getpwall', '(', ')', ':', 'user', '=', 'user', '.', 'pw_name', 'if', 'user', 'not', 'in', 'keys', 'and', 'salt', '.', 'utils', '.', 'stringutils', '.', 'check_whitelist_blacklist', '(', 'user', ',', 'whitelist', '=', 'acl_users', ')', ':', 'keys', '[', 'user', ']', '=', 'mk_key', '(', 'opts', ',', 'user', ')', 'log', '.', 'profile', '(', "'End pwd.getpwall() call in masterapi access_keys function'", ')', 'return', 'keys']
A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root.
['A', 'key', 'needs', 'to', 'be', 'placed', 'in', 'the', 'filesystem', 'with', 'permissions', '0400', 'so', 'clients', 'are', 'required', 'to', 'run', 'as', 'root', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/daemons/masterapi.py#L259-L287
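access_keys() above enumerates system accounts with pwd.getpwall() and keeps those whose names match the publisher_acl patterns. A self-contained sketch of that matching step follows; it substitutes fnmatch-style globbing for Salt's check_whitelist_blacklist helper and uses made-up ACL patterns, so treat it as an illustration of the idea rather than Salt's exact behaviour (pwd is Unix-only).

import fnmatch
import pwd

acl_patterns = {"root", "svc_*"}                  # hypothetical publisher_acl keys

def matching_system_users(patterns):
    """Return system user names matching any ACL pattern (glob-style)."""
    matched = set()
    for entry in pwd.getpwall():                  # same enumeration the record uses
        name = entry.pw_name
        if any(fnmatch.fnmatch(name, pattern) for pattern in patterns):
            matched.add(name)
    return matched

print(sorted(matching_system_users(acl_patterns)))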
1,321
molmod/molmod
molmod/graphs.py
EqualPattern.compare
def compare(self, vertex0, vertex1, subject_graph):
        """Returns true when the two vertices are of the same kind"""
        return (
            self.pattern_graph.vertex_fingerprints[vertex0] ==
            subject_graph.vertex_fingerprints[vertex1]
        ).all()
python
def compare(self, vertex0, vertex1, subject_graph):
        """Returns true when the two vertices are of the same kind"""
        return (
            self.pattern_graph.vertex_fingerprints[vertex0] ==
            subject_graph.vertex_fingerprints[vertex1]
        ).all()
['def', 'compare', '(', 'self', ',', 'vertex0', ',', 'vertex1', ',', 'subject_graph', ')', ':', 'return', '(', 'self', '.', 'pattern_graph', '.', 'vertex_fingerprints', '[', 'vertex0', ']', '==', 'subject_graph', '.', 'vertex_fingerprints', '[', 'vertex1', ']', ')', '.', 'all', '(', ')']
Returns true when the two vertices are of the same kind
['Returns', 'true', 'when', 'the', 'two', 'vertices', 'are', 'of', 'the', 'same', 'kind']
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L1402-L1407
1,322
tonybaloney/wily
wily/operators/__init__.py
resolve_metric_as_tuple
def resolve_metric_as_tuple(metric):
    """
    Resolve metric key to a given target.

    :param metric: the metric name.
    :type metric: ``str``

    :rtype: :class:`Metric`
    """
    if "." in metric:
        _, metric = metric.split(".")

    r = [
        (operator, match)
        for operator, match in ALL_METRICS
        if match[0] == metric
    ]
    if not r or len(r) == 0:
        raise ValueError(f"Metric {metric} not recognised.")
    else:
        return r[0]
python
def resolve_metric_as_tuple(metric):
    """
    Resolve metric key to a given target.

    :param metric: the metric name.
    :type metric: ``str``

    :rtype: :class:`Metric`
    """
    if "." in metric:
        _, metric = metric.split(".")

    r = [
        (operator, match)
        for operator, match in ALL_METRICS
        if match[0] == metric
    ]
    if not r or len(r) == 0:
        raise ValueError(f"Metric {metric} not recognised.")
    else:
        return r[0]
['def', 'resolve_metric_as_tuple', '(', 'metric', ')', ':', 'if', '"."', 'in', 'metric', ':', '_', ',', 'metric', '=', 'metric', '.', 'split', '(', '"."', ')', 'r', '=', '[', '(', 'operator', ',', 'match', ')', 'for', 'operator', ',', 'match', 'in', 'ALL_METRICS', 'if', 'match', '[', '0', ']', '==', 'metric', ']', 'if', 'not', 'r', 'or', 'len', '(', 'r', ')', '==', '0', ':', 'raise', 'ValueError', '(', 'f"Metric {metric} not recognised."', ')', 'else', ':', 'return', 'r', '[', '0', ']']
Resolve metric key to a given target. :param metric: the metric name. :type metric: ``str`` :rtype: :class:`Metric`
['Resolve', 'metric', 'key', 'to', 'a', 'given', 'target', '.']
train
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/operators/__init__.py#L170-L188
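A short usage sketch for resolve_metric_as_tuple() follows. The import path comes from the record itself (wily/operators/__init__.py); the metric name "cyclomatic.complexity" is an assumption about wily's metric naming, and wily must be installed for the snippet to run.

from wily.operators import resolve_metric_as_tuple

# The "operator." prefix is stripped by the function, so only the metric part is matched.
operator, metric = resolve_metric_as_tuple("cyclomatic.complexity")
print(operator, metric)

# Unknown names raise ValueError, as in the record.
try:
    resolve_metric_as_tuple("no_such_metric")
except ValueError as exc:
    print(exc)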
1,323
DLR-RM/RAFCON
source/rafcon/core/global_variable_manager.py
GlobalVariableManager.delete_variable
def delete_variable(self, key):
        """Deletes a global variable

        :param key: the key of the global variable to be deleted
        :raises exceptions.AttributeError: if the global variable does not exist
        """
        key = str(key)
        if self.is_locked(key):
            raise RuntimeError("Global variable is locked")
        with self.__global_lock:
            if key in self.__global_variable_dictionary:
                access_key = self.lock_variable(key, block=True)
                del self.__global_variable_dictionary[key]
                self.unlock_variable(key, access_key)
                del self.__variable_locks[key]
                del self.__variable_references[key]
            else:
                raise AttributeError("Global variable %s does not exist!" % str(key))
        logger.debug("Global variable %s was deleted!" % str(key))
python
def delete_variable(self, key):
        """Deletes a global variable

        :param key: the key of the global variable to be deleted
        :raises exceptions.AttributeError: if the global variable does not exist
        """
        key = str(key)
        if self.is_locked(key):
            raise RuntimeError("Global variable is locked")
        with self.__global_lock:
            if key in self.__global_variable_dictionary:
                access_key = self.lock_variable(key, block=True)
                del self.__global_variable_dictionary[key]
                self.unlock_variable(key, access_key)
                del self.__variable_locks[key]
                del self.__variable_references[key]
            else:
                raise AttributeError("Global variable %s does not exist!" % str(key))
        logger.debug("Global variable %s was deleted!" % str(key))
['def', 'delete_variable', '(', 'self', ',', 'key', ')', ':', 'key', '=', 'str', '(', 'key', ')', 'if', 'self', '.', 'is_locked', '(', 'key', ')', ':', 'raise', 'RuntimeError', '(', '"Global variable is locked"', ')', 'with', 'self', '.', '__global_lock', ':', 'if', 'key', 'in', 'self', '.', '__global_variable_dictionary', ':', 'access_key', '=', 'self', '.', 'lock_variable', '(', 'key', ',', 'block', '=', 'True', ')', 'del', 'self', '.', '__global_variable_dictionary', '[', 'key', ']', 'self', '.', 'unlock_variable', '(', 'key', ',', 'access_key', ')', 'del', 'self', '.', '__variable_locks', '[', 'key', ']', 'del', 'self', '.', '__variable_references', '[', 'key', ']', 'else', ':', 'raise', 'AttributeError', '(', '"Global variable %s does not exist!"', '%', 'str', '(', 'key', ')', ')', 'logger', '.', 'debug', '(', '"Global variable %s was deleted!"', '%', 'str', '(', 'key', ')', ')']
Deletes a global variable :param key: the key of the global variable to be deleted :raises exceptions.AttributeError: if the global variable does not exist
['Deletes', 'a', 'global', 'variable']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/global_variable_manager.py#L159-L179
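delete_variable() above shows a common concurrency pattern: take a global lock, acquire the per-variable lock, delete the value, then drop the lock bookkeeping. The standalone sketch below reproduces that pattern with plain threading primitives; the class and method names are illustrative and are not part of RAFCON's API.

import threading

class VariableStore:
    """Minimal sketch of lock-guarded deletion, loosely mirroring the record."""

    def __init__(self):
        self._global_lock = threading.Lock()
        self._values = {}
        self._locks = {}

    def set(self, key, value):
        with self._global_lock:
            self._values[key] = value
            self._locks.setdefault(key, threading.Lock())

    def delete(self, key):
        with self._global_lock:
            if key not in self._values:
                raise AttributeError("Variable %s does not exist!" % key)
            with self._locks[key]:            # hold the per-variable lock while deleting
                del self._values[key]
            del self._locks[key]              # then remove the lock bookkeeping

store = VariableStore()
store.set("speed", 42)
store.delete("speed")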
1,324
spacetelescope/pysynphot
commissioning/extrap/extrap.py
classify_file
def classify_file(f):
    """Examine the column names to determine which type of file this is.
    Return a tuple:
       retvalue[0] = "file is non-parameterized"
       retvalue[1] = "file contains error column"
    """
    cols=f[1].columns
    if len(cols) == 2:
        #Then we must have a simple file
        return (True,False)
    elif len(cols) == 3 and ('ERROR' in cols.names):
        return (True,True)
    elif len(cols) > 2 and ('ERROR' not in cols.names):
        return (True,False)
    else:
        return (False,True)
python
def classify_file(f):
    """Examine the column names to determine which type of file this is.
    Return a tuple:
       retvalue[0] = "file is non-parameterized"
       retvalue[1] = "file contains error column"
    """
    cols=f[1].columns
    if len(cols) == 2:
        #Then we must have a simple file
        return (True,False)
    elif len(cols) == 3 and ('ERROR' in cols.names):
        return (True,True)
    elif len(cols) > 2 and ('ERROR' not in cols.names):
        return (True,False)
    else:
        return (False,True)
['def', 'classify_file', '(', 'f', ')', ':', 'cols', '=', 'f', '[', '1', ']', '.', 'columns', 'if', 'len', '(', 'cols', ')', '==', '2', ':', '#Then we must have a simple file', 'return', '(', 'True', ',', 'False', ')', 'elif', 'len', '(', 'cols', ')', '==', '3', 'and', '(', "'ERROR'", 'in', 'cols', '.', 'names', ')', ':', 'return', '(', 'True', ',', 'True', ')', 'elif', 'len', '(', 'cols', ')', '>', '2', 'and', '(', "'ERROR'", 'not', 'in', 'cols', '.', 'names', ')', ':', 'return', '(', 'True', ',', 'False', ')', 'else', ':', 'return', '(', 'False', ',', 'True', ')']
Examine the column names to determine which type of file this is. Return a tuple: retvalue[0] = "file is non-parameterized" retvalue[1] = "file contains error column"
['Examine', 'the', 'column', 'names', 'to', 'determine', 'which', 'type', 'of', 'file', 'this', 'is', '.', 'Return', 'a', 'tuple', ':', 'retvalue', '[', '0', ']', '=', 'file', 'is', 'non', '-', 'parameterized', 'retvalue', '[', '1', ']', '=', 'file', 'contains', 'error', 'column']
train
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/commissioning/extrap/extrap.py#L22-L37
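classify_file() above only inspects the column definitions of HDU 1 of an already-opened FITS file. The sketch below builds a tiny two-column table in memory and classifies it; it uses astropy.io.fits as a stand-in for the pyfits-style interface pysynphot targets, and it assumes the record's classify_file is defined in the same session, so both points are assumptions.

import numpy as np
from astropy.io import fits   # assumed stand-in for the pyfits-style interface

wave = fits.Column(name='WAVELENGTH', format='E', array=np.linspace(1000., 2000., 5))
thru = fits.Column(name='THROUGHPUT', format='E', array=np.ones(5))
hdulist = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns([wave, thru])])

# Two columns and no ERROR column -> non-parameterized file without an error column.
print(classify_file(hdulist))   # expected: (True, False)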
1,325
pyvisa/pyvisa
pyvisa/shell.py
VisaShell.do_open
def do_open(self, args):
        """Open resource by number, resource name or alias: open 3"""
        if not args:
            print('A resource name must be specified.')
            return
        if self.current:
            print('You can only open one resource at a time. Please close the current one first.')
            return

        if args.isdigit():
            try:
                args = self.resources[int(args)][0]
            except IndexError:
                print('Not a valid resource number. Use the command "list".')
                return

        try:
            self.current = self.resource_manager.open_resource(args)
            print('{} has been opened.\n'
                  'You can talk to the device using "write", "read" or "query".\n'
                  'The default end of message is added to each message.'.format(args))

            self.py_attr = []
            self.vi_attr = []
            for attr in getattr(self.current, 'visa_attributes_classes', ()):
                if attr.py_name:
                    self.py_attr.append(attr.py_name)
                self.vi_attr.append(attr.visa_name)

            self.prompt = '(open) '
        except Exception as e:
            print(e)
python
def do_open(self, args):
        """Open resource by number, resource name or alias: open 3"""
        if not args:
            print('A resource name must be specified.')
            return
        if self.current:
            print('You can only open one resource at a time. Please close the current one first.')
            return

        if args.isdigit():
            try:
                args = self.resources[int(args)][0]
            except IndexError:
                print('Not a valid resource number. Use the command "list".')
                return

        try:
            self.current = self.resource_manager.open_resource(args)
            print('{} has been opened.\n'
                  'You can talk to the device using "write", "read" or "query".\n'
                  'The default end of message is added to each message.'.format(args))

            self.py_attr = []
            self.vi_attr = []
            for attr in getattr(self.current, 'visa_attributes_classes', ()):
                if attr.py_name:
                    self.py_attr.append(attr.py_name)
                self.vi_attr.append(attr.visa_name)

            self.prompt = '(open) '
        except Exception as e:
            print(e)
['def', 'do_open', '(', 'self', ',', 'args', ')', ':', 'if', 'not', 'args', ':', 'print', '(', "'A resource name must be specified.'", ')', 'return', 'if', 'self', '.', 'current', ':', 'print', '(', "'You can only open one resource at a time. Please close the current one first.'", ')', 'return', 'if', 'args', '.', 'isdigit', '(', ')', ':', 'try', ':', 'args', '=', 'self', '.', 'resources', '[', 'int', '(', 'args', ')', ']', '[', '0', ']', 'except', 'IndexError', ':', 'print', '(', '\'Not a valid resource number. Use the command "list".\'', ')', 'return', 'try', ':', 'self', '.', 'current', '=', 'self', '.', 'resource_manager', '.', 'open_resource', '(', 'args', ')', 'print', '(', "'{} has been opened.\\n'", '\'You can talk to the device using "write", "read" or "query".\\n\'', "'The default end of message is added to each message.'", '.', 'format', '(', 'args', ')', ')', 'self', '.', 'py_attr', '=', '[', ']', 'self', '.', 'vi_attr', '=', '[', ']', 'for', 'attr', 'in', 'getattr', '(', 'self', '.', 'current', ',', "'visa_attributes_classes'", ',', '(', ')', ')', ':', 'if', 'attr', '.', 'py_name', ':', 'self', '.', 'py_attr', '.', 'append', '(', 'attr', '.', 'py_name', ')', 'self', '.', 'vi_attr', '.', 'append', '(', 'attr', '.', 'visa_name', ')', 'self', '.', 'prompt', '=', "'(open) '", 'except', 'Exception', 'as', 'e', ':', 'print', '(', 'e', ')']
Open resource by number, resource name or alias: open 3
['Open', 'resource', 'by', 'number', 'resource', 'name', 'or', 'alias', ':', 'open', '3']
train
https://github.com/pyvisa/pyvisa/blob/b8b2d4371e1f00782856aa9176ff1ced6bcb3798/pyvisa/shell.py#L138-L171
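Outside the interactive shell, the open/query flow wrapped by do_open() is a few lines of plain PyVISA, as sketched below. The address is a placeholder, and a VISA backend plus a real instrument are needed for the query to succeed.

import pyvisa

rm = pyvisa.ResourceManager()
print(rm.list_resources())                    # what the shell's "list" command enumerates

inst = rm.open_resource('GPIB0::12::INSTR')   # placeholder address; pick one from list_resources()
print(inst.query('*IDN?'))                    # query = write + read, as in the shell
inst.close()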
1,326
Telefonica/toolium
toolium/driver_wrapper.py
DriverWrapper.configure_visual_baseline
def configure_visual_baseline(self):
        """Configure baseline directory"""
        # Get baseline name
        baseline_name = self.config.get_optional('VisualTests', 'baseline_name', '{Driver_type}')
        for section in self.config.sections():
            for option in self.config.options(section):
                option_value = self.config.get(section, option)
                baseline_name = baseline_name.replace('{{{0}_{1}}}'.format(section, option), option_value)

        # Configure baseline directory if baseline name has changed
        if self.baseline_name != baseline_name:
            self.baseline_name = baseline_name
            self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
                                                          get_valid_filename(baseline_name))
python
def configure_visual_baseline(self):
        """Configure baseline directory"""
        # Get baseline name
        baseline_name = self.config.get_optional('VisualTests', 'baseline_name', '{Driver_type}')
        for section in self.config.sections():
            for option in self.config.options(section):
                option_value = self.config.get(section, option)
                baseline_name = baseline_name.replace('{{{0}_{1}}}'.format(section, option), option_value)

        # Configure baseline directory if baseline name has changed
        if self.baseline_name != baseline_name:
            self.baseline_name = baseline_name
            self.visual_baseline_directory = os.path.join(DriverWrappersPool.visual_baseline_directory,
                                                          get_valid_filename(baseline_name))
['def', 'configure_visual_baseline', '(', 'self', ')', ':', '# Get baseline name', 'baseline_name', '=', 'self', '.', 'config', '.', 'get_optional', '(', "'VisualTests'", ',', "'baseline_name'", ',', "'{Driver_type}'", ')', 'for', 'section', 'in', 'self', '.', 'config', '.', 'sections', '(', ')', ':', 'for', 'option', 'in', 'self', '.', 'config', '.', 'options', '(', 'section', ')', ':', 'option_value', '=', 'self', '.', 'config', '.', 'get', '(', 'section', ',', 'option', ')', 'baseline_name', '=', 'baseline_name', '.', 'replace', '(', "'{{{0}_{1}}}'", '.', 'format', '(', 'section', ',', 'option', ')', ',', 'option_value', ')', '# Configure baseline directory if baseline name has changed', 'if', 'self', '.', 'baseline_name', '!=', 'baseline_name', ':', 'self', '.', 'baseline_name', '=', 'baseline_name', 'self', '.', 'visual_baseline_directory', '=', 'os', '.', 'path', '.', 'join', '(', 'DriverWrappersPool', '.', 'visual_baseline_directory', ',', 'get_valid_filename', '(', 'baseline_name', ')', ')']
Configure baseline directory
['Configure', 'baseline', 'directory']
train
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/driver_wrapper.py#L133-L146
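configure_visual_baseline() above expands '{Section_option}' placeholders in the baseline name from the test configuration. The sketch below runs the same replacement loop over a plain configparser object with made-up sections and options, so the names are illustrative rather than toolium's defaults.

import configparser

config = configparser.ConfigParser()
config.read_dict({'Driver': {'type': 'firefox'}, 'Server': {'enabled': 'false'}})

baseline_name = '{Driver_type}-{Server_enabled}'      # template in the style of '{Driver_type}'
for section in config.sections():
    for option in config.options(section):
        value = config.get(section, option)
        baseline_name = baseline_name.replace('{{{0}_{1}}}'.format(section, option), value)

print(baseline_name)   # -> 'firefox-false'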
1,327
saltstack/salt
salt/states/zk_concurrency.py
lock
def lock(name,
         zk_hosts=None,
         identifier=None,
         max_concurrency=1,
         timeout=None,
         ephemeral_lease=False,
         profile=None,
         scheme=None,
         username=None,
         password=None,
         default_acl=None):
    '''
    Block state execution until you are able to get the lock (or hit the timeout)
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    conn_kwargs = {'profile': profile,
                   'scheme': scheme,
                   'username': username,
                   'password': password,
                   'default_acl': default_acl}

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Attempt to acquire lock'
        return ret

    if identifier is None:
        identifier = __grains__['id']

    locked = __salt__['zk_concurrency.lock'](name,
                                             zk_hosts,
                                             identifier=identifier,
                                             max_concurrency=max_concurrency,
                                             timeout=timeout,
                                             ephemeral_lease=ephemeral_lease,
                                             **conn_kwargs)
    if locked:
        ret['result'] = True
        ret['comment'] = 'lock acquired'
    else:
        ret['comment'] = 'Unable to acquire lock'

    return ret
python
def lock(name,
         zk_hosts=None,
         identifier=None,
         max_concurrency=1,
         timeout=None,
         ephemeral_lease=False,
         profile=None,
         scheme=None,
         username=None,
         password=None,
         default_acl=None):
    '''
    Block state execution until you are able to get the lock (or hit the timeout)
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    conn_kwargs = {'profile': profile,
                   'scheme': scheme,
                   'username': username,
                   'password': password,
                   'default_acl': default_acl}

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Attempt to acquire lock'
        return ret

    if identifier is None:
        identifier = __grains__['id']

    locked = __salt__['zk_concurrency.lock'](name,
                                             zk_hosts,
                                             identifier=identifier,
                                             max_concurrency=max_concurrency,
                                             timeout=timeout,
                                             ephemeral_lease=ephemeral_lease,
                                             **conn_kwargs)
    if locked:
        ret['result'] = True
        ret['comment'] = 'lock acquired'
    else:
        ret['comment'] = 'Unable to acquire lock'

    return ret
['def', 'lock', '(', 'name', ',', 'zk_hosts', '=', 'None', ',', 'identifier', '=', 'None', ',', 'max_concurrency', '=', '1', ',', 'timeout', '=', 'None', ',', 'ephemeral_lease', '=', 'False', ',', 'profile', '=', 'None', ',', 'scheme', '=', 'None', ',', 'username', '=', 'None', ',', 'password', '=', 'None', ',', 'default_acl', '=', 'None', ')', ':', 'ret', '=', '{', "'name'", ':', 'name', ',', "'changes'", ':', '{', '}', ',', "'result'", ':', 'False', ',', "'comment'", ':', "''", '}', 'conn_kwargs', '=', '{', "'profile'", ':', 'profile', ',', "'scheme'", ':', 'scheme', ',', "'username'", ':', 'username', ',', "'password'", ':', 'password', ',', "'default_acl'", ':', 'default_acl', '}', 'if', '__opts__', '[', "'test'", ']', ':', 'ret', '[', "'result'", ']', '=', 'None', 'ret', '[', "'comment'", ']', '=', "'Attempt to acquire lock'", 'return', 'ret', 'if', 'identifier', 'is', 'None', ':', 'identifier', '=', '__grains__', '[', "'id'", ']', 'locked', '=', '__salt__', '[', "'zk_concurrency.lock'", ']', '(', 'name', ',', 'zk_hosts', ',', 'identifier', '=', 'identifier', ',', 'max_concurrency', '=', 'max_concurrency', ',', 'timeout', '=', 'timeout', ',', 'ephemeral_lease', '=', 'ephemeral_lease', ',', '*', '*', 'conn_kwargs', ')', 'if', 'locked', ':', 'ret', '[', "'result'", ']', '=', 'True', 'ret', '[', "'comment'", ']', '=', "'lock acquired'", 'else', ':', 'ret', '[', "'comment'", ']', '=', "'Unable to acquire lock'", 'return', 'ret']
Block state execution until you are able to get the lock (or hit the timeout)
['Block', 'state', 'execution', 'until', 'you', 'are', 'able', 'to', 'get', 'the', 'lock', '(', 'or', 'hit', 'the', 'timeout', ')']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zk_concurrency.py#L69-L112
1,328
trevisanj/a99
a99/gui/a_WConfigEditor.py
WConfigEditor._update_fobj
def _update_fobj(self):
        """Updates fobj from GUI. Opposite of _update_gui()."""
        # print("PPPPPPPPPPPPPPPPPPPRINTANDO O STACK")
        # traceback.print_stack()

        emsg, flag_error = "", False
        fieldname = None
        try:
            self._before_update_fobj()

            for item in self._map:
                self._f.obj[item.fieldname] = item.get_value()

            self._after_update_fobj()

        except Exception as E:
            flag_error = True
            if fieldname is not None:
                emsg = "Field '{}': {}".format(fieldname, str(E))
            else:
                emsg = str(E)
            self.add_log_error(emsg)

        self._flag_valid = not flag_error
        if not flag_error:
            self.status("")
python
def _update_fobj(self): """Updates fobj from GUI. Opposite of _update_gui().""" # print("PPPPPPPPPPPPPPPPPPPRINTANDO O STACK") # traceback.print_stack() emsg, flag_error = "", False fieldname = None try: self._before_update_fobj() for item in self._map: self._f.obj[item.fieldname] = item.get_value() self._after_update_fobj() except Exception as E: flag_error = True if fieldname is not None: emsg = "Field '{}': {}".format(fieldname, str(E)) else: emsg = str(E) self.add_log_error(emsg) self._flag_valid = not flag_error if not flag_error: self.status("")
['def', '_update_fobj', '(', 'self', ')', ':', '# print("PPPPPPPPPPPPPPPPPPPRINTANDO O STACK")\r', '# traceback.print_stack()\r', 'emsg', ',', 'flag_error', '=', '""', ',', 'False', 'fieldname', '=', 'None', 'try', ':', 'self', '.', '_before_update_fobj', '(', ')', 'for', 'item', 'in', 'self', '.', '_map', ':', 'self', '.', '_f', '.', 'obj', '[', 'item', '.', 'fieldname', ']', '=', 'item', '.', 'get_value', '(', ')', 'self', '.', '_after_update_fobj', '(', ')', 'except', 'Exception', 'as', 'E', ':', 'flag_error', '=', 'True', 'if', 'fieldname', 'is', 'not', 'None', ':', 'emsg', '=', '"Field \'{}\': {}"', '.', 'format', '(', 'fieldname', ',', 'str', '(', 'E', ')', ')', 'else', ':', 'emsg', '=', 'str', '(', 'E', ')', 'self', '.', 'add_log_error', '(', 'emsg', ')', 'self', '.', '_flag_valid', '=', 'not', 'flag_error', 'if', 'not', 'flag_error', ':', 'self', '.', 'status', '(', '""', ')']
Updates fobj from GUI. Opposite of _update_gui().
['Updates', 'fobj', 'from', 'GUI', '.', 'Opposite', 'of', '_update_gui', '()', '.']
train
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/gui/a_WConfigEditor.py#L119-L146
1,329
bukun/TorCMS
torcms/model/post_model.py
MPost.query_most_pic
def query_most_pic(num, kind='1'): ''' Query most pics. ''' return TabPost.select().where( (TabPost.kind == kind) & (TabPost.logo != "") ).order_by(TabPost.view_count.desc()).limit(num)
python
def query_most_pic(num, kind='1'): ''' Query most pics. ''' return TabPost.select().where( (TabPost.kind == kind) & (TabPost.logo != "") ).order_by(TabPost.view_count.desc()).limit(num)
['def', 'query_most_pic', '(', 'num', ',', 'kind', '=', "'1'", ')', ':', 'return', 'TabPost', '.', 'select', '(', ')', '.', 'where', '(', '(', 'TabPost', '.', 'kind', '==', 'kind', ')', '&', '(', 'TabPost', '.', 'logo', '!=', '""', ')', ')', '.', 'order_by', '(', 'TabPost', '.', 'view_count', '.', 'desc', '(', ')', ')', '.', 'limit', '(', 'num', ')']
Query most pics.
['Query', 'most', 'pics', '.']
train
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_model.py#L352-L358
1,330
molmod/molmod
molmod/graphs.py
Graph.full_match
def full_match(self, other): """Find the mapping between vertex indexes in self and other. This also works on disconnected graphs. Derived classes should just implement get_vertex_string and get_edge_string to make this method aware of the different nature of certain vertices. In case molecules, this would make the algorithm sensitive to atom numbers etc. """ # we need normalize subgraphs because these graphs are used as patterns. graphs0 = [ self.get_subgraph(group, normalize=True) for group in self.independent_vertices ] graphs1 = [ other.get_subgraph(group) for group in other.independent_vertices ] if len(graphs0) != len(graphs1): return matches = [] for graph0 in graphs0: pattern = EqualPattern(graph0) found_match = False for i, graph1 in enumerate(graphs1): local_matches = list(GraphSearch(pattern)(graph1, one_match=True)) if len(local_matches) == 1: match = local_matches[0] # we need to restore the relation between the normalized # graph0 and its original indexes old_to_new = OneToOne(( (j, i) for i, j in enumerate(graph0._old_vertex_indexes) )) matches.append(match * old_to_new) del graphs1[i] found_match = True break if not found_match: return result = OneToOne() for match in matches: result.add_relations(match.forward.items()) return result
python
def full_match(self, other): """Find the mapping between vertex indexes in self and other. This also works on disconnected graphs. Derived classes should just implement get_vertex_string and get_edge_string to make this method aware of the different nature of certain vertices. In case molecules, this would make the algorithm sensitive to atom numbers etc. """ # we need normalize subgraphs because these graphs are used as patterns. graphs0 = [ self.get_subgraph(group, normalize=True) for group in self.independent_vertices ] graphs1 = [ other.get_subgraph(group) for group in other.independent_vertices ] if len(graphs0) != len(graphs1): return matches = [] for graph0 in graphs0: pattern = EqualPattern(graph0) found_match = False for i, graph1 in enumerate(graphs1): local_matches = list(GraphSearch(pattern)(graph1, one_match=True)) if len(local_matches) == 1: match = local_matches[0] # we need to restore the relation between the normalized # graph0 and its original indexes old_to_new = OneToOne(( (j, i) for i, j in enumerate(graph0._old_vertex_indexes) )) matches.append(match * old_to_new) del graphs1[i] found_match = True break if not found_match: return result = OneToOne() for match in matches: result.add_relations(match.forward.items()) return result
['def', 'full_match', '(', 'self', ',', 'other', ')', ':', '# we need normalize subgraphs because these graphs are used as patterns.', 'graphs0', '=', '[', 'self', '.', 'get_subgraph', '(', 'group', ',', 'normalize', '=', 'True', ')', 'for', 'group', 'in', 'self', '.', 'independent_vertices', ']', 'graphs1', '=', '[', 'other', '.', 'get_subgraph', '(', 'group', ')', 'for', 'group', 'in', 'other', '.', 'independent_vertices', ']', 'if', 'len', '(', 'graphs0', ')', '!=', 'len', '(', 'graphs1', ')', ':', 'return', 'matches', '=', '[', ']', 'for', 'graph0', 'in', 'graphs0', ':', 'pattern', '=', 'EqualPattern', '(', 'graph0', ')', 'found_match', '=', 'False', 'for', 'i', ',', 'graph1', 'in', 'enumerate', '(', 'graphs1', ')', ':', 'local_matches', '=', 'list', '(', 'GraphSearch', '(', 'pattern', ')', '(', 'graph1', ',', 'one_match', '=', 'True', ')', ')', 'if', 'len', '(', 'local_matches', ')', '==', '1', ':', 'match', '=', 'local_matches', '[', '0', ']', '# we need to restore the relation between the normalized', '# graph0 and its original indexes', 'old_to_new', '=', 'OneToOne', '(', '(', '(', 'j', ',', 'i', ')', 'for', 'i', ',', 'j', 'in', 'enumerate', '(', 'graph0', '.', '_old_vertex_indexes', ')', ')', ')', 'matches', '.', 'append', '(', 'match', '*', 'old_to_new', ')', 'del', 'graphs1', '[', 'i', ']', 'found_match', '=', 'True', 'break', 'if', 'not', 'found_match', ':', 'return', 'result', '=', 'OneToOne', '(', ')', 'for', 'match', 'in', 'matches', ':', 'result', '.', 'add_relations', '(', 'match', '.', 'forward', '.', 'items', '(', ')', ')', 'return', 'result']
Find the mapping between vertex indexes in self and other. This also works on disconnected graphs. Derived classes should just implement get_vertex_string and get_edge_string to make this method aware of the different nature of certain vertices. In case molecules, this would make the algorithm sensitive to atom numbers etc.
['Find', 'the', 'mapping', 'between', 'vertex', 'indexes', 'in', 'self', 'and', 'other', '.']
train
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/graphs.py#L782-L828
1,331
openego/eDisGo
edisgo/grid/network.py
Results.save
def save(self, directory, parameters='all'): """ Saves results to disk. Depending on which results are selected and if they exist, the following directories and files are created: * `powerflow_results` directory * `voltages_pu.csv` See :py:attr:`~pfa_v_mag_pu` for more information. * `currents.csv` See :func:`~i_res` for more information. * `active_powers.csv` See :py:attr:`~pfa_p` for more information. * `reactive_powers.csv` See :py:attr:`~pfa_q` for more information. * `apparent_powers.csv` See :func:`~s_res` for more information. * `grid_losses.csv` See :py:attr:`~grid_losses` for more information. * `hv_mv_exchanges.csv` See :py:attr:`~hv_mv_exchanges` for more information. * `pypsa_network` directory See :py:func:`pypsa.Network.export_to_csv_folder` * `grid_expansion_results` directory * `grid_expansion_costs.csv` See :py:attr:`~grid_expansion_costs` for more information. * `equipment_changes.csv` See :py:attr:`~equipment_changes` for more information. * `unresolved_issues.csv` See :py:attr:`~unresolved_issues` for more information. * `curtailment_results` directory Files depend on curtailment specifications. There will be one file for each curtailment specification, that is for every key in :py:attr:`~curtailment` dictionary. * `storage_integration_results` directory * `storages.csv` See :func:`~storages` for more information. Parameters ---------- directory : :obj:`str` Directory to save the results in. parameters : :obj:`str` or :obj:`list` of :obj:`str` Specifies which results will be saved. By default all results are saved. To only save certain results set `parameters` to one of the following options or choose several options by providing a list: * 'pypsa_network' * 'powerflow_results' * 'grid_expansion_results' * 'curtailment_results' * 'storage_integration_results' """ def _save_power_flow_results(target_dir): if self.pfa_v_mag_pu is not None: # create directory os.makedirs(target_dir, exist_ok=True) # voltage self.pfa_v_mag_pu.to_csv( os.path.join(target_dir, 'voltages_pu.csv')) # current self.i_res.to_csv( os.path.join(target_dir, 'currents.csv')) # active power self.pfa_p.to_csv( os.path.join(target_dir, 'active_powers.csv')) # reactive power self.pfa_q.to_csv( os.path.join(target_dir, 'reactive_powers.csv')) # apparent power self.s_res().to_csv( os.path.join(target_dir, 'apparent_powers.csv')) # grid losses self.grid_losses.to_csv( os.path.join(target_dir, 'grid_losses.csv')) # grid exchanges self.hv_mv_exchanges.to_csv(os.path.join( target_dir, 'hv_mv_exchanges.csv')) def _save_pypsa_network(target_dir): if self.network.pypsa: # create directory os.makedirs(target_dir, exist_ok=True) self.network.pypsa.export_to_csv_folder(target_dir) def _save_grid_expansion_results(target_dir): if self.grid_expansion_costs is not None: # create directory os.makedirs(target_dir, exist_ok=True) # grid expansion costs self.grid_expansion_costs.to_csv(os.path.join( target_dir, 'grid_expansion_costs.csv')) # unresolved issues pd.DataFrame(self.unresolved_issues).to_csv(os.path.join( target_dir, 'unresolved_issues.csv')) # equipment changes self.equipment_changes.to_csv(os.path.join( target_dir, 'equipment_changes.csv')) def _save_curtailment_results(target_dir): if self.curtailment is not None: # create directory os.makedirs(target_dir, exist_ok=True) for key, curtailment_df in self.curtailment.items(): if type(key) == tuple: type_prefix = '-'.join([key[0], str(key[1])]) elif type(key) == str: type_prefix = key else: raise KeyError("Unknown key type {} for key {}".format( type(key), key)) filename = os.path.join( target_dir, '{}.csv'.format(type_prefix)) curtailment_df.to_csv(filename, index_label=type_prefix) def _save_storage_integration_results(target_dir): storages = self.storages if not storages.empty: # create directory os.makedirs(target_dir, exist_ok=True) # general storage information storages.to_csv(os.path.join(target_dir, 'storages.csv')) # storages time series ts_p, ts_q = self.storages_timeseries() ts_p.to_csv(os.path.join( target_dir, 'storages_active_power.csv')) ts_q.to_csv(os.path.join( target_dir, 'storages_reactive_power.csv')) if not self.storages_costs_reduction is None: self.storages_costs_reduction.to_csv( os.path.join(target_dir, 'storages_costs_reduction.csv')) # dictionary with function to call to save each parameter func_dict = { 'powerflow_results': _save_power_flow_results, 'pypsa_network': _save_pypsa_network, 'grid_expansion_results': _save_grid_expansion_results, 'curtailment_results': _save_curtailment_results, 'storage_integration_results': _save_storage_integration_results } # if string is given convert to list if isinstance(parameters, str): if parameters == 'all': parameters = ['powerflow_results', 'pypsa_network', 'grid_expansion_results', 'curtailment_results', 'storage_integration_results'] else: parameters = [parameters] # save each parameter for parameter in parameters: try: func_dict[parameter](os.path.join(directory, parameter)) except KeyError: message = "Invalid input {} for `parameters` when saving " \ "results. Must be any or a list of the following: " \ "'pypsa_network', 'powerflow_results', " \ "'grid_expansion_results', 'curtailment_results', " \ "'storage_integration_results'.".format(parameter) logger.error(message) raise KeyError(message) except: raise # save measures pd.DataFrame(data={'measure': self.measures}).to_csv( os.path.join(directory, 'measures.csv')) # save configs with open(os.path.join(directory, 'configs.csv'), 'w') as f: writer = csv.writer(f) rows = [ ['{}'.format(key)] + [value for item in values.items() for value in item] for key, values in self.network.config._data.items()] writer.writerows(rows)
python
def save(self, directory, parameters='all'): """ Saves results to disk. Depending on which results are selected and if they exist, the following directories and files are created: * `powerflow_results` directory * `voltages_pu.csv` See :py:attr:`~pfa_v_mag_pu` for more information. * `currents.csv` See :func:`~i_res` for more information. * `active_powers.csv` See :py:attr:`~pfa_p` for more information. * `reactive_powers.csv` See :py:attr:`~pfa_q` for more information. * `apparent_powers.csv` See :func:`~s_res` for more information. * `grid_losses.csv` See :py:attr:`~grid_losses` for more information. * `hv_mv_exchanges.csv` See :py:attr:`~hv_mv_exchanges` for more information. * `pypsa_network` directory See :py:func:`pypsa.Network.export_to_csv_folder` * `grid_expansion_results` directory * `grid_expansion_costs.csv` See :py:attr:`~grid_expansion_costs` for more information. * `equipment_changes.csv` See :py:attr:`~equipment_changes` for more information. * `unresolved_issues.csv` See :py:attr:`~unresolved_issues` for more information. * `curtailment_results` directory Files depend on curtailment specifications. There will be one file for each curtailment specification, that is for every key in :py:attr:`~curtailment` dictionary. * `storage_integration_results` directory * `storages.csv` See :func:`~storages` for more information. Parameters ---------- directory : :obj:`str` Directory to save the results in. parameters : :obj:`str` or :obj:`list` of :obj:`str` Specifies which results will be saved. By default all results are saved. To only save certain results set `parameters` to one of the following options or choose several options by providing a list: * 'pypsa_network' * 'powerflow_results' * 'grid_expansion_results' * 'curtailment_results' * 'storage_integration_results' """ def _save_power_flow_results(target_dir): if self.pfa_v_mag_pu is not None: # create directory os.makedirs(target_dir, exist_ok=True) # voltage self.pfa_v_mag_pu.to_csv( os.path.join(target_dir, 'voltages_pu.csv')) # current self.i_res.to_csv( os.path.join(target_dir, 'currents.csv')) # active power self.pfa_p.to_csv( os.path.join(target_dir, 'active_powers.csv')) # reactive power self.pfa_q.to_csv( os.path.join(target_dir, 'reactive_powers.csv')) # apparent power self.s_res().to_csv( os.path.join(target_dir, 'apparent_powers.csv')) # grid losses self.grid_losses.to_csv( os.path.join(target_dir, 'grid_losses.csv')) # grid exchanges self.hv_mv_exchanges.to_csv(os.path.join( target_dir, 'hv_mv_exchanges.csv')) def _save_pypsa_network(target_dir): if self.network.pypsa: # create directory os.makedirs(target_dir, exist_ok=True) self.network.pypsa.export_to_csv_folder(target_dir) def _save_grid_expansion_results(target_dir): if self.grid_expansion_costs is not None: # create directory os.makedirs(target_dir, exist_ok=True) # grid expansion costs self.grid_expansion_costs.to_csv(os.path.join( target_dir, 'grid_expansion_costs.csv')) # unresolved issues pd.DataFrame(self.unresolved_issues).to_csv(os.path.join( target_dir, 'unresolved_issues.csv')) # equipment changes self.equipment_changes.to_csv(os.path.join( target_dir, 'equipment_changes.csv')) def _save_curtailment_results(target_dir): if self.curtailment is not None: # create directory os.makedirs(target_dir, exist_ok=True) for key, curtailment_df in self.curtailment.items(): if type(key) == tuple: type_prefix = '-'.join([key[0], str(key[1])]) elif type(key) == str: type_prefix = key else: raise KeyError("Unknown key type {} for key {}".format( type(key), key)) filename = os.path.join( target_dir, '{}.csv'.format(type_prefix)) curtailment_df.to_csv(filename, index_label=type_prefix) def _save_storage_integration_results(target_dir): storages = self.storages if not storages.empty: # create directory os.makedirs(target_dir, exist_ok=True) # general storage information storages.to_csv(os.path.join(target_dir, 'storages.csv')) # storages time series ts_p, ts_q = self.storages_timeseries() ts_p.to_csv(os.path.join( target_dir, 'storages_active_power.csv')) ts_q.to_csv(os.path.join( target_dir, 'storages_reactive_power.csv')) if not self.storages_costs_reduction is None: self.storages_costs_reduction.to_csv( os.path.join(target_dir, 'storages_costs_reduction.csv')) # dictionary with function to call to save each parameter func_dict = { 'powerflow_results': _save_power_flow_results, 'pypsa_network': _save_pypsa_network, 'grid_expansion_results': _save_grid_expansion_results, 'curtailment_results': _save_curtailment_results, 'storage_integration_results': _save_storage_integration_results } # if string is given convert to list if isinstance(parameters, str): if parameters == 'all': parameters = ['powerflow_results', 'pypsa_network', 'grid_expansion_results', 'curtailment_results', 'storage_integration_results'] else: parameters = [parameters] # save each parameter for parameter in parameters: try: func_dict[parameter](os.path.join(directory, parameter)) except KeyError: message = "Invalid input {} for `parameters` when saving " \ "results. Must be any or a list of the following: " \ "'pypsa_network', 'powerflow_results', " \ "'grid_expansion_results', 'curtailment_results', " \ "'storage_integration_results'.".format(parameter) logger.error(message) raise KeyError(message) except: raise # save measures pd.DataFrame(data={'measure': self.measures}).to_csv( os.path.join(directory, 'measures.csv')) # save configs with open(os.path.join(directory, 'configs.csv'), 'w') as f: writer = csv.writer(f) rows = [ ['{}'.format(key)] + [value for item in values.items() for value in item] for key, values in self.network.config._data.items()] writer.writerows(rows)
['def', 'save', '(', 'self', ',', 'directory', ',', 'parameters', '=', "'all'", ')', ':', 'def', '_save_power_flow_results', '(', 'target_dir', ')', ':', 'if', 'self', '.', 'pfa_v_mag_pu', 'is', 'not', 'None', ':', '# create directory', 'os', '.', 'makedirs', '(', 'target_dir', ',', 'exist_ok', '=', 'True', ')', '# voltage', 'self', '.', 'pfa_v_mag_pu', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'voltages_pu.csv'", ')', ')', '# current', 'self', '.', 'i_res', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'currents.csv'", ')', ')', '# active power', 'self', '.', 'pfa_p', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'active_powers.csv'", ')', ')', '# reactive power', 'self', '.', 'pfa_q', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'reactive_powers.csv'", ')', ')', '# apparent power', 'self', '.', 's_res', '(', ')', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'apparent_powers.csv'", ')', ')', '# grid losses', 'self', '.', 'grid_losses', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'grid_losses.csv'", ')', ')', '# grid exchanges', 'self', '.', 'hv_mv_exchanges', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'hv_mv_exchanges.csv'", ')', ')', 'def', '_save_pypsa_network', '(', 'target_dir', ')', ':', 'if', 'self', '.', 'network', '.', 'pypsa', ':', '# create directory', 'os', '.', 'makedirs', '(', 'target_dir', ',', 'exist_ok', '=', 'True', ')', 'self', '.', 'network', '.', 'pypsa', '.', 'export_to_csv_folder', '(', 'target_dir', ')', 'def', '_save_grid_expansion_results', '(', 'target_dir', ')', ':', 'if', 'self', '.', 'grid_expansion_costs', 'is', 'not', 'None', ':', '# create directory', 'os', '.', 'makedirs', '(', 'target_dir', ',', 'exist_ok', '=', 'True', ')', '# grid expansion costs', 'self', '.', 'grid_expansion_costs', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'grid_expansion_costs.csv'", ')', ')', '# unresolved issues', 'pd', '.', 'DataFrame', '(', 'self', '.', 'unresolved_issues', ')', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'unresolved_issues.csv'", ')', ')', '# equipment changes', 'self', '.', 'equipment_changes', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'equipment_changes.csv'", ')', ')', 'def', '_save_curtailment_results', '(', 'target_dir', ')', ':', 'if', 'self', '.', 'curtailment', 'is', 'not', 'None', ':', '# create directory', 'os', '.', 'makedirs', '(', 'target_dir', ',', 'exist_ok', '=', 'True', ')', 'for', 'key', ',', 'curtailment_df', 'in', 'self', '.', 'curtailment', '.', 'items', '(', ')', ':', 'if', 'type', '(', 'key', ')', '==', 'tuple', ':', 'type_prefix', '=', "'-'", '.', 'join', '(', '[', 'key', '[', '0', ']', ',', 'str', '(', 'key', '[', '1', ']', ')', ']', ')', 'elif', 'type', '(', 'key', ')', '==', 'str', ':', 'type_prefix', '=', 'key', 'else', ':', 'raise', 'KeyError', '(', '"Unknown key type {} for key {}"', '.', 'format', '(', 'type', '(', 'key', ')', ',', 'key', ')', ')', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'{}.csv'", '.', 'format', '(', 'type_prefix', ')', ')', 'curtailment_df', '.', 'to_csv', '(', 'filename', ',', 'index_label', '=', 'type_prefix', ')', 'def', '_save_storage_integration_results', '(', 'target_dir', ')', ':', 'storages', '=', 'self', '.', 'storages', 'if', 'not', 'storages', ',', '.', 'empty', ':', '# create directory', 'os', '.', 'makedirs', '(', 'target_dir', ',', 'exist_ok', '=', 'True', ')', '# general storage information', 'storages', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'storages.csv'", ')', ')', '# storages time series', 'ts_p', ',', 'ts_q', '=', 'self', '.', 'storages_timeseries', '(', ')', 'ts_p', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'storages_active_power.csv'", ')', ')', 'ts_q', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'storages_reactive_power.csv'", ')', ')', 'if', 'not', 'self', '.', 'storages_costs_reduction', 'is', 'None', ':', 'self', '.', 'storages_costs_reduction', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'target_dir', ',', "'storages_costs_reduction.csv'", ')', ')', '# dictionary with function to call to save each parameter', 'func_dict', '=', '{', "'powerflow_results'", ':', '_save_power_flow_results', ',', "'pypsa_network'", ':', '_save_pypsa_network', ',', "'grid_expansion_results'", ':', '_save_grid_expansion_results', ',', "'curtailment_results'", ':', '_save_curtailment_results', ',', "'storage_integration_results'", ':', '_save_storage_integration_results', '}', '# if string is given convert to list', 'if', 'isinstance', '(', 'parameters', ',', 'str', ')', ':', 'if', 'parameters', '==', "'all'", ':', 'parameters', '=', '[', "'powerflow_results'", ',', "'pypsa_network'", ',', "'grid_expansion_results'", ',', "'curtailment_results'", ',', "'storage_integration_results'", ']', 'else', ':', 'parameters', '=', '[', 'parameters', ']', '# save each parameter', 'for', 'parameter', 'in', 'parameters', ':', 'try', ':', 'func_dict', '[', 'parameter', ']', '(', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', 'parameter', ')', ')', 'except', 'KeyError', ':', 'message', '=', '"Invalid input {} for `parameters` when saving "', '"results. Must be any or a list of the following: "', '"\'pypsa_network\', \'powerflow_results\', "', '"\'grid_expansion_results\', \'curtailment_results\', "', '"\'storage_integration_results\'."', '.', 'format', '(', 'parameter', ')', 'logger', '.', 'error', '(', 'message', ')', 'raise', 'KeyError', '(', 'message', ')', 'except', ':', 'raise', '# save measures', 'pd', '.', 'DataFrame', '(', 'data', '=', '{', "'measure'", ':', 'self', '.', 'measures', '}', ')', '.', 'to_csv', '(', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', "'measures.csv'", ')', ')', '# save configs', 'with', 'open', '(', 'os', '.', 'path', '.', 'join', '(', 'directory', ',', "'configs.csv'", ')', ',', "'w'", ')', 'as', 'f', ':', 'writer', '=', 'csv', '.', 'writer', '(', 'f', ')', 'rows', '=', '[', '[', "'{}'", '.', 'format', '(', 'key', ')', ']', '+', '[', 'value', 'for', 'item', 'in', 'values', '.', 'items', '(', ')', 'for', 'value', 'in', 'item', ']', 'for', 'key', ',', 'values', 'in', 'self', '.', 'network', '.', 'config', '.', '_data', '.', 'items', '(', ')', ']', 'writer', '.', 'writerows', '(', 'rows', ')']
Saves results to disk. Depending on which results are selected and if they exist, the following directories and files are created: * `powerflow_results` directory * `voltages_pu.csv` See :py:attr:`~pfa_v_mag_pu` for more information. * `currents.csv` See :func:`~i_res` for more information. * `active_powers.csv` See :py:attr:`~pfa_p` for more information. * `reactive_powers.csv` See :py:attr:`~pfa_q` for more information. * `apparent_powers.csv` See :func:`~s_res` for more information. * `grid_losses.csv` See :py:attr:`~grid_losses` for more information. * `hv_mv_exchanges.csv` See :py:attr:`~hv_mv_exchanges` for more information. * `pypsa_network` directory See :py:func:`pypsa.Network.export_to_csv_folder` * `grid_expansion_results` directory * `grid_expansion_costs.csv` See :py:attr:`~grid_expansion_costs` for more information. * `equipment_changes.csv` See :py:attr:`~equipment_changes` for more information. * `unresolved_issues.csv` See :py:attr:`~unresolved_issues` for more information. * `curtailment_results` directory Files depend on curtailment specifications. There will be one file for each curtailment specification, that is for every key in :py:attr:`~curtailment` dictionary. * `storage_integration_results` directory * `storages.csv` See :func:`~storages` for more information. Parameters ---------- directory : :obj:`str` Directory to save the results in. parameters : :obj:`str` or :obj:`list` of :obj:`str` Specifies which results will be saved. By default all results are saved. To only save certain results set `parameters` to one of the following options or choose several options by providing a list: * 'pypsa_network' * 'powerflow_results' * 'grid_expansion_results' * 'curtailment_results' * 'storage_integration_results'
['Saves', 'results', 'to', 'disk', '.']
train
https://github.com/openego/eDisGo/blob/e6245bdaf236f9c49dbda5a18c1c458290f41e2b/edisgo/grid/network.py#L2996-L3209
1,332
SKA-ScienceDataProcessor/integration-prototype
sip/tango_control/tango_master/app/sdp_master_device.py
SDPMasterDevice._get_service_state
def _get_service_state(service_id: str): """Get the Service state object for the specified id.""" LOG.debug('Getting state of service %s', service_id) services = get_service_id_list() service_ids = [s for s in services if service_id in s] if len(service_ids) != 1: return 'Service not found! services = {}'.format(str(services)) subsystem, name, version = service_ids[0].split(':') return ServiceState(subsystem, name, version)
python
def _get_service_state(service_id: str): """Get the Service state object for the specified id.""" LOG.debug('Getting state of service %s', service_id) services = get_service_id_list() service_ids = [s for s in services if service_id in s] if len(service_ids) != 1: return 'Service not found! services = {}'.format(str(services)) subsystem, name, version = service_ids[0].split(':') return ServiceState(subsystem, name, version)
['def', '_get_service_state', '(', 'service_id', ':', 'str', ')', ':', 'LOG', '.', 'debug', '(', "'Getting state of service %s'", ',', 'service_id', ')', 'services', '=', 'get_service_id_list', '(', ')', 'service_ids', '=', '[', 's', 'for', 's', 'in', 'services', 'if', 'service_id', 'in', 's', ']', 'if', 'len', '(', 'service_ids', ')', '!=', '1', ':', 'return', "'Service not found! services = {}'", '.', 'format', '(', 'str', '(', 'services', ')', ')', 'subsystem', ',', 'name', ',', 'version', '=', 'service_ids', '[', '0', ']', '.', 'split', '(', "':'", ')', 'return', 'ServiceState', '(', 'subsystem', ',', 'name', ',', 'version', ')']
Get the Service state object for the specified id.
['Get', 'the', 'Service', 'state', 'object', 'for', 'the', 'specified', 'id', '.']
train
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/tango_control/tango_master/app/sdp_master_device.py#L118-L126
1,333
python-rope/rope
rope/base/history.py
History.do
def do(self, changes, task_handle=taskhandle.NullTaskHandle()): """Perform the change and add it to the `self.undo_list` Note that uninteresting changes (changes to ignored files) will not be appended to `self.undo_list`. """ try: self.current_change = changes changes.do(change.create_job_set(task_handle, changes)) finally: self.current_change = None if self._is_change_interesting(changes): self.undo_list.append(changes) self._remove_extra_items() del self.redo_list[:]
python
def do(self, changes, task_handle=taskhandle.NullTaskHandle()): """Perform the change and add it to the `self.undo_list` Note that uninteresting changes (changes to ignored files) will not be appended to `self.undo_list`. """ try: self.current_change = changes changes.do(change.create_job_set(task_handle, changes)) finally: self.current_change = None if self._is_change_interesting(changes): self.undo_list.append(changes) self._remove_extra_items() del self.redo_list[:]
['def', 'do', '(', 'self', ',', 'changes', ',', 'task_handle', '=', 'taskhandle', '.', 'NullTaskHandle', '(', ')', ')', ':', 'try', ':', 'self', '.', 'current_change', '=', 'changes', 'changes', '.', 'do', '(', 'change', '.', 'create_job_set', '(', 'task_handle', ',', 'changes', ')', ')', 'finally', ':', 'self', '.', 'current_change', '=', 'None', 'if', 'self', '.', '_is_change_interesting', '(', 'changes', ')', ':', 'self', '.', 'undo_list', '.', 'append', '(', 'changes', ')', 'self', '.', '_remove_extra_items', '(', ')', 'del', 'self', '.', 'redo_list', '[', ':', ']']
Perform the change and add it to the `self.undo_list` Note that uninteresting changes (changes to ignored files) will not be appended to `self.undo_list`.
['Perform', 'the', 'change', 'and', 'add', 'it', 'to', 'the', 'self', '.', 'undo_list']
train
https://github.com/python-rope/rope/blob/1c9f9cd5964b099a99a9111e998f0dc728860688/rope/base/history.py#L27-L42
1,334
glitchassassin/lackey
lackey/RegionMatching.py
Region.stopObserver
def stopObserver(self): """ Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically. """ self._observer.isStopped = True self._observer.isRunning = False
python
def stopObserver(self): """ Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically. """ self._observer.isStopped = True self._observer.isRunning = False
['def', 'stopObserver', '(', 'self', ')', ':', 'self', '.', '_observer', '.', 'isStopped', '=', 'True', 'self', '.', '_observer', '.', 'isRunning', '=', 'False']
Stops this region's observer loop. If this is running in a subprocess, the subprocess will end automatically.
['Stops', 'this', 'region', 's', 'observer', 'loop', '.']
train
https://github.com/glitchassassin/lackey/blob/7adadfacd7f45d81186710be992f5668b15399fe/lackey/RegionMatching.py#L1486-L1492
1,335
EUDAT-B2SAFE/B2HANDLE
b2handle/utilhandle.py
create_authentication_string
def create_authentication_string(username, password): ''' Creates an authentication string from the username and password. :username: Username. :password: Password. :return: The encoded string. ''' username_utf8 = username.encode('utf-8') userpw_utf8 = password.encode('utf-8') username_perc = quote(username_utf8) userpw_perc = quote(userpw_utf8) authinfostring = username_perc + ':' + userpw_perc authinfostring_base64 = base64.b64encode(authinfostring.encode('utf-8')).decode('utf-8') return authinfostring_base64
python
def create_authentication_string(username, password): ''' Creates an authentication string from the username and password. :username: Username. :password: Password. :return: The encoded string. ''' username_utf8 = username.encode('utf-8') userpw_utf8 = password.encode('utf-8') username_perc = quote(username_utf8) userpw_perc = quote(userpw_utf8) authinfostring = username_perc + ':' + userpw_perc authinfostring_base64 = base64.b64encode(authinfostring.encode('utf-8')).decode('utf-8') return authinfostring_base64
['def', 'create_authentication_string', '(', 'username', ',', 'password', ')', ':', 'username_utf8', '=', 'username', '.', 'encode', '(', "'utf-8'", ')', 'userpw_utf8', '=', 'password', '.', 'encode', '(', "'utf-8'", ')', 'username_perc', '=', 'quote', '(', 'username_utf8', ')', 'userpw_perc', '=', 'quote', '(', 'userpw_utf8', ')', 'authinfostring', '=', 'username_perc', '+', "':'", '+', 'userpw_perc', 'authinfostring_base64', '=', 'base64', '.', 'b64encode', '(', 'authinfostring', '.', 'encode', '(', "'utf-8'", ')', ')', '.', 'decode', '(', "'utf-8'", ')', 'return', 'authinfostring_base64']
Creates an authentication string from the username and password. :username: Username. :password: Password. :return: The encoded string.
['Creates', 'an', 'authentication', 'string', 'from', 'the', 'username', 'and', 'password', '.']
train
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/utilhandle.py#L102-L118
1,336
brechtm/rinohtype
src/rinoh/hyphenator.py
Hyph_dict.positions
def positions(self, word): """ Returns a list of positions where the word can be hyphenated. E.g. for the dutch word 'lettergrepen' this method returns the list [3, 6, 9]. Each position is a 'data int' (dint) with a data attribute. If the data attribute is not None, it contains a tuple with information about nonstandard hyphenation at that point: (change, index, cut) change: is a string like 'ff=f', that describes how hyphenation should take place. index: where to substitute the change, counting from the current point cut: how many characters to remove while substituting the nonstandard hyphenation """ word = word.lower() points = self.cache.get(word) if points is None: prepWord = '.%s.' % word res = [0] * (len(prepWord) + 1) for i in range(len(prepWord) - 1): for j in range(i + 1, min(i + self.maxlen, len(prepWord)) + 1): p = self.patterns.get(prepWord[i:j]) if p: offset, value = p s = slice(i + offset, i + offset + len(value)) res[s] = map(max, value, res[s]) points = [dint(i - 1, ref=r) for i, r in enumerate(res) if r % 2] self.cache[word] = points return points
python
def positions(self, word): """ Returns a list of positions where the word can be hyphenated. E.g. for the dutch word 'lettergrepen' this method returns the list [3, 6, 9]. Each position is a 'data int' (dint) with a data attribute. If the data attribute is not None, it contains a tuple with information about nonstandard hyphenation at that point: (change, index, cut) change: is a string like 'ff=f', that describes how hyphenation should take place. index: where to substitute the change, counting from the current point cut: how many characters to remove while substituting the nonstandard hyphenation """ word = word.lower() points = self.cache.get(word) if points is None: prepWord = '.%s.' % word res = [0] * (len(prepWord) + 1) for i in range(len(prepWord) - 1): for j in range(i + 1, min(i + self.maxlen, len(prepWord)) + 1): p = self.patterns.get(prepWord[i:j]) if p: offset, value = p s = slice(i + offset, i + offset + len(value)) res[s] = map(max, value, res[s]) points = [dint(i - 1, ref=r) for i, r in enumerate(res) if r % 2] self.cache[word] = points return points
['def', 'positions', '(', 'self', ',', 'word', ')', ':', 'word', '=', 'word', '.', 'lower', '(', ')', 'points', '=', 'self', '.', 'cache', '.', 'get', '(', 'word', ')', 'if', 'points', 'is', 'None', ':', 'prepWord', '=', "'.%s.'", '%', 'word', 'res', '=', '[', '0', ']', '*', '(', 'len', '(', 'prepWord', ')', '+', '1', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'prepWord', ')', '-', '1', ')', ':', 'for', 'j', 'in', 'range', '(', 'i', '+', '1', ',', 'min', '(', 'i', '+', 'self', '.', 'maxlen', ',', 'len', '(', 'prepWord', ')', ')', '+', '1', ')', ':', 'p', '=', 'self', '.', 'patterns', '.', 'get', '(', 'prepWord', '[', 'i', ':', 'j', ']', ')', 'if', 'p', ':', 'offset', ',', 'value', '=', 'p', 's', '=', 'slice', '(', 'i', '+', 'offset', ',', 'i', '+', 'offset', '+', 'len', '(', 'value', ')', ')', 'res', '[', 's', ']', '=', 'map', '(', 'max', ',', 'value', ',', 'res', '[', 's', ']', ')', 'points', '=', '[', 'dint', '(', 'i', '-', '1', ',', 'ref', '=', 'r', ')', 'for', 'i', ',', 'r', 'in', 'enumerate', '(', 'res', ')', 'if', 'r', '%', '2', ']', 'self', '.', 'cache', '[', 'word', ']', '=', 'points', 'return', 'points']
Returns a list of positions where the word can be hyphenated. E.g. for the dutch word 'lettergrepen' this method returns the list [3, 6, 9]. Each position is a 'data int' (dint) with a data attribute. If the data attribute is not None, it contains a tuple with information about nonstandard hyphenation at that point: (change, index, cut) change: is a string like 'ff=f', that describes how hyphenation should take place. index: where to substitute the change, counting from the current point cut: how many characters to remove while substituting the nonstandard hyphenation
['Returns', 'a', 'list', 'of', 'positions', 'where', 'the', 'word', 'can', 'be', 'hyphenated', '.', 'E', '.', 'g', '.', 'for', 'the', 'dutch', 'word', 'lettergrepen', 'this', 'method', 'returns', 'the', 'list', '[', '3', '6', '9', ']', '.']
train
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/hyphenator.py#L114-L147
1,337
ensime/ensime-vim
ensime_shared/ticker.py
Ticker._start_refresh_timer
def _start_refresh_timer(self): """Start the Vim timer. """ if not self._timer: self._timer = self._vim.eval( "timer_start({}, 'EnTick', {{'repeat': -1}})" .format(REFRESH_TIMER) )
python
def _start_refresh_timer(self): """Start the Vim timer. """ if not self._timer: self._timer = self._vim.eval( "timer_start({}, 'EnTick', {{'repeat': -1}})" .format(REFRESH_TIMER) )
['def', '_start_refresh_timer', '(', 'self', ')', ':', 'if', 'not', 'self', '.', '_timer', ':', 'self', '.', '_timer', '=', 'self', '.', '_vim', '.', 'eval', '(', '"timer_start({}, \'EnTick\', {{\'repeat\': -1}})"', '.', 'format', '(', 'REFRESH_TIMER', ')', ')']
Start the Vim timer.
['Start', 'the', 'Vim', 'timer', '.']
train
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/ticker.py#L30-L36
1,338
arviz-devs/arviz
arviz/stats/stats.py
loo
def loo(data, pointwise=False, reff=None, scale="deviance"): """Pareto-smoothed importance sampling leave-one-out cross-validation. Calculates leave-one-out (LOO) cross-validation for out of sample predictive model fit, following Vehtari et al. (2017). Cross-validation is computed using Pareto-smoothed importance sampling (PSIS). Parameters ---------- data : result of MCMC run pointwise: bool, optional if True the pointwise predictive accuracy will be returned. Defaults to False reff : float, optional Relative MCMC efficiency, `effective_n / n` i.e. number of effective samples divided by the number of actual samples. Computed from trace by default. scale : str Output scale for loo. Available options are: - `deviance` : (default) -2 * (log-score) - `log` : 1 * log-score (after Vehtari et al. (2017)) - `negative_log` : -1 * (log-score) Returns ------- pandas.Series with the following columns: loo: approximated Leave-one-out cross-validation loo_se: standard error of loo p_loo: effective number of parameters shape_warn: 1 if the estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples loo_i: array of pointwise predictive accuracy, only if pointwise True pareto_k: array of Pareto shape values, only if pointwise True loo_scale: scale of the loo results """ inference_data = convert_to_inference_data(data) for group in ("posterior", "sample_stats"): if not hasattr(inference_data, group): raise TypeError( "Must be able to extract a {group}" "group from data!".format(group=group) ) if "log_likelihood" not in inference_data.sample_stats: raise TypeError("Data must include log_likelihood in sample_stats") posterior = inference_data.posterior log_likelihood = inference_data.sample_stats.log_likelihood n_samples = log_likelihood.chain.size * log_likelihood.draw.size new_shape = (n_samples,) + log_likelihood.shape[2:] log_likelihood = log_likelihood.values.reshape(*new_shape) if scale.lower() == "deviance": scale_value = -2 elif scale.lower() == "log": scale_value = 1 elif scale.lower() == "negative_log": scale_value = -1 else: raise TypeError('Valid scale values are "deviance", "log", "negative_log"') if reff is None: n_chains = len(posterior.chain) if n_chains == 1: reff = 1.0 else: ess = effective_sample_size(posterior) # this mean is over all data variables reff = np.hstack([ess[v].values.flatten() for v in ess.data_vars]).mean() / n_samples log_weights, pareto_shape = psislw(-log_likelihood, reff) log_weights += log_likelihood warn_mg = 0 if np.any(pareto_shape > 0.7): warnings.warn( """Estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples. You should consider using a more robust model, this is because importance sampling is less likely to work well if the marginal posterior and LOO posterior are very different. This is more likely to happen with a non-robust model and highly influential observations.""" ) warn_mg = 1 loo_lppd_i = scale_value * _logsumexp(log_weights, axis=0) loo_lppd = loo_lppd_i.sum() loo_lppd_se = (len(loo_lppd_i) * np.var(loo_lppd_i)) ** 0.5 lppd = np.sum(_logsumexp(log_likelihood, axis=0, b_inv=log_likelihood.shape[0])) p_loo = lppd - loo_lppd / scale_value if pointwise: if np.equal(loo_lppd, loo_lppd_i).all(): # pylint: disable=no-member warnings.warn( """The point-wise LOO is the same with the sum LOO, please double check the Observed RV in your model to make sure it returns element-wise logp. """ ) return pd.Series( data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, loo_lppd_i, pareto_shape, scale], index=["loo", "loo_se", "p_loo", "warning", "loo_i", "pareto_k", "loo_scale"], ) else: return pd.Series( data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, scale], index=["loo", "loo_se", "p_loo", "warning", "loo_scale"], )
python
def loo(data, pointwise=False, reff=None, scale="deviance"): """Pareto-smoothed importance sampling leave-one-out cross-validation. Calculates leave-one-out (LOO) cross-validation for out of sample predictive model fit, following Vehtari et al. (2017). Cross-validation is computed using Pareto-smoothed importance sampling (PSIS). Parameters ---------- data : result of MCMC run pointwise: bool, optional if True the pointwise predictive accuracy will be returned. Defaults to False reff : float, optional Relative MCMC efficiency, `effective_n / n` i.e. number of effective samples divided by the number of actual samples. Computed from trace by default. scale : str Output scale for loo. Available options are: - `deviance` : (default) -2 * (log-score) - `log` : 1 * log-score (after Vehtari et al. (2017)) - `negative_log` : -1 * (log-score) Returns ------- pandas.Series with the following columns: loo: approximated Leave-one-out cross-validation loo_se: standard error of loo p_loo: effective number of parameters shape_warn: 1 if the estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples loo_i: array of pointwise predictive accuracy, only if pointwise True pareto_k: array of Pareto shape values, only if pointwise True loo_scale: scale of the loo results """ inference_data = convert_to_inference_data(data) for group in ("posterior", "sample_stats"): if not hasattr(inference_data, group): raise TypeError( "Must be able to extract a {group}" "group from data!".format(group=group) ) if "log_likelihood" not in inference_data.sample_stats: raise TypeError("Data must include log_likelihood in sample_stats") posterior = inference_data.posterior log_likelihood = inference_data.sample_stats.log_likelihood n_samples = log_likelihood.chain.size * log_likelihood.draw.size new_shape = (n_samples,) + log_likelihood.shape[2:] log_likelihood = log_likelihood.values.reshape(*new_shape) if scale.lower() == "deviance": scale_value = -2 elif scale.lower() == "log": scale_value = 1 elif scale.lower() == "negative_log": scale_value = -1 else: raise TypeError('Valid scale values are "deviance", "log", "negative_log"') if reff is None: n_chains = len(posterior.chain) if n_chains == 1: reff = 1.0 else: ess = effective_sample_size(posterior) # this mean is over all data variables reff = np.hstack([ess[v].values.flatten() for v in ess.data_vars]).mean() / n_samples log_weights, pareto_shape = psislw(-log_likelihood, reff) log_weights += log_likelihood warn_mg = 0 if np.any(pareto_shape > 0.7): warnings.warn( """Estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples. You should consider using a more robust model, this is because importance sampling is less likely to work well if the marginal posterior and LOO posterior are very different. This is more likely to happen with a non-robust model and highly influential observations.""" ) warn_mg = 1 loo_lppd_i = scale_value * _logsumexp(log_weights, axis=0) loo_lppd = loo_lppd_i.sum() loo_lppd_se = (len(loo_lppd_i) * np.var(loo_lppd_i)) ** 0.5 lppd = np.sum(_logsumexp(log_likelihood, axis=0, b_inv=log_likelihood.shape[0])) p_loo = lppd - loo_lppd / scale_value if pointwise: if np.equal(loo_lppd, loo_lppd_i).all(): # pylint: disable=no-member warnings.warn( """The point-wise LOO is the same with the sum LOO, please double check the Observed RV in your model to make sure it returns element-wise logp. """ ) return pd.Series( data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, loo_lppd_i, pareto_shape, scale], index=["loo", "loo_se", "p_loo", "warning", "loo_i", "pareto_k", "loo_scale"], ) else: return pd.Series( data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, scale], index=["loo", "loo_se", "p_loo", "warning", "loo_scale"], )
['def', 'loo', '(', 'data', ',', 'pointwise', '=', 'False', ',', 'reff', '=', 'None', ',', 'scale', '=', '"deviance"', ')', ':', 'inference_data', '=', 'convert_to_inference_data', '(', 'data', ')', 'for', 'group', 'in', '(', '"posterior"', ',', '"sample_stats"', ')', ':', 'if', 'not', 'hasattr', '(', 'inference_data', ',', 'group', ')', ':', 'raise', 'TypeError', '(', '"Must be able to extract a {group}"', '"group from data!"', '.', 'format', '(', 'group', '=', 'group', ')', ')', 'if', '"log_likelihood"', 'not', 'in', 'inference_data', '.', 'sample_stats', ':', 'raise', 'TypeError', '(', '"Data must include log_likelihood in sample_stats"', ')', 'posterior', '=', 'inference_data', '.', 'posterior', 'log_likelihood', '=', 'inference_data', '.', 'sample_stats', '.', 'log_likelihood', 'n_samples', '=', 'log_likelihood', '.', 'chain', '.', 'size', '*', 'log_likelihood', '.', 'draw', '.', 'size', 'new_shape', '=', '(', 'n_samples', ',', ')', '+', 'log_likelihood', '.', 'shape', '[', '2', ':', ']', 'log_likelihood', '=', 'log_likelihood', '.', 'values', '.', 'reshape', '(', '*', 'new_shape', ')', 'if', 'scale', '.', 'lower', '(', ')', '==', '"deviance"', ':', 'scale_value', '=', '-', '2', 'elif', 'scale', '.', 'lower', '(', ')', '==', '"log"', ':', 'scale_value', '=', '1', 'elif', 'scale', '.', 'lower', '(', ')', '==', '"negative_log"', ':', 'scale_value', '=', '-', '1', 'else', ':', 'raise', 'TypeError', '(', '\'Valid scale values are "deviance", "log", "negative_log"\'', ')', 'if', 'reff', 'is', 'None', ':', 'n_chains', '=', 'len', '(', 'posterior', '.', 'chain', ')', 'if', 'n_chains', '==', '1', ':', 'reff', '=', '1.0', 'else', ':', 'ess', '=', 'effective_sample_size', '(', 'posterior', ')', '# this mean is over all data variables', 'reff', '=', 'np', '.', 'hstack', '(', '[', 'ess', '[', 'v', ']', '.', 'values', '.', 'flatten', '(', ')', 'for', 'v', 'in', 'ess', '.', 'data_vars', ']', ')', '.', 'mean', '(', ')', '/', 'n_samples', 'log_weights', ',', 'pareto_shape', '=', 'psislw', '(', '-', 'log_likelihood', ',', 'reff', ')', 'log_weights', '+=', 'log_likelihood', 'warn_mg', '=', '0', 'if', 'np', '.', 'any', '(', 'pareto_shape', '>', '0.7', ')', ':', 'warnings', '.', 'warn', '(', '"""Estimated shape parameter of Pareto distribution is greater than 0.7 for\n one or more samples. You should consider using a more robust model, this is because\n importance sampling is less likely to work well if the marginal posterior and LOO posterior\n are very different. This is more likely to happen with a non-robust model and highly\n influential observations."""', ')', 'warn_mg', '=', '1', 'loo_lppd_i', '=', 'scale_value', '*', '_logsumexp', '(', 'log_weights', ',', 'axis', '=', '0', ')', 'loo_lppd', '=', 'loo_lppd_i', '.', 'sum', '(', ')', 'loo_lppd_se', '=', '(', 'len', '(', 'loo_lppd_i', ')', '*', 'np', '.', 'var', '(', 'loo_lppd_i', ')', ')', '**', '0.5', 'lppd', '=', 'np', '.', 'sum', '(', '_logsumexp', '(', 'log_likelihood', ',', 'axis', '=', '0', ',', 'b_inv', '=', 'log_likelihood', '.', 'shape', '[', '0', ']', ')', ')', 'p_loo', '=', 'lppd', '-', 'loo_lppd', '/', 'scale_value', 'if', 'pointwise', ':', 'if', 'np', '.', 'equal', '(', 'loo_lppd', ',', 'loo_lppd_i', ')', '.', 'all', '(', ')', ':', '# pylint: disable=no-member', 'warnings', '.', 'warn', '(', '"""The point-wise LOO is the same with the sum LOO, please double check\n the Observed RV in your model to make sure it returns element-wise logp.\n """', ')', 'return', 'pd', '.', 'Series', '(', 'data', '=', '[', 'loo_lppd', ',', 'loo_lppd_se', ',', 'p_loo', ',', 'warn_mg', ',', 'loo_lppd_i', ',', 'pareto_shape', ',', 'scale', ']', ',', 'index', '=', '[', '"loo"', ',', '"loo_se"', ',', '"p_loo"', ',', '"warning"', ',', '"loo_i"', ',', '"pareto_k"', ',', '"loo_scale"', ']', ',', ')', 'else', ':', 'return', 'pd', '.', 'Series', '(', 'data', '=', '[', 'loo_lppd', ',', 'loo_lppd_se', ',', 'p_loo', ',', 'warn_mg', ',', 'scale', ']', ',', 'index', '=', '[', '"loo"', ',', '"loo_se"', ',', '"p_loo"', ',', '"warning"', ',', '"loo_scale"', ']', ',', ')']
Pareto-smoothed importance sampling leave-one-out cross-validation. Calculates leave-one-out (LOO) cross-validation for out of sample predictive model fit, following Vehtari et al. (2017). Cross-validation is computed using Pareto-smoothed importance sampling (PSIS). Parameters ---------- data : result of MCMC run pointwise: bool, optional if True the pointwise predictive accuracy will be returned. Defaults to False reff : float, optional Relative MCMC efficiency, `effective_n / n` i.e. number of effective samples divided by the number of actual samples. Computed from trace by default. scale : str Output scale for loo. Available options are: - `deviance` : (default) -2 * (log-score) - `log` : 1 * log-score (after Vehtari et al. (2017)) - `negative_log` : -1 * (log-score) Returns ------- pandas.Series with the following columns: loo: approximated Leave-one-out cross-validation loo_se: standard error of loo p_loo: effective number of parameters shape_warn: 1 if the estimated shape parameter of Pareto distribution is greater than 0.7 for one or more samples loo_i: array of pointwise predictive accuracy, only if pointwise True pareto_k: array of Pareto shape values, only if pointwise True loo_scale: scale of the loo results
['Pareto', '-', 'smoothed', 'importance', 'sampling', 'leave', '-', 'one', '-', 'out', 'cross', '-', 'validation', '.']
train
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/stats/stats.py#L387-L490
1,339
codelv/enaml-native
src/enamlnative/widgets/scroll_view.py
ScrollView._update_proxy
def _update_proxy(self, change): """ An observer which sends the state change to the proxy. """ if change['type'] in ['event', 'update'] and self.proxy_is_active: handler = getattr(self.proxy, 'set_' + change['name'], None) if handler is not None: handler(change['value'])
python
def _update_proxy(self, change): """ An observer which sends the state change to the proxy. """ if change['type'] in ['event', 'update'] and self.proxy_is_active: handler = getattr(self.proxy, 'set_' + change['name'], None) if handler is not None: handler(change['value'])
['def', '_update_proxy', '(', 'self', ',', 'change', ')', ':', 'if', 'change', '[', "'type'", ']', 'in', '[', "'event'", ',', "'update'", ']', 'and', 'self', '.', 'proxy_is_active', ':', 'handler', '=', 'getattr', '(', 'self', '.', 'proxy', ',', "'set_'", '+', 'change', '[', "'name'", ']', ',', 'None', ')', 'if', 'handler', 'is', 'not', 'None', ':', 'handler', '(', 'change', '[', "'value'", ']', ')']
An observer which sends the state change to the proxy.
['An', 'observer', 'which', 'sends', 'the', 'state', 'change', 'to', 'the', 'proxy', '.']
train
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/widgets/scroll_view.py#L65-L72
1,340
kislyuk/aegea
aegea/packages/github3/repos/hook.py
Hook.edit
def edit(self, config={}, events=[], add_events=[], rm_events=[], active=True): """Edit this hook. :param dict config: (optional), key-value pairs of settings for this hook :param list events: (optional), which events should this be triggered for :param list add_events: (optional), events to be added to the list of events that this hook triggers for :param list rm_events: (optional), events to be remvoed from the list of events that this hook triggers for :param bool active: (optional), should this event be active :returns: bool """ data = {'config': config, 'active': active} if events: data['events'] = events if add_events: data['add_events'] = add_events if rm_events: data['remove_events'] = rm_events json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
python
def edit(self, config={}, events=[], add_events=[], rm_events=[], active=True): """Edit this hook. :param dict config: (optional), key-value pairs of settings for this hook :param list events: (optional), which events should this be triggered for :param list add_events: (optional), events to be added to the list of events that this hook triggers for :param list rm_events: (optional), events to be remvoed from the list of events that this hook triggers for :param bool active: (optional), should this event be active :returns: bool """ data = {'config': config, 'active': active} if events: data['events'] = events if add_events: data['add_events'] = add_events if rm_events: data['remove_events'] = rm_events json = self._json(self._patch(self._api, data=dumps(data)), 200) if json: self._update_(json) return True return False
['def', 'edit', '(', 'self', ',', 'config', '=', '{', '}', ',', 'events', '=', '[', ']', ',', 'add_events', '=', '[', ']', ',', 'rm_events', '=', '[', ']', ',', 'active', '=', 'True', ')', ':', 'data', '=', '{', "'config'", ':', 'config', ',', "'active'", ':', 'active', '}', 'if', 'events', ':', 'data', '[', "'events'", ']', '=', 'events', 'if', 'add_events', ':', 'data', '[', "'add_events'", ']', '=', 'add_events', 'if', 'rm_events', ':', 'data', '[', "'remove_events'", ']', '=', 'rm_events', 'json', '=', 'self', '.', '_json', '(', 'self', '.', '_patch', '(', 'self', '.', '_api', ',', 'data', '=', 'dumps', '(', 'data', ')', ')', ',', '200', ')', 'if', 'json', ':', 'self', '.', '_update_', '(', 'json', ')', 'return', 'True', 'return', 'False']
Edit this hook. :param dict config: (optional), key-value pairs of settings for this hook :param list events: (optional), which events should this be triggered for :param list add_events: (optional), events to be added to the list of events that this hook triggers for :param list rm_events: (optional), events to be remvoed from the list of events that this hook triggers for :param bool active: (optional), should this event be active :returns: bool
['Edit', 'this', 'hook', '.']
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/repos/hook.py#L65-L96
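The edit() method above only forwards the keys that were actually supplied, so a quick way to see what a given call would PATCH is to rebuild the payload the same way. A minimal standalone sketch using only the standard library; the config URL and event names are made up for the demo and nothing is sent over the network:

import json

def build_hook_edit_payload(config=None, events=None, add_events=None,
                            rm_events=None, active=True):
    # Mirrors the optional-field handling in Hook.edit: only non-empty
    # lists make it into the PATCH body.
    data = {'config': config or {}, 'active': active}
    if events:
        data['events'] = events
    if add_events:
        data['add_events'] = add_events
    if rm_events:
        data['remove_events'] = rm_events
    return data

payload = build_hook_edit_payload(config={'url': 'https://example.com/hook'},
                                  add_events=['pull_request'])
print(json.dumps(payload, indent=2))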
1,341
coghost/izen
izen/chaos.py
RsaPub.rsa_base64_encrypt
def rsa_base64_encrypt(self, plain, b64=True): """ 使用公钥加密 ``可见数据`` - 由于rsa公钥加密相对耗时, 多用来 ``加密数据量小`` 的数据 .. note:: 1. 使用aes加密数据 2. 然后rsa用来加密aes加密数据时使用的key :param plain: :type plain: :param b64: :type b64: :return: :rtype: """ with open(self.key_file) as fp: key_ = RSA.importKey(fp.read()) plain = helper.to_bytes(plain) cipher = PKCS1_v1_5.new(key_).encrypt(plain) cip = base64.b64encode(cipher) if b64 else cipher return helper.to_str(cip)
python
def rsa_base64_encrypt(self, plain, b64=True): """ 使用公钥加密 ``可见数据`` - 由于rsa公钥加密相对耗时, 多用来 ``加密数据量小`` 的数据 .. note:: 1. 使用aes加密数据 2. 然后rsa用来加密aes加密数据时使用的key :param plain: :type plain: :param b64: :type b64: :return: :rtype: """ with open(self.key_file) as fp: key_ = RSA.importKey(fp.read()) plain = helper.to_bytes(plain) cipher = PKCS1_v1_5.new(key_).encrypt(plain) cip = base64.b64encode(cipher) if b64 else cipher return helper.to_str(cip)
['def', 'rsa_base64_encrypt', '(', 'self', ',', 'plain', ',', 'b64', '=', 'True', ')', ':', 'with', 'open', '(', 'self', '.', 'key_file', ')', 'as', 'fp', ':', 'key_', '=', 'RSA', '.', 'importKey', '(', 'fp', '.', 'read', '(', ')', ')', 'plain', '=', 'helper', '.', 'to_bytes', '(', 'plain', ')', 'cipher', '=', 'PKCS1_v1_5', '.', 'new', '(', 'key_', ')', '.', 'encrypt', '(', 'plain', ')', 'cip', '=', 'base64', '.', 'b64encode', '(', 'cipher', ')', 'if', 'b64', 'else', 'cipher', 'return', 'helper', '.', 'to_str', '(', 'cip', ')']
Encrypt ``visible (plaintext) data`` with the public key - because RSA public-key encryption is relatively slow, it is mostly used to encrypt ``small amounts of data`` .. note:: 1. encrypt the data with AES 2. then use RSA to encrypt the key that AES used when encrypting the data :param plain: :type plain: :param b64: :type b64: :return: :rtype:
['使用公钥加密', '可见数据']
train
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/chaos.py#L143-L165
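A runnable sketch of the same hybrid idea, assuming pycryptodome is installed and using an in-memory key pair instead of a key file: only a short secret (here a stand-in AES session key) is RSA-encrypted and base64-encoded, exactly as the method above does.

import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

key = RSA.generate(2048)                 # throwaway key pair for the demo
public_key = key.publickey()

aes_session_key = b'0123456789abcdef'    # 16-byte secret to protect
cipher = PKCS1_v1_5.new(public_key).encrypt(aes_session_key)
encoded = base64.b64encode(cipher)       # transport-safe ciphertext
print(encoded.decode())

# Decryption with the private key (a sentinel is returned on failure):
plain = PKCS1_v1_5.new(key).decrypt(base64.b64decode(encoded), None)
assert plain == aes_session_key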
1,342
doconix/django-mako-plus
django_mako_plus/tags.py
django_include
def django_include(context, template_name, **kwargs): ''' Mako tag to include a Django template withing the current DMP (Mako) template. Since this is a Django template, it is search for using the Django search algorithm (instead of the DMP app-based concept). See https://docs.djangoproject.com/en/2.1/topics/templates/. The current context is sent to the included template, which makes all context variables available to the Django template. Any additional kwargs are added to the context. ''' try: djengine = engines['django'] except KeyError as e: raise TemplateDoesNotExist("Django template engine not configured in settings, so template cannot be found: {}".format(template_name)) from e djtemplate = djengine.get_template(template_name) djcontext = {} djcontext.update(context) djcontext.update(kwargs) return djtemplate.render(djcontext, context['request'])
python
def django_include(context, template_name, **kwargs): ''' Mako tag to include a Django template withing the current DMP (Mako) template. Since this is a Django template, it is search for using the Django search algorithm (instead of the DMP app-based concept). See https://docs.djangoproject.com/en/2.1/topics/templates/. The current context is sent to the included template, which makes all context variables available to the Django template. Any additional kwargs are added to the context. ''' try: djengine = engines['django'] except KeyError as e: raise TemplateDoesNotExist("Django template engine not configured in settings, so template cannot be found: {}".format(template_name)) from e djtemplate = djengine.get_template(template_name) djcontext = {} djcontext.update(context) djcontext.update(kwargs) return djtemplate.render(djcontext, context['request'])
['def', 'django_include', '(', 'context', ',', 'template_name', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'djengine', '=', 'engines', '[', "'django'", ']', 'except', 'KeyError', 'as', 'e', ':', 'raise', 'TemplateDoesNotExist', '(', '"Django template engine not configured in settings, so template cannot be found: {}"', '.', 'format', '(', 'template_name', ')', ')', 'from', 'e', 'djtemplate', '=', 'djengine', '.', 'get_template', '(', 'template_name', ')', 'djcontext', '=', '{', '}', 'djcontext', '.', 'update', '(', 'context', ')', 'djcontext', '.', 'update', '(', 'kwargs', ')', 'return', 'djtemplate', '.', 'render', '(', 'djcontext', ',', 'context', '[', "'request'", ']', ')']
Mako tag to include a Django template within the current DMP (Mako) template. Since this is a Django template, it is searched for using the Django search algorithm (instead of the DMP app-based concept). See https://docs.djangoproject.com/en/2.1/topics/templates/. The current context is sent to the included template, which makes all context variables available to the Django template. Any additional kwargs are added to the context.
['Mako', 'tag', 'to', 'include', 'a', 'Django', 'template', 'withing', 'the', 'current', 'DMP', '(', 'Mako', ')', 'template', '.', 'Since', 'this', 'is', 'a', 'Django', 'template', 'it', 'is', 'search', 'for', 'using', 'the', 'Django', 'search', 'algorithm', '(', 'instead', 'of', 'the', 'DMP', 'app', '-', 'based', 'concept', ')', '.', 'See', 'https', ':', '//', 'docs', '.', 'djangoproject', '.', 'com', '/', 'en', '/', '2', '.', '1', '/', 'topics', '/', 'templates', '/', '.']
train
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/tags.py#L14-L33
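The key step above is pulling the 'django' backend out of `engines` and rendering with a merged context. A self-contained sketch, assuming a bare Django install configured on the fly; the template string, variable names, and kwargs are invented for illustration and no request object is passed:

import django
from django.conf import settings

settings.configure(TEMPLATES=[{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [], 'APP_DIRS': False, 'OPTIONS': {},
}])
django.setup()

from django.template import engines

djengine = engines['django']
djtemplate = djengine.from_string("Hello {{ name }}, extra={{ extra }}")

context = {'name': 'world'}          # stands in for the Mako context
kwargs = {'extra': 42}               # stands in for **kwargs
djcontext = {}
djcontext.update(context)
djcontext.update(kwargs)
print(djtemplate.render(djcontext))  # request omitted in this sketch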
1,343
ardydedase/pycouchbase
couchbase-python-cffi/couchbase_ffi/result.py
MultiResult._add_bad_rc
def _add_bad_rc(self, rc, result): """ Sets an error with a bad return code. Handles 'quiet' logic :param rc: The error code """ if not rc: return self.all_ok = False if rc == C.LCB_KEY_ENOENT and self._quiet: return try: raise pycbc_exc_lcb(rc) except PyCBC.default_exception as e: e.all_results = self e.key = result.key e.result = result self._add_err(sys.exc_info())
python
def _add_bad_rc(self, rc, result): """ Sets an error with a bad return code. Handles 'quiet' logic :param rc: The error code """ if not rc: return self.all_ok = False if rc == C.LCB_KEY_ENOENT and self._quiet: return try: raise pycbc_exc_lcb(rc) except PyCBC.default_exception as e: e.all_results = self e.key = result.key e.result = result self._add_err(sys.exc_info())
['def', '_add_bad_rc', '(', 'self', ',', 'rc', ',', 'result', ')', ':', 'if', 'not', 'rc', ':', 'return', 'self', '.', 'all_ok', '=', 'False', 'if', 'rc', '==', 'C', '.', 'LCB_KEY_ENOENT', 'and', 'self', '.', '_quiet', ':', 'return', 'try', ':', 'raise', 'pycbc_exc_lcb', '(', 'rc', ')', 'except', 'PyCBC', '.', 'default_exception', 'as', 'e', ':', 'e', '.', 'all_results', '=', 'self', 'e', '.', 'key', '=', 'result', '.', 'key', 'e', '.', 'result', '=', 'result', 'self', '.', '_add_err', '(', 'sys', '.', 'exc_info', '(', ')', ')']
Sets an error with a bad return code. Handles 'quiet' logic :param rc: The error code
['Sets', 'an', 'error', 'with', 'a', 'bad', 'return', 'code', '.', 'Handles', 'quiet', 'logic', ':', 'param', 'rc', ':', 'The', 'error', 'code']
train
https://github.com/ardydedase/pycouchbase/blob/6f010b4d2ef41aead2366878d0cf0b1284c0db0e/couchbase-python-cffi/couchbase_ffi/result.py#L132-L150
1,344
mabuchilab/QNET
src/qnet/algebra/pattern_matching/__init__.py
Pattern.finditer
def finditer(self, expr): """Return an iterator over all matches in `expr` Iterate over all :class:`MatchDict` results of matches for any matching (sub-)expressions in `expr`. The order of the matches conforms to the equivalent matched expressions returned by :meth:`findall`. """ try: for arg in expr.args: for m in self.finditer(arg): yield m for arg in expr.kwargs.values(): for m in self.finditer(arg): yield m except AttributeError: pass m = self.match(expr) if m: yield m
python
def finditer(self, expr): """Return an iterator over all matches in `expr` Iterate over all :class:`MatchDict` results of matches for any matching (sub-)expressions in `expr`. The order of the matches conforms to the equivalent matched expressions returned by :meth:`findall`. """ try: for arg in expr.args: for m in self.finditer(arg): yield m for arg in expr.kwargs.values(): for m in self.finditer(arg): yield m except AttributeError: pass m = self.match(expr) if m: yield m
['def', 'finditer', '(', 'self', ',', 'expr', ')', ':', 'try', ':', 'for', 'arg', 'in', 'expr', '.', 'args', ':', 'for', 'm', 'in', 'self', '.', 'finditer', '(', 'arg', ')', ':', 'yield', 'm', 'for', 'arg', 'in', 'expr', '.', 'kwargs', '.', 'values', '(', ')', ':', 'for', 'm', 'in', 'self', '.', 'finditer', '(', 'arg', ')', ':', 'yield', 'm', 'except', 'AttributeError', ':', 'pass', 'm', '=', 'self', '.', 'match', '(', 'expr', ')', 'if', 'm', ':', 'yield', 'm']
Return an iterator over all matches in `expr` Iterate over all :class:`MatchDict` results of matches for any matching (sub-)expressions in `expr`. The order of the matches conforms to the equivalent matched expressions returned by :meth:`findall`.
['Return', 'an', 'iterator', 'over', 'all', 'matches', 'in', 'expr']
train
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/pattern_matching/__init__.py#L370-L388
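The traversal above is a depth-first generator over an expression tree that also tests each node itself. A stripped-down sketch on a toy node class; the Node type and the is-even "pattern" are invented purely to show the recursion order:

class Node:
    def __init__(self, value, *args, **kwargs):
        self.value = value
        self.args = args
        self.kwargs = kwargs

def finditer(match, expr):
    # Recurse into positional and keyword children, then test the node itself,
    # mirroring the ordering used by Pattern.finditer above.
    if isinstance(expr, Node):
        for arg in expr.args:
            yield from finditer(match, arg)
        for arg in expr.kwargs.values():
            yield from finditer(match, arg)
    m = match(expr)
    if m:
        yield m

tree = Node(1, Node(2), Node(3, Node(4)), extra=Node(6))
is_even = lambda e: e.value if isinstance(e, Node) and e.value % 2 == 0 else None
print(list(finditer(is_even, tree)))   # [2, 4, 6]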
1,345
QUANTAXIS/QUANTAXIS
QUANTAXIS/QASetting/executor.py
execute
def execute(command, shell=None, working_dir=".", echo=False, echo_indent=0): """Execute a command on the command-line. :param str,list command: The command to run :param bool shell: Whether or not to use the shell. This is optional; if ``command`` is a basestring, shell will be set to True, otherwise it will be false. You can override this behavior by setting this parameter directly. :param str working_dir: The directory in which to run the command. :param bool echo: Whether or not to print the output from the command to stdout. :param int echo_indent: Any number of spaces to indent the echo for clarity :returns: tuple: (return code, stdout) Example >>> from executor import execute >>> return_code, text = execute("dir") """ if shell is None: shell = True if isinstance(command, str) else False p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=shell, cwd=working_dir) if echo: stdout = "" while p.poll() is None: # This blocks until it receives a newline. line = p.stdout.readline() print(" " * echo_indent, line, end="") stdout += line # Read any last bits line = p.stdout.read() print(" " * echo_indent, line, end="") print() stdout += line else: stdout, _ = p.communicate() return (p.returncode, stdout)
python
def execute(command, shell=None, working_dir=".", echo=False, echo_indent=0): """Execute a command on the command-line. :param str,list command: The command to run :param bool shell: Whether or not to use the shell. This is optional; if ``command`` is a basestring, shell will be set to True, otherwise it will be false. You can override this behavior by setting this parameter directly. :param str working_dir: The directory in which to run the command. :param bool echo: Whether or not to print the output from the command to stdout. :param int echo_indent: Any number of spaces to indent the echo for clarity :returns: tuple: (return code, stdout) Example >>> from executor import execute >>> return_code, text = execute("dir") """ if shell is None: shell = True if isinstance(command, str) else False p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=STDOUT, shell=shell, cwd=working_dir) if echo: stdout = "" while p.poll() is None: # This blocks until it receives a newline. line = p.stdout.readline() print(" " * echo_indent, line, end="") stdout += line # Read any last bits line = p.stdout.read() print(" " * echo_indent, line, end="") print() stdout += line else: stdout, _ = p.communicate() return (p.returncode, stdout)
['def', 'execute', '(', 'command', ',', 'shell', '=', 'None', ',', 'working_dir', '=', '"."', ',', 'echo', '=', 'False', ',', 'echo_indent', '=', '0', ')', ':', 'if', 'shell', 'is', 'None', ':', 'shell', '=', 'True', 'if', 'isinstance', '(', 'command', ',', 'str', ')', 'else', 'False', 'p', '=', 'Popen', '(', 'command', ',', 'stdin', '=', 'PIPE', ',', 'stdout', '=', 'PIPE', ',', 'stderr', '=', 'STDOUT', ',', 'shell', '=', 'shell', ',', 'cwd', '=', 'working_dir', ')', 'if', 'echo', ':', 'stdout', '=', '""', 'while', 'p', '.', 'poll', '(', ')', 'is', 'None', ':', '# This blocks until it receives a newline.', 'line', '=', 'p', '.', 'stdout', '.', 'readline', '(', ')', 'print', '(', '" "', '*', 'echo_indent', ',', 'line', ',', 'end', '=', '""', ')', 'stdout', '+=', 'line', '# Read any last bits', 'line', '=', 'p', '.', 'stdout', '.', 'read', '(', ')', 'print', '(', '" "', '*', 'echo_indent', ',', 'line', ',', 'end', '=', '""', ')', 'print', '(', ')', 'stdout', '+=', 'line', 'else', ':', 'stdout', ',', '_', '=', 'p', '.', 'communicate', '(', ')', 'return', '(', 'p', '.', 'returncode', ',', 'stdout', ')']
Execute a command on the command-line. :param str,list command: The command to run :param bool shell: Whether or not to use the shell. This is optional; if ``command`` is a basestring, shell will be set to True, otherwise it will be false. You can override this behavior by setting this parameter directly. :param str working_dir: The directory in which to run the command. :param bool echo: Whether or not to print the output from the command to stdout. :param int echo_indent: Any number of spaces to indent the echo for clarity :returns: tuple: (return code, stdout) Example >>> from executor import execute >>> return_code, text = execute("dir")
['Execute', 'a', 'command', 'on', 'the', 'command', '-', 'line', '.', ':', 'param', 'str', 'list', 'command', ':', 'The', 'command', 'to', 'run', ':', 'param', 'bool', 'shell', ':', 'Whether', 'or', 'not', 'to', 'use', 'the', 'shell', '.', 'This', 'is', 'optional', ';', 'if', 'command', 'is', 'a', 'basestring', 'shell', 'will', 'be', 'set', 'to', 'True', 'otherwise', 'it', 'will', 'be', 'false', '.', 'You', 'can', 'override', 'this', 'behavior', 'by', 'setting', 'this', 'parameter', 'directly', '.', ':', 'param', 'str', 'working_dir', ':', 'The', 'directory', 'in', 'which', 'to', 'run', 'the', 'command', '.', ':', 'param', 'bool', 'echo', ':', 'Whether', 'or', 'not', 'to', 'print', 'the', 'output', 'from', 'the', 'command', 'to', 'stdout', '.', ':', 'param', 'int', 'echo_indent', ':', 'Any', 'number', 'of', 'spaces', 'to', 'indent', 'the', 'echo', 'for', 'clarity', ':', 'returns', ':', 'tuple', ':', '(', 'return', 'code', 'stdout', ')', 'Example', '>>>', 'from', 'executor', 'import', 'execute', '>>>', 'return_code', 'text', '=', 'execute', '(', 'dir', ')']
train
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASetting/executor.py#L33-L71
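One thing worth noting about the helper above: in Python 3, `p.stdout.readline()` returns bytes, so concatenating onto the str `stdout` in the echo branch would raise a TypeError unless the pipes are opened in text mode. A minimal sketch of the same echo loop with text pipes; the `echo` command and indentation value are just illustrative:

from subprocess import Popen, PIPE, STDOUT

def execute_echo(command, shell=False, working_dir=".", echo_indent=0):
    p = Popen(command, stdout=PIPE, stderr=STDOUT, shell=shell,
              cwd=working_dir, text=True)       # text=True keeps everything str
    captured = ""
    while True:
        line = p.stdout.readline()
        if not line:                            # EOF: the process has finished
            break
        print(" " * echo_indent, line, end="")
        captured += line
    return p.wait(), captured

print(execute_echo(["echo", "hello"]))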
1,346
aio-libs/aiodocker
aiodocker/docker.py
Docker._query_json
async def _query_json( self, path, method="GET", *, params=None, data=None, headers=None, timeout=None ): """ A shorthand of _query() that treats the input as JSON. """ if headers is None: headers = {} headers["content-type"] = "application/json" if not isinstance(data, (str, bytes)): data = json.dumps(data) response = await self._query( path, method, params=params, data=data, headers=headers, timeout=timeout ) data = await parse_result(response) return data
python
async def _query_json( self, path, method="GET", *, params=None, data=None, headers=None, timeout=None ): """ A shorthand of _query() that treats the input as JSON. """ if headers is None: headers = {} headers["content-type"] = "application/json" if not isinstance(data, (str, bytes)): data = json.dumps(data) response = await self._query( path, method, params=params, data=data, headers=headers, timeout=timeout ) data = await parse_result(response) return data
['async', 'def', '_query_json', '(', 'self', ',', 'path', ',', 'method', '=', '"GET"', ',', '*', ',', 'params', '=', 'None', ',', 'data', '=', 'None', ',', 'headers', '=', 'None', ',', 'timeout', '=', 'None', ')', ':', 'if', 'headers', 'is', 'None', ':', 'headers', '=', '{', '}', 'headers', '[', '"content-type"', ']', '=', '"application/json"', 'if', 'not', 'isinstance', '(', 'data', ',', '(', 'str', ',', 'bytes', ')', ')', ':', 'data', '=', 'json', '.', 'dumps', '(', 'data', ')', 'response', '=', 'await', 'self', '.', '_query', '(', 'path', ',', 'method', ',', 'params', '=', 'params', ',', 'data', '=', 'data', ',', 'headers', '=', 'headers', ',', 'timeout', '=', 'timeout', ')', 'data', '=', 'await', 'parse_result', '(', 'response', ')', 'return', 'data']
A shorthand of _query() that treats the input as JSON.
['A', 'shorthand', 'of', '_query', '()', 'that', 'treats', 'the', 'input', 'as', 'JSON', '.']
train
https://github.com/aio-libs/aiodocker/blob/88d0285ddba8e606ff684278e0a831347209189c/aiodocker/docker.py#L176-L191
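Separated from the HTTP call, the JSON handling above boils down to: force a JSON content type and serialise anything that is not already a str/bytes body. A tiny standalone sketch of that preparation step; the payload is a placeholder and no request is made:

import json

def prepare_json_request(data=None, headers=None):
    # Mirrors the header/body normalisation done before the HTTP call.
    headers = dict(headers or {})
    headers["content-type"] = "application/json"
    if not isinstance(data, (str, bytes)):
        data = json.dumps(data)
    return headers, data

headers, body = prepare_json_request({"Image": "alpine", "Cmd": ["true"]})
print(headers)
print(body)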
1,347
asmodehn/filefinder2
filefinder2/util.py
module_for_loader
def module_for_loader(fxn): """Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. """ warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) @functools.wraps(fxn) def module_for_loader_wrapper(self, fullname, *args, **kwargs): with _module_to_load(fullname) as module: module.__loader__ = self try: is_package = self.is_package(fullname) except (ImportError, AttributeError): pass else: if is_package: module.__package__ = fullname else: module.__package__ = fullname.rpartition('.')[0] # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) return module_for_loader_wrapper
python
def module_for_loader(fxn): """Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. """ warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) @functools.wraps(fxn) def module_for_loader_wrapper(self, fullname, *args, **kwargs): with _module_to_load(fullname) as module: module.__loader__ = self try: is_package = self.is_package(fullname) except (ImportError, AttributeError): pass else: if is_package: module.__package__ = fullname else: module.__package__ = fullname.rpartition('.')[0] # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) return module_for_loader_wrapper
['def', 'module_for_loader', '(', 'fxn', ')', ':', 'warnings', '.', 'warn', '(', "'The import system now takes care of this automatically.'", ',', 'DeprecationWarning', ',', 'stacklevel', '=', '2', ')', '@', 'functools', '.', 'wraps', '(', 'fxn', ')', 'def', 'module_for_loader_wrapper', '(', 'self', ',', 'fullname', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'with', '_module_to_load', '(', 'fullname', ')', 'as', 'module', ':', 'module', '.', '__loader__', '=', 'self', 'try', ':', 'is_package', '=', 'self', '.', 'is_package', '(', 'fullname', ')', 'except', '(', 'ImportError', ',', 'AttributeError', ')', ':', 'pass', 'else', ':', 'if', 'is_package', ':', 'module', '.', '__package__', '=', 'fullname', 'else', ':', 'module', '.', '__package__', '=', 'fullname', '.', 'rpartition', '(', "'.'", ')', '[', '0', ']', '# If __package__ was not set above, __import__() will do it later.', 'return', 'fxn', '(', 'self', ',', 'module', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'module_for_loader_wrapper']
Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument.
['Decorator', 'to', 'handle', 'selecting', 'the', 'proper', 'module', 'for', 'loaders', '.', 'The', 'decorated', 'function', 'is', 'passed', 'the', 'module', 'to', 'use', 'instead', 'of', 'the', 'module', 'name', '.', 'The', 'module', 'passed', 'in', 'to', 'the', 'function', 'is', 'either', 'from', 'sys', '.', 'modules', 'if', 'it', 'already', 'exists', 'or', 'is', 'a', 'new', 'module', '.', 'If', 'the', 'module', 'is', 'new', 'then', '__name__', 'is', 'set', 'the', 'first', 'argument', 'to', 'the', 'method', '__loader__', 'is', 'set', 'to', 'self', 'and', '__package__', 'is', 'set', 'accordingly', '(', 'if', 'self', '.', 'is_package', '()', 'is', 'defined', ')', 'will', 'be', 'set', 'before', 'it', 'is', 'passed', 'to', 'the', 'decorated', 'function', '(', 'if', 'self', '.', 'is_package', '()', 'does', 'not', 'work', 'for', 'the', 'module', 'it', 'will', 'be', 'set', 'post', '-', 'load', ')', '.', 'If', 'an', 'exception', 'is', 'raised', 'and', 'the', 'decorator', 'created', 'the', 'module', 'it', 'is', 'subsequently', 'removed', 'from', 'sys', '.', 'modules', '.', 'The', 'decorator', 'assumes', 'that', 'the', 'decorated', 'function', 'takes', 'the', 'module', 'name', 'as', 'the', 'second', 'argument', '.']
train
https://github.com/asmodehn/filefinder2/blob/3f0b211ce11a34562e2a2160e039ae5290b68d6b/filefinder2/util.py#L169-L201
1,348
kgori/treeCl
treeCl/bootstrap.py
optimise_levenberg_marquardt
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001): """ Optimise value of x using levenberg-marquardt """ x_new = x x_old = x-1 # dummy value f_old = f(x_new, a, c) while np.abs(x_new - x_old).sum() > tolerance: x_old = x_new x_tmp = levenberg_marquardt_update(x_old, a, c, damping) f_new = f(x_tmp, a, c) if f_new < f_old: damping = np.max(damping/10., 1e-20) x_new = x_tmp f_old = f_new else: damping *= 10. return x_new
python
def optimise_levenberg_marquardt(x, a, c, damping=0.001, tolerance=0.001): """ Optimise value of x using levenberg-marquardt """ x_new = x x_old = x-1 # dummy value f_old = f(x_new, a, c) while np.abs(x_new - x_old).sum() > tolerance: x_old = x_new x_tmp = levenberg_marquardt_update(x_old, a, c, damping) f_new = f(x_tmp, a, c) if f_new < f_old: damping = np.max(damping/10., 1e-20) x_new = x_tmp f_old = f_new else: damping *= 10. return x_new
['def', 'optimise_levenberg_marquardt', '(', 'x', ',', 'a', ',', 'c', ',', 'damping', '=', '0.001', ',', 'tolerance', '=', '0.001', ')', ':', 'x_new', '=', 'x', 'x_old', '=', 'x', '-', '1', '# dummy value', 'f_old', '=', 'f', '(', 'x_new', ',', 'a', ',', 'c', ')', 'while', 'np', '.', 'abs', '(', 'x_new', '-', 'x_old', ')', '.', 'sum', '(', ')', '>', 'tolerance', ':', 'x_old', '=', 'x_new', 'x_tmp', '=', 'levenberg_marquardt_update', '(', 'x_old', ',', 'a', ',', 'c', ',', 'damping', ')', 'f_new', '=', 'f', '(', 'x_tmp', ',', 'a', ',', 'c', ')', 'if', 'f_new', '<', 'f_old', ':', 'damping', '=', 'np', '.', 'max', '(', 'damping', '/', '10.', ',', '1e-20', ')', 'x_new', '=', 'x_tmp', 'f_old', '=', 'f_new', 'else', ':', 'damping', '*=', '10.', 'return', 'x_new']
Optimise value of x using levenberg-marquardt
['Optimise', 'value', 'of', 'x', 'using', 'levenberg', '-', 'marquardt']
train
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/bootstrap.py#L143-L160
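Two details are worth flagging in the routine above: `np.max(damping/10., 1e-20)` passes 1e-20 as the axis argument, so the damping floor was almost certainly meant to be `np.maximum`, and `f_old` is only refreshed on accepted steps, which is the usual accept/reject bookkeeping. A self-contained toy version for a one-parameter least-squares fit; the model, data, and update rule here are illustrative and are not treeCl's:

import numpy as np

t = np.linspace(0.0, 1.0, 20)
y = np.exp(0.7 * t)                      # synthetic data, true b = 0.7

def residuals(b):
    return y - np.exp(b * t)

def cost(b):
    return float(np.sum(residuals(b) ** 2))

def lm_update(b, damping):
    r = residuals(b)
    J = t * np.exp(b * t)                # derivative of the model w.r.t. b
    return b + (J @ r) / (J @ J + damping)

def optimise(b=0.0, damping=1e-3, tolerance=1e-10):
    f_old = cost(b)
    while True:
        b_trial = lm_update(b, damping)
        if abs(b_trial - b) < tolerance:
            return b_trial
        f_new = cost(b_trial)
        if f_new < f_old:                # accept: relax damping, with a floor
            damping = np.maximum(damping / 10.0, 1e-20)
            b, f_old = b_trial, f_new
        else:                            # reject: increase damping and retry
            damping *= 10.0

print(optimise())                        # ~0.7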
1,349
pypa/pipenv
pipenv/vendor/tomlkit/parser.py
Parser._parse_table
def _parse_table( self, parent_name=None ): # type: (Optional[str]) -> Tuple[Key, Union[Table, AoT]] """ Parses a table element. """ if self._current != "[": raise self.parse_error( InternalParserError, "_parse_table() called on non-bracket character." ) indent = self.extract() self.inc() # Skip opening bracket if self.end(): raise self.parse_error(UnexpectedEofError) is_aot = False if self._current == "[": if not self.inc(): raise self.parse_error(UnexpectedEofError) is_aot = True # Key self.mark() while self._current != "]" and self.inc(): if self.end(): raise self.parse_error(UnexpectedEofError) pass name = self.extract() if not name.strip(): raise self.parse_error(EmptyTableNameError) key = Key(name, sep="") name_parts = tuple(self._split_table_name(name)) missing_table = False if parent_name: parent_name_parts = tuple(self._split_table_name(parent_name)) else: parent_name_parts = tuple() if len(name_parts) > len(parent_name_parts) + 1: missing_table = True name_parts = name_parts[len(parent_name_parts) :] values = Container(True) self.inc() # Skip closing bracket if is_aot: # TODO: Verify close bracket self.inc() cws, comment, trail = self._parse_comment_trail() result = Null() if len(name_parts) > 1: if missing_table: # Missing super table # i.e. a table initialized like this: [foo.bar] # without initializing [foo] # # So we have to create the parent tables table = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and name_parts[0].key in self._aot_stack, is_super_table=True, name=name_parts[0].key, ) result = table key = name_parts[0] for i, _name in enumerate(name_parts[1:]): if _name in table: child = table[_name] else: child = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and i == len(name_parts[1:]) - 1, is_super_table=i < len(name_parts[1:]) - 1, name=_name.key, display_name=name if i == len(name_parts[1:]) - 1 else None, ) if is_aot and i == len(name_parts[1:]) - 1: table.append(_name, AoT([child], name=table.name, parsed=True)) else: table.append(_name, child) table = child values = table.value else: if name_parts: key = name_parts[0] while not self.end(): item = self._parse_item() if item: _key, item = item if not self._merge_ws(item, values): if _key is not None and _key.is_dotted(): self._handle_dotted_key(values, _key, item) else: values.append(_key, item) else: if self._current == "[": is_aot_next, name_next = self._peek_table() if self._is_child(name, name_next): key_next, table_next = self._parse_table(name) values.append(key_next, table_next) # Picking up any sibling while not self.end(): _, name_next = self._peek_table() if not self._is_child(name, name_next): break key_next, table_next = self._parse_table(name) values.append(key_next, table_next) break else: raise self.parse_error( InternalParserError, "_parse_item() returned None on a non-bracket character.", ) if isinstance(result, Null): result = Table( values, Trivia(indent, cws, comment, trail), is_aot, name=name, display_name=name, ) if is_aot and (not self._aot_stack or name != self._aot_stack[-1]): result = self._parse_aot(result, name) return key, result
python
def _parse_table( self, parent_name=None ): # type: (Optional[str]) -> Tuple[Key, Union[Table, AoT]] """ Parses a table element. """ if self._current != "[": raise self.parse_error( InternalParserError, "_parse_table() called on non-bracket character." ) indent = self.extract() self.inc() # Skip opening bracket if self.end(): raise self.parse_error(UnexpectedEofError) is_aot = False if self._current == "[": if not self.inc(): raise self.parse_error(UnexpectedEofError) is_aot = True # Key self.mark() while self._current != "]" and self.inc(): if self.end(): raise self.parse_error(UnexpectedEofError) pass name = self.extract() if not name.strip(): raise self.parse_error(EmptyTableNameError) key = Key(name, sep="") name_parts = tuple(self._split_table_name(name)) missing_table = False if parent_name: parent_name_parts = tuple(self._split_table_name(parent_name)) else: parent_name_parts = tuple() if len(name_parts) > len(parent_name_parts) + 1: missing_table = True name_parts = name_parts[len(parent_name_parts) :] values = Container(True) self.inc() # Skip closing bracket if is_aot: # TODO: Verify close bracket self.inc() cws, comment, trail = self._parse_comment_trail() result = Null() if len(name_parts) > 1: if missing_table: # Missing super table # i.e. a table initialized like this: [foo.bar] # without initializing [foo] # # So we have to create the parent tables table = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and name_parts[0].key in self._aot_stack, is_super_table=True, name=name_parts[0].key, ) result = table key = name_parts[0] for i, _name in enumerate(name_parts[1:]): if _name in table: child = table[_name] else: child = Table( Container(True), Trivia(indent, cws, comment, trail), is_aot and i == len(name_parts[1:]) - 1, is_super_table=i < len(name_parts[1:]) - 1, name=_name.key, display_name=name if i == len(name_parts[1:]) - 1 else None, ) if is_aot and i == len(name_parts[1:]) - 1: table.append(_name, AoT([child], name=table.name, parsed=True)) else: table.append(_name, child) table = child values = table.value else: if name_parts: key = name_parts[0] while not self.end(): item = self._parse_item() if item: _key, item = item if not self._merge_ws(item, values): if _key is not None and _key.is_dotted(): self._handle_dotted_key(values, _key, item) else: values.append(_key, item) else: if self._current == "[": is_aot_next, name_next = self._peek_table() if self._is_child(name, name_next): key_next, table_next = self._parse_table(name) values.append(key_next, table_next) # Picking up any sibling while not self.end(): _, name_next = self._peek_table() if not self._is_child(name, name_next): break key_next, table_next = self._parse_table(name) values.append(key_next, table_next) break else: raise self.parse_error( InternalParserError, "_parse_item() returned None on a non-bracket character.", ) if isinstance(result, Null): result = Table( values, Trivia(indent, cws, comment, trail), is_aot, name=name, display_name=name, ) if is_aot and (not self._aot_stack or name != self._aot_stack[-1]): result = self._parse_aot(result, name) return key, result
['def', '_parse_table', '(', 'self', ',', 'parent_name', '=', 'None', ')', ':', '# type: (Optional[str]) -> Tuple[Key, Union[Table, AoT]]', 'if', 'self', '.', '_current', '!=', '"["', ':', 'raise', 'self', '.', 'parse_error', '(', 'InternalParserError', ',', '"_parse_table() called on non-bracket character."', ')', 'indent', '=', 'self', '.', 'extract', '(', ')', 'self', '.', 'inc', '(', ')', '# Skip opening bracket', 'if', 'self', '.', 'end', '(', ')', ':', 'raise', 'self', '.', 'parse_error', '(', 'UnexpectedEofError', ')', 'is_aot', '=', 'False', 'if', 'self', '.', '_current', '==', '"["', ':', 'if', 'not', 'self', '.', 'inc', '(', ')', ':', 'raise', 'self', '.', 'parse_error', '(', 'UnexpectedEofError', ')', 'is_aot', '=', 'True', '# Key', 'self', '.', 'mark', '(', ')', 'while', 'self', '.', '_current', '!=', '"]"', 'and', 'self', '.', 'inc', '(', ')', ':', 'if', 'self', '.', 'end', '(', ')', ':', 'raise', 'self', '.', 'parse_error', '(', 'UnexpectedEofError', ')', 'pass', 'name', '=', 'self', '.', 'extract', '(', ')', 'if', 'not', 'name', '.', 'strip', '(', ')', ':', 'raise', 'self', '.', 'parse_error', '(', 'EmptyTableNameError', ')', 'key', '=', 'Key', '(', 'name', ',', 'sep', '=', '""', ')', 'name_parts', '=', 'tuple', '(', 'self', '.', '_split_table_name', '(', 'name', ')', ')', 'missing_table', '=', 'False', 'if', 'parent_name', ':', 'parent_name_parts', '=', 'tuple', '(', 'self', '.', '_split_table_name', '(', 'parent_name', ')', ')', 'else', ':', 'parent_name_parts', '=', 'tuple', '(', ')', 'if', 'len', '(', 'name_parts', ')', '>', 'len', '(', 'parent_name_parts', ')', '+', '1', ':', 'missing_table', '=', 'True', 'name_parts', '=', 'name_parts', '[', 'len', '(', 'parent_name_parts', ')', ':', ']', 'values', '=', 'Container', '(', 'True', ')', 'self', '.', 'inc', '(', ')', '# Skip closing bracket', 'if', 'is_aot', ':', '# TODO: Verify close bracket', 'self', '.', 'inc', '(', ')', 'cws', ',', 'comment', ',', 'trail', '=', 'self', '.', '_parse_comment_trail', '(', ')', 'result', '=', 'Null', '(', ')', 'if', 'len', '(', 'name_parts', ')', '>', '1', ':', 'if', 'missing_table', ':', '# Missing super table', '# i.e. 
a table initialized like this: [foo.bar]', '# without initializing [foo]', '#', '# So we have to create the parent tables', 'table', '=', 'Table', '(', 'Container', '(', 'True', ')', ',', 'Trivia', '(', 'indent', ',', 'cws', ',', 'comment', ',', 'trail', ')', ',', 'is_aot', 'and', 'name_parts', '[', '0', ']', '.', 'key', 'in', 'self', '.', '_aot_stack', ',', 'is_super_table', '=', 'True', ',', 'name', '=', 'name_parts', '[', '0', ']', '.', 'key', ',', ')', 'result', '=', 'table', 'key', '=', 'name_parts', '[', '0', ']', 'for', 'i', ',', '_name', 'in', 'enumerate', '(', 'name_parts', '[', '1', ':', ']', ')', ':', 'if', '_name', 'in', 'table', ':', 'child', '=', 'table', '[', '_name', ']', 'else', ':', 'child', '=', 'Table', '(', 'Container', '(', 'True', ')', ',', 'Trivia', '(', 'indent', ',', 'cws', ',', 'comment', ',', 'trail', ')', ',', 'is_aot', 'and', 'i', '==', 'len', '(', 'name_parts', '[', '1', ':', ']', ')', '-', '1', ',', 'is_super_table', '=', 'i', '<', 'len', '(', 'name_parts', '[', '1', ':', ']', ')', '-', '1', ',', 'name', '=', '_name', '.', 'key', ',', 'display_name', '=', 'name', 'if', 'i', '==', 'len', '(', 'name_parts', '[', '1', ':', ']', ')', '-', '1', 'else', 'None', ',', ')', 'if', 'is_aot', 'and', 'i', '==', 'len', '(', 'name_parts', '[', '1', ':', ']', ')', '-', '1', ':', 'table', '.', 'append', '(', '_name', ',', 'AoT', '(', '[', 'child', ']', ',', 'name', '=', 'table', '.', 'name', ',', 'parsed', '=', 'True', ')', ')', 'else', ':', 'table', '.', 'append', '(', '_name', ',', 'child', ')', 'table', '=', 'child', 'values', '=', 'table', '.', 'value', 'else', ':', 'if', 'name_parts', ':', 'key', '=', 'name_parts', '[', '0', ']', 'while', 'not', 'self', '.', 'end', '(', ')', ':', 'item', '=', 'self', '.', '_parse_item', '(', ')', 'if', 'item', ':', '_key', ',', 'item', '=', 'item', 'if', 'not', 'self', '.', '_merge_ws', '(', 'item', ',', 'values', ')', ':', 'if', '_key', 'is', 'not', 'None', 'and', '_key', '.', 'is_dotted', '(', ')', ':', 'self', '.', '_handle_dotted_key', '(', 'values', ',', '_key', ',', 'item', ')', 'else', ':', 'values', '.', 'append', '(', '_key', ',', 'item', ')', 'else', ':', 'if', 'self', '.', '_current', '==', '"["', ':', 'is_aot_next', ',', 'name_next', '=', 'self', '.', '_peek_table', '(', ')', 'if', 'self', '.', '_is_child', '(', 'name', ',', 'name_next', ')', ':', 'key_next', ',', 'table_next', '=', 'self', '.', '_parse_table', '(', 'name', ')', 'values', '.', 'append', '(', 'key_next', ',', 'table_next', ')', '# Picking up any sibling', 'while', 'not', 'self', '.', 'end', '(', ')', ':', '_', ',', 'name_next', '=', 'self', '.', '_peek_table', '(', ')', 'if', 'not', 'self', '.', '_is_child', '(', 'name', ',', 'name_next', ')', ':', 'break', 'key_next', ',', 'table_next', '=', 'self', '.', '_parse_table', '(', 'name', ')', 'values', '.', 'append', '(', 'key_next', ',', 'table_next', ')', 'break', 'else', ':', 'raise', 'self', '.', 'parse_error', '(', 'InternalParserError', ',', '"_parse_item() returned None on a non-bracket character."', ',', ')', 'if', 'isinstance', '(', 'result', ',', 'Null', ')', ':', 'result', '=', 'Table', '(', 'values', ',', 'Trivia', '(', 'indent', ',', 'cws', ',', 'comment', ',', 'trail', ')', ',', 'is_aot', ',', 'name', '=', 'name', ',', 'display_name', '=', 'name', ',', ')', 'if', 'is_aot', 'and', '(', 'not', 'self', '.', '_aot_stack', 'or', 'name', '!=', 'self', '.', '_aot_stack', '[', '-', '1', ']', ')', ':', 'result', '=', 'self', '.', '_parse_aot', '(', 'result', ',', 'name', ')', 'return', 'key', ',', 'result']
Parses a table element.
['Parses', 'a', 'table', 'element', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/tomlkit/parser.py#L855-L1005
1,350
bigchaindb/bigchaindb-driver
bigchaindb_driver/common/transaction.py
Transaction.inputs_valid
def inputs_valid(self, outputs=None): """Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid. """ if self.operation == Transaction.CREATE: # NOTE: Since in the case of a `CREATE`-transaction we do not have # to check for outputs, we're just submitting dummy # values to the actual method. This simplifies it's logic # greatly, as we do not have to check against `None` values. return self._inputs_valid(['dummyvalue' for _ in self.inputs]) elif self.operation == Transaction.TRANSFER: return self._inputs_valid([output.fulfillment.condition_uri for output in outputs]) else: allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS) raise TypeError('`operation` must be one of {}' .format(allowed_ops))
python
def inputs_valid(self, outputs=None): """Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid. """ if self.operation == Transaction.CREATE: # NOTE: Since in the case of a `CREATE`-transaction we do not have # to check for outputs, we're just submitting dummy # values to the actual method. This simplifies it's logic # greatly, as we do not have to check against `None` values. return self._inputs_valid(['dummyvalue' for _ in self.inputs]) elif self.operation == Transaction.TRANSFER: return self._inputs_valid([output.fulfillment.condition_uri for output in outputs]) else: allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS) raise TypeError('`operation` must be one of {}' .format(allowed_ops))
['def', 'inputs_valid', '(', 'self', ',', 'outputs', '=', 'None', ')', ':', 'if', 'self', '.', 'operation', '==', 'Transaction', '.', 'CREATE', ':', '# NOTE: Since in the case of a `CREATE`-transaction we do not have', "# to check for outputs, we're just submitting dummy", "# values to the actual method. This simplifies it's logic", '# greatly, as we do not have to check against `None` values.', 'return', 'self', '.', '_inputs_valid', '(', '[', "'dummyvalue'", 'for', '_', 'in', 'self', '.', 'inputs', ']', ')', 'elif', 'self', '.', 'operation', '==', 'Transaction', '.', 'TRANSFER', ':', 'return', 'self', '.', '_inputs_valid', '(', '[', 'output', '.', 'fulfillment', '.', 'condition_uri', 'for', 'output', 'in', 'outputs', ']', ')', 'else', ':', 'allowed_ops', '=', "', '", '.', 'join', '(', 'self', '.', '__class__', '.', 'ALLOWED_OPERATIONS', ')', 'raise', 'TypeError', '(', "'`operation` must be one of {}'", '.', 'format', '(', 'allowed_ops', ')', ')']
Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid.
['Validates', 'the', 'Inputs', 'in', 'the', 'Transaction', 'against', 'given', 'Outputs', '.']
train
https://github.com/bigchaindb/bigchaindb-driver/blob/c294a535f0696bd19483ae11a4882b74e6fc061e/bigchaindb_driver/common/transaction.py#L945-L975
1,351
tensorflow/cleverhans
examples/multigpu_advtrain/make_model.py
make_basic_ngpu
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs): """ Create a multi-GPU model similar to the basic cnn in the tutorials. """ model = make_basic_cnn() layers = model.layers model = MLPnGPU(nb_classes, layers, input_shape) return model
python
def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs): """ Create a multi-GPU model similar to the basic cnn in the tutorials. """ model = make_basic_cnn() layers = model.layers model = MLPnGPU(nb_classes, layers, input_shape) return model
['def', 'make_basic_ngpu', '(', 'nb_classes', '=', '10', ',', 'input_shape', '=', '(', 'None', ',', '28', ',', '28', ',', '1', ')', ',', '*', '*', 'kwargs', ')', ':', 'model', '=', 'make_basic_cnn', '(', ')', 'layers', '=', 'model', '.', 'layers', 'model', '=', 'MLPnGPU', '(', 'nb_classes', ',', 'layers', ',', 'input_shape', ')', 'return', 'model']
Create a multi-GPU model similar to the basic cnn in the tutorials.
['Create', 'a', 'multi', '-', 'GPU', 'model', 'similar', 'to', 'the', 'basic', 'cnn', 'in', 'the', 'tutorials', '.']
train
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/multigpu_advtrain/make_model.py#L27-L35
1,352
evhub/coconut
coconut/compiler/compiler.py
Compiler.case_stmt_handle
def case_stmt_handle(self, loc, tokens): """Process case blocks.""" if len(tokens) == 2: item, cases = tokens default = None elif len(tokens) == 3: item, cases, default = tokens else: raise CoconutInternalException("invalid case tokens", tokens) check_var = case_check_var + "_" + str(self.case_check_count) self.case_check_count += 1 out = ( match_to_var + " = " + item + "\n" + match_case_tokens(loc, cases[0], check_var, True) ) for case in cases[1:]: out += ( "if not " + check_var + ":\n" + openindent + match_case_tokens(loc, case, check_var, False) + closeindent ) if default is not None: out += "if not " + check_var + default return out
python
def case_stmt_handle(self, loc, tokens): """Process case blocks.""" if len(tokens) == 2: item, cases = tokens default = None elif len(tokens) == 3: item, cases, default = tokens else: raise CoconutInternalException("invalid case tokens", tokens) check_var = case_check_var + "_" + str(self.case_check_count) self.case_check_count += 1 out = ( match_to_var + " = " + item + "\n" + match_case_tokens(loc, cases[0], check_var, True) ) for case in cases[1:]: out += ( "if not " + check_var + ":\n" + openindent + match_case_tokens(loc, case, check_var, False) + closeindent ) if default is not None: out += "if not " + check_var + default return out
['def', 'case_stmt_handle', '(', 'self', ',', 'loc', ',', 'tokens', ')', ':', 'if', 'len', '(', 'tokens', ')', '==', '2', ':', 'item', ',', 'cases', '=', 'tokens', 'default', '=', 'None', 'elif', 'len', '(', 'tokens', ')', '==', '3', ':', 'item', ',', 'cases', ',', 'default', '=', 'tokens', 'else', ':', 'raise', 'CoconutInternalException', '(', '"invalid case tokens"', ',', 'tokens', ')', 'check_var', '=', 'case_check_var', '+', '"_"', '+', 'str', '(', 'self', '.', 'case_check_count', ')', 'self', '.', 'case_check_count', '+=', '1', 'out', '=', '(', 'match_to_var', '+', '" = "', '+', 'item', '+', '"\\n"', '+', 'match_case_tokens', '(', 'loc', ',', 'cases', '[', '0', ']', ',', 'check_var', ',', 'True', ')', ')', 'for', 'case', 'in', 'cases', '[', '1', ':', ']', ':', 'out', '+=', '(', '"if not "', '+', 'check_var', '+', '":\\n"', '+', 'openindent', '+', 'match_case_tokens', '(', 'loc', ',', 'case', ',', 'check_var', ',', 'False', ')', '+', 'closeindent', ')', 'if', 'default', 'is', 'not', 'None', ':', 'out', '+=', '"if not "', '+', 'check_var', '+', 'default', 'return', 'out']
Process case blocks.
['Process', 'case', 'blocks', '.']
train
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L1851-L1873
1,353
tanghaibao/jcvi
jcvi/formats/posmap.py
bed
def bed(args): """ %prog bed frgscffile Convert the frgscf posmap file to bed format. """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) frgscffile, = args bedfile = frgscffile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") fp = open(frgscffile) for row in fp: f = FrgScfLine(row) print(f.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
python
def bed(args): """ %prog bed frgscffile Convert the frgscf posmap file to bed format. """ p = OptionParser(bed.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) frgscffile, = args bedfile = frgscffile.rsplit(".", 1)[0] + ".bed" fw = open(bedfile, "w") fp = open(frgscffile) for row in fp: f = FrgScfLine(row) print(f.bedline, file=fw) logging.debug("File written to `{0}`.".format(bedfile)) return bedfile
['def', 'bed', '(', 'args', ')', ':', 'p', '=', 'OptionParser', '(', 'bed', '.', '__doc__', ')', 'opts', ',', 'args', '=', 'p', '.', 'parse_args', '(', 'args', ')', 'if', 'len', '(', 'args', ')', '!=', '1', ':', 'sys', '.', 'exit', '(', 'not', 'p', '.', 'print_help', '(', ')', ')', 'frgscffile', ',', '=', 'args', 'bedfile', '=', 'frgscffile', '.', 'rsplit', '(', '"."', ',', '1', ')', '[', '0', ']', '+', '".bed"', 'fw', '=', 'open', '(', 'bedfile', ',', '"w"', ')', 'fp', '=', 'open', '(', 'frgscffile', ')', 'for', 'row', 'in', 'fp', ':', 'f', '=', 'FrgScfLine', '(', 'row', ')', 'print', '(', 'f', '.', 'bedline', ',', 'file', '=', 'fw', ')', 'logging', '.', 'debug', '(', '"File written to `{0}`."', '.', 'format', '(', 'bedfile', ')', ')', 'return', 'bedfile']
%prog bed frgscffile Convert the frgscf posmap file to bed format.
['%prog', 'bed', 'frgscffile']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/posmap.py#L241-L264
1,354
B2W-BIT/aiologger
aiologger/logger.py
Logger.exception
def exception( # type: ignore self, msg, *args, exc_info=True, **kwargs ) -> Task: """ Convenience method for logging an ERROR with exception information. """ return self.error(msg, *args, exc_info=exc_info, **kwargs)
python
def exception( # type: ignore self, msg, *args, exc_info=True, **kwargs ) -> Task: """ Convenience method for logging an ERROR with exception information. """ return self.error(msg, *args, exc_info=exc_info, **kwargs)
['def', 'exception', '(', '# type: ignore', 'self', ',', 'msg', ',', '*', 'args', ',', 'exc_info', '=', 'True', ',', '*', '*', 'kwargs', ')', '->', 'Task', ':', 'return', 'self', '.', 'error', '(', 'msg', ',', '*', 'args', ',', 'exc_info', '=', 'exc_info', ',', '*', '*', 'kwargs', ')']
Convenience method for logging an ERROR with exception information.
['Convenience', 'method', 'for', 'logging', 'an', 'ERROR', 'with', 'exception', 'information', '.']
train
https://github.com/B2W-BIT/aiologger/blob/0b366597a8305d5577a267305e81d5e4784cd398/aiologger/logger.py#L218-L224
1,355
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/common.py
validate_read_preference_tags
def validate_read_preference_tags(name, value): """Parse readPreferenceTags if passed as a client kwarg. """ if not isinstance(value, list): value = [value] tag_sets = [] for tag_set in value: if tag_set == '': tag_sets.append({}) continue try: tag_sets.append(dict([tag.split(":") for tag in tag_set.split(",")])) except Exception: raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) return tag_sets
python
def validate_read_preference_tags(name, value): """Parse readPreferenceTags if passed as a client kwarg. """ if not isinstance(value, list): value = [value] tag_sets = [] for tag_set in value: if tag_set == '': tag_sets.append({}) continue try: tag_sets.append(dict([tag.split(":") for tag in tag_set.split(",")])) except Exception: raise ValueError("%r not a valid " "value for %s" % (tag_set, name)) return tag_sets
['def', 'validate_read_preference_tags', '(', 'name', ',', 'value', ')', ':', 'if', 'not', 'isinstance', '(', 'value', ',', 'list', ')', ':', 'value', '=', '[', 'value', ']', 'tag_sets', '=', '[', ']', 'for', 'tag_set', 'in', 'value', ':', 'if', 'tag_set', '==', "''", ':', 'tag_sets', '.', 'append', '(', '{', '}', ')', 'continue', 'try', ':', 'tag_sets', '.', 'append', '(', 'dict', '(', '[', 'tag', '.', 'split', '(', '":"', ')', 'for', 'tag', 'in', 'tag_set', '.', 'split', '(', '","', ')', ']', ')', ')', 'except', 'Exception', ':', 'raise', 'ValueError', '(', '"%r not a valid "', '"value for %s"', '%', '(', 'tag_set', ',', 'name', ')', ')', 'return', 'tag_sets']
Parse readPreferenceTags if passed as a client kwarg.
['Parse', 'readPreferenceTags', 'if', 'passed', 'as', 'a', 'client', 'kwarg', '.']
train
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/common.py#L338-L355
1,356
dsoprea/NsqSpinner
nsq/identify.py
Identify.output_buffer_size
def output_buffer_size(self, output_buffer_size_b): """output_buffer_size (nsqd 0.2.21+) the size in bytes of the buffer nsqd will use when writing to this client. Valid range: 64 <= output_buffer_size <= configured_max (-1 disables output buffering) --max-output-buffer-size (nsqd flag) controls the max Defaults to 16kb """ assert issubclass(output_buffer_size_b.__class__, int) return self.__push('output_buffer_size', output_buffer_size_b)
python
def output_buffer_size(self, output_buffer_size_b): """output_buffer_size (nsqd 0.2.21+) the size in bytes of the buffer nsqd will use when writing to this client. Valid range: 64 <= output_buffer_size <= configured_max (-1 disables output buffering) --max-output-buffer-size (nsqd flag) controls the max Defaults to 16kb """ assert issubclass(output_buffer_size_b.__class__, int) return self.__push('output_buffer_size', output_buffer_size_b)
['def', 'output_buffer_size', '(', 'self', ',', 'output_buffer_size_b', ')', ':', 'assert', 'issubclass', '(', 'output_buffer_size_b', '.', '__class__', ',', 'int', ')', 'return', 'self', '.', '__push', '(', "'output_buffer_size'", ',', 'output_buffer_size_b', ')']
output_buffer_size (nsqd 0.2.21+) the size in bytes of the buffer nsqd will use when writing to this client. Valid range: 64 <= output_buffer_size <= configured_max (-1 disables output buffering) --max-output-buffer-size (nsqd flag) controls the max Defaults to 16kb
['output_buffer_size', '(', 'nsqd', '0', '.', '2', '.', '21', '+', ')', 'the', 'size', 'in', 'bytes', 'of', 'the', 'buffer', 'nsqd', 'will', 'use', 'when', 'writing', 'to', 'this', 'client', '.']
train
https://github.com/dsoprea/NsqSpinner/blob/972237b8ddce737983bfed001fde52e5236be695/nsq/identify.py#L118-L132
1,357
HewlettPackard/python-hpOneView
hpOneView/resources/resource.py
ResourceClient.patch
def patch(self, id_or_uri, operation, path, value, timeout=-1, custom_headers=None): """ Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args: id_or_uri: Can be either the resource ID or the resource URI. operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Updated resource. """ patch_request_body = [{'op': operation, 'path': path, 'value': value}] return self.patch_request(id_or_uri=id_or_uri, body=patch_request_body, timeout=timeout, custom_headers=custom_headers)
python
def patch(self, id_or_uri, operation, path, value, timeout=-1, custom_headers=None): """ Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args: id_or_uri: Can be either the resource ID or the resource URI. operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Updated resource. """ patch_request_body = [{'op': operation, 'path': path, 'value': value}] return self.patch_request(id_or_uri=id_or_uri, body=patch_request_body, timeout=timeout, custom_headers=custom_headers)
['def', 'patch', '(', 'self', ',', 'id_or_uri', ',', 'operation', ',', 'path', ',', 'value', ',', 'timeout', '=', '-', '1', ',', 'custom_headers', '=', 'None', ')', ':', 'patch_request_body', '=', '[', '{', "'op'", ':', 'operation', ',', "'path'", ':', 'path', ',', "'value'", ':', 'value', '}', ']', 'return', 'self', '.', 'patch_request', '(', 'id_or_uri', '=', 'id_or_uri', ',', 'body', '=', 'patch_request_body', ',', 'timeout', '=', 'timeout', ',', 'custom_headers', '=', 'custom_headers', ')']
Uses the PATCH to update a resource. Only one operation can be performed in each PATCH call. Args: id_or_uri: Can be either the resource ID or the resource URI. operation: Patch operation path: Path value: Value timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Updated resource.
['Uses', 'the', 'PATCH', 'to', 'update', 'a', 'resource', '.']
train
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/resource.py#L1403-L1425
1,358
wroberts/fsed
fsed/fsed.py
detect_pattern_format
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries): ''' Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`: ''' tsv = True boundaries = on_word_boundaries with open_file(pattern_filename) as input_file: for line in input_file: line = line.decode(encoding) if line.count('\t') != 1: tsv = False if '\\b' in line: boundaries = True if boundaries and not tsv: break return tsv, boundaries
python
def detect_pattern_format(pattern_filename, encoding, on_word_boundaries): ''' Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`: ''' tsv = True boundaries = on_word_boundaries with open_file(pattern_filename) as input_file: for line in input_file: line = line.decode(encoding) if line.count('\t') != 1: tsv = False if '\\b' in line: boundaries = True if boundaries and not tsv: break return tsv, boundaries
['def', 'detect_pattern_format', '(', 'pattern_filename', ',', 'encoding', ',', 'on_word_boundaries', ')', ':', 'tsv', '=', 'True', 'boundaries', '=', 'on_word_boundaries', 'with', 'open_file', '(', 'pattern_filename', ')', 'as', 'input_file', ':', 'for', 'line', 'in', 'input_file', ':', 'line', '=', 'line', '.', 'decode', '(', 'encoding', ')', 'if', 'line', '.', 'count', '(', "'\\t'", ')', '!=', '1', ':', 'tsv', '=', 'False', 'if', "'\\\\b'", 'in', 'line', ':', 'boundaries', '=', 'True', 'if', 'boundaries', 'and', 'not', 'tsv', ':', 'break', 'return', 'tsv', ',', 'boundaries']
Automatically detects the pattern file format, and determines whether the Aho-Corasick string matching should pay attention to word boundaries or not. Arguments: - `pattern_filename`: - `encoding`: - `on_word_boundaries`:
['Automatically', 'detects', 'the', 'pattern', 'file', 'format', 'and', 'determines', 'whether', 'the', 'Aho', '-', 'Corasick', 'string', 'matching', 'should', 'pay', 'attention', 'to', 'word', 'boundaries', 'or', 'not', '.']
train
https://github.com/wroberts/fsed/blob/c0c1c5e0ea3a413ef679fdf71635f7f2e5d79ca2/fsed/fsed.py#L43-L65
1,359
GNS3/gns3-server
gns3server/compute/dynamips/nodes/ethernet_hub.py
EthernetHub.start_capture
def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB """ if port_number not in self._mappings: raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._mappings[port_number] data_link_type = data_link_type.lower() if data_link_type.startswith("dlt_"): data_link_type = data_link_type[4:] if nio.input_filter[0] is not None and nio.output_filter[0] is not None: raise DynamipsError("Port {} has already a filter applied".format(port_number)) yield from nio.bind_filter("both", "capture") yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('Ethernet hub "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number))
python
def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB """ if port_number not in self._mappings: raise DynamipsError("Port {} is not allocated".format(port_number)) nio = self._mappings[port_number] data_link_type = data_link_type.lower() if data_link_type.startswith("dlt_"): data_link_type = data_link_type[4:] if nio.input_filter[0] is not None and nio.output_filter[0] is not None: raise DynamipsError("Port {} has already a filter applied".format(port_number)) yield from nio.bind_filter("both", "capture") yield from nio.setup_filter("both", '{} "{}"'.format(data_link_type, output_file)) log.info('Ethernet hub "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name, id=self._id, port=port_number))
['def', 'start_capture', '(', 'self', ',', 'port_number', ',', 'output_file', ',', 'data_link_type', '=', '"DLT_EN10MB"', ')', ':', 'if', 'port_number', 'not', 'in', 'self', '.', '_mappings', ':', 'raise', 'DynamipsError', '(', '"Port {} is not allocated"', '.', 'format', '(', 'port_number', ')', ')', 'nio', '=', 'self', '.', '_mappings', '[', 'port_number', ']', 'data_link_type', '=', 'data_link_type', '.', 'lower', '(', ')', 'if', 'data_link_type', '.', 'startswith', '(', '"dlt_"', ')', ':', 'data_link_type', '=', 'data_link_type', '[', '4', ':', ']', 'if', 'nio', '.', 'input_filter', '[', '0', ']', 'is', 'not', 'None', 'and', 'nio', '.', 'output_filter', '[', '0', ']', 'is', 'not', 'None', ':', 'raise', 'DynamipsError', '(', '"Port {} has already a filter applied"', '.', 'format', '(', 'port_number', ')', ')', 'yield', 'from', 'nio', '.', 'bind_filter', '(', '"both"', ',', '"capture"', ')', 'yield', 'from', 'nio', '.', 'setup_filter', '(', '"both"', ',', '\'{} "{}"\'', '.', 'format', '(', 'data_link_type', ',', 'output_file', ')', ')', 'log', '.', 'info', '(', '\'Ethernet hub "{name}" [{id}]: starting packet capture on port {port}\'', '.', 'format', '(', 'name', '=', 'self', '.', '_name', ',', 'id', '=', 'self', '.', '_id', ',', 'port', '=', 'port_number', ')', ')']
Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
['Starts', 'a', 'packet', 'capture', '.']
train
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/compute/dynamips/nodes/ethernet_hub.py#L186-L212
1,360
StorjOld/heartbeat
heartbeat/util.py
KeyedPRF.eval
def eval(self, x): """This method returns the evaluation of the function with input x :param x: this is the input as a Long """ aes = AES.new(self.key, AES.MODE_CFB, "\0" * AES.block_size) while True: nonce = 0 data = KeyedPRF.pad(SHA256.new(str(x + nonce).encode()).digest(), (number.size(self.range) + 7) // 8) num = self.mask & number.bytes_to_long(aes.encrypt(data)) if (num < self.range): return num nonce += 1
python
def eval(self, x): """This method returns the evaluation of the function with input x :param x: this is the input as a Long """ aes = AES.new(self.key, AES.MODE_CFB, "\0" * AES.block_size) while True: nonce = 0 data = KeyedPRF.pad(SHA256.new(str(x + nonce).encode()).digest(), (number.size(self.range) + 7) // 8) num = self.mask & number.bytes_to_long(aes.encrypt(data)) if (num < self.range): return num nonce += 1
['def', 'eval', '(', 'self', ',', 'x', ')', ':', 'aes', '=', 'AES', '.', 'new', '(', 'self', '.', 'key', ',', 'AES', '.', 'MODE_CFB', ',', '"\\0"', '*', 'AES', '.', 'block_size', ')', 'while', 'True', ':', 'nonce', '=', '0', 'data', '=', 'KeyedPRF', '.', 'pad', '(', 'SHA256', '.', 'new', '(', 'str', '(', 'x', '+', 'nonce', ')', '.', 'encode', '(', ')', ')', '.', 'digest', '(', ')', ',', '(', 'number', '.', 'size', '(', 'self', '.', 'range', ')', '+', '7', ')', '//', '8', ')', 'num', '=', 'self', '.', 'mask', '&', 'number', '.', 'bytes_to_long', '(', 'aes', '.', 'encrypt', '(', 'data', ')', ')', 'if', '(', 'num', '<', 'self', '.', 'range', ')', ':', 'return', 'num', 'nonce', '+=', '1']
This method returns the evaluation of the function with input x :param x: this is the input as a Long
['This', 'method', 'returns', 'the', 'evaluation', 'of', 'the', 'function', 'with', 'input', 'x']
train
https://github.com/StorjOld/heartbeat/blob/4d54f2011f1e9f688073d4347bc51bb7bd682718/heartbeat/util.py#L83-L96
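Illustrative sketch (an assumption-laden stand-in, not the heartbeat implementation): the mask-and-reject idea the KeyedPRF.eval docstring relies on -- hash the input together with an incrementing nonce, mask the digest down to the bit width of the range, and resample until the value lands inside the range -- using SHA-256 from the standard library in place of the AES-CFB keystream used in the entry above.

import hashlib

def prf_into_range(key: bytes, x: int, rng: int) -> int:
    nbits = max(rng - 1, 1).bit_length()
    mask = (1 << nbits) - 1
    nonce = 0
    while True:
        digest = hashlib.sha256(key + str(x + nonce).encode()).digest()
        num = int.from_bytes(digest, 'big') & mask
        if num < rng:
            return num       # accepted: the masked value falls inside [0, rng)
        nonce += 1           # rejected: bump the nonce and sample again

print(prf_into_range(b'secret', 42, 1000))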
1,361
openid/python-openid
openid/extensions/draft/pape2.py
Response.parseExtensionArgs
def parseExtensionArgs(self, args, strict=False): """Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object. """ policies_str = args.get('auth_policies') if policies_str and policies_str != 'none': self.auth_policies = policies_str.split(' ') nist_level_str = args.get('nist_auth_level') if nist_level_str: try: nist_level = int(nist_level_str) except ValueError: if strict: raise ValueError('nist_auth_level must be an integer between ' 'zero and four, inclusive') else: self.nist_auth_level = None else: if 0 <= nist_level < 5: self.nist_auth_level = nist_level auth_time = args.get('auth_time') if auth_time: if TIME_VALIDATOR.match(auth_time): self.auth_time = auth_time elif strict: raise ValueError("auth_time must be in RFC3339 format")
python
def parseExtensionArgs(self, args, strict=False): """Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object. """ policies_str = args.get('auth_policies') if policies_str and policies_str != 'none': self.auth_policies = policies_str.split(' ') nist_level_str = args.get('nist_auth_level') if nist_level_str: try: nist_level = int(nist_level_str) except ValueError: if strict: raise ValueError('nist_auth_level must be an integer between ' 'zero and four, inclusive') else: self.nist_auth_level = None else: if 0 <= nist_level < 5: self.nist_auth_level = nist_level auth_time = args.get('auth_time') if auth_time: if TIME_VALIDATOR.match(auth_time): self.auth_time = auth_time elif strict: raise ValueError("auth_time must be in RFC3339 format")
['def', 'parseExtensionArgs', '(', 'self', ',', 'args', ',', 'strict', '=', 'False', ')', ':', 'policies_str', '=', 'args', '.', 'get', '(', "'auth_policies'", ')', 'if', 'policies_str', 'and', 'policies_str', '!=', "'none'", ':', 'self', '.', 'auth_policies', '=', 'policies_str', '.', 'split', '(', "' '", ')', 'nist_level_str', '=', 'args', '.', 'get', '(', "'nist_auth_level'", ')', 'if', 'nist_level_str', ':', 'try', ':', 'nist_level', '=', 'int', '(', 'nist_level_str', ')', 'except', 'ValueError', ':', 'if', 'strict', ':', 'raise', 'ValueError', '(', "'nist_auth_level must be an integer between '", "'zero and four, inclusive'", ')', 'else', ':', 'self', '.', 'nist_auth_level', '=', 'None', 'else', ':', 'if', '0', '<=', 'nist_level', '<', '5', ':', 'self', '.', 'nist_auth_level', '=', 'nist_level', 'auth_time', '=', 'args', '.', 'get', '(', "'auth_time'", ')', 'if', 'auth_time', ':', 'if', 'TIME_VALIDATOR', '.', 'match', '(', 'auth_time', ')', ':', 'self', '.', 'auth_time', '=', 'auth_time', 'elif', 'strict', ':', 'raise', 'ValueError', '(', '"auth_time must be in RFC3339 format"', ')']
Parse the provider authentication policy arguments into the internal state of this object @param args: unqualified provider authentication policy arguments @param strict: Whether to raise an exception when bad data is encountered @returns: None. The data is parsed into the internal fields of this object.
['Parse', 'the', 'provider', 'authentication', 'policy', 'arguments', 'into', 'the', 'internal', 'state', 'of', 'this', 'object']
train
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/extensions/draft/pape2.py#L211-L247
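Illustrative sketch (a stand-alone helper, not the python-openid API): the strict-versus-lenient parsing policy the parseExtensionArgs docstring describes, reduced to the nist_auth_level field -- a malformed value raises in strict mode and is quietly dropped otherwise, and out-of-range levels are discarded in both modes.

def parse_nist_level(raw, strict=False):
    try:
        level = int(raw)
    except (TypeError, ValueError):
        if strict:
            raise ValueError('nist_auth_level must be an integer between zero and four')
        return None          # lenient mode: ignore the bad value
    return level if 0 <= level < 5 else None

print(parse_nist_level('3'))     # -> 3
print(parse_nist_level('nine'))  # -> None (lenient mode swallows the error)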
1,362
PGower/PyCanvas
pycanvas/apis/content_migrations.py
list_migration_issues_courses
def list_migration_issues_courses(self, course_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
python
def list_migration_issues_courses(self, course_id, content_migration_id): """ List migration issues. Returns paginated migration issues """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - content_migration_id """ID""" path["content_migration_id"] = content_migration_id self.logger.debug("GET /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
['def', 'list_migration_issues_courses', '(', 'self', ',', 'course_id', ',', 'content_migration_id', ')', ':', 'path', '=', '{', '}', 'data', '=', '{', '}', 'params', '=', '{', '}', '# REQUIRED - PATH - course_id\r', '"""ID"""', 'path', '[', '"course_id"', ']', '=', 'course_id', '# REQUIRED - PATH - content_migration_id\r', '"""ID"""', 'path', '[', '"content_migration_id"', ']', '=', 'content_migration_id', 'self', '.', 'logger', '.', 'debug', '(', '"GET /api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}"', '.', 'format', '(', 'params', '=', 'params', ',', 'data', '=', 'data', ',', '*', '*', 'path', ')', ')', 'return', 'self', '.', 'generic_request', '(', '"GET"', ',', '"/api/v1/courses/{course_id}/content_migrations/{content_migration_id}/migration_issues"', '.', 'format', '(', '*', '*', 'path', ')', ',', 'data', '=', 'data', ',', 'params', '=', 'params', ',', 'all_pages', '=', 'True', ')']
List migration issues. Returns paginated migration issues
['List', 'migration', 'issues', '.', 'Returns', 'paginated', 'migration', 'issues']
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/content_migrations.py#L40-L59
1,363
pyQode/pyqode.cobol
pyqode/cobol/modes/indenter.py
IndenterMode.unindent
def unindent(self): """ Un-indents text at cursor position. """ _logger().debug('unindent') cursor = self.editor.textCursor() _logger().debug('cursor has selection %r', cursor.hasSelection()) if cursor.hasSelection(): cursor.beginEditBlock() self.unindent_selection(cursor) cursor.endEditBlock() self.editor.setTextCursor(cursor) else: tab_len = self.editor.tab_length indentation = cursor.positionInBlock() indentation -= self.min_column if indentation == 0: return max_spaces = indentation % tab_len if max_spaces == 0: max_spaces = tab_len spaces = self.count_deletable_spaces(cursor, max_spaces) _logger().info('deleting %d space before cursor' % spaces) cursor.beginEditBlock() for _ in range(spaces): cursor.deletePreviousChar() cursor.endEditBlock() self.editor.setTextCursor(cursor) _logger().debug(cursor.block().text())
python
def unindent(self): """ Un-indents text at cursor position. """ _logger().debug('unindent') cursor = self.editor.textCursor() _logger().debug('cursor has selection %r', cursor.hasSelection()) if cursor.hasSelection(): cursor.beginEditBlock() self.unindent_selection(cursor) cursor.endEditBlock() self.editor.setTextCursor(cursor) else: tab_len = self.editor.tab_length indentation = cursor.positionInBlock() indentation -= self.min_column if indentation == 0: return max_spaces = indentation % tab_len if max_spaces == 0: max_spaces = tab_len spaces = self.count_deletable_spaces(cursor, max_spaces) _logger().info('deleting %d space before cursor' % spaces) cursor.beginEditBlock() for _ in range(spaces): cursor.deletePreviousChar() cursor.endEditBlock() self.editor.setTextCursor(cursor) _logger().debug(cursor.block().text())
['def', 'unindent', '(', 'self', ')', ':', '_logger', '(', ')', '.', 'debug', '(', "'unindent'", ')', 'cursor', '=', 'self', '.', 'editor', '.', 'textCursor', '(', ')', '_logger', '(', ')', '.', 'debug', '(', "'cursor has selection %r'", ',', 'cursor', '.', 'hasSelection', '(', ')', ')', 'if', 'cursor', '.', 'hasSelection', '(', ')', ':', 'cursor', '.', 'beginEditBlock', '(', ')', 'self', '.', 'unindent_selection', '(', 'cursor', ')', 'cursor', '.', 'endEditBlock', '(', ')', 'self', '.', 'editor', '.', 'setTextCursor', '(', 'cursor', ')', 'else', ':', 'tab_len', '=', 'self', '.', 'editor', '.', 'tab_length', 'indentation', '=', 'cursor', '.', 'positionInBlock', '(', ')', 'indentation', '-=', 'self', '.', 'min_column', 'if', 'indentation', '==', '0', ':', 'return', 'max_spaces', '=', 'indentation', '%', 'tab_len', 'if', 'max_spaces', '==', '0', ':', 'max_spaces', '=', 'tab_len', 'spaces', '=', 'self', '.', 'count_deletable_spaces', '(', 'cursor', ',', 'max_spaces', ')', '_logger', '(', ')', '.', 'info', '(', "'deleting %d space before cursor'", '%', 'spaces', ')', 'cursor', '.', 'beginEditBlock', '(', ')', 'for', '_', 'in', 'range', '(', 'spaces', ')', ':', 'cursor', '.', 'deletePreviousChar', '(', ')', 'cursor', '.', 'endEditBlock', '(', ')', 'self', '.', 'editor', '.', 'setTextCursor', '(', 'cursor', ')', '_logger', '(', ')', '.', 'debug', '(', 'cursor', '.', 'block', '(', ')', '.', 'text', '(', ')', ')']
Un-indents text at cursor position.
['Un', '-', 'indents', 'text', 'at', 'cursor', 'position', '.']
train
https://github.com/pyQode/pyqode.cobol/blob/eedae4e320a4b2d0c44abb2c3061091321648fb7/pyqode/cobol/modes/indenter.py#L149-L178
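Illustrative sketch (hypothetical helper, not the pyqode.cobol mode): the tab-stop arithmetic behind the non-selection branch of unindent above -- work out how many columns separate the cursor from the previous tab stop, then count how many of the characters just before the cursor are actually spaces that can be deleted.

def spaces_to_previous_tab_stop(line, column, tab_len=4, min_column=0):
    indentation = column - min_column
    if indentation <= 0:
        return 0
    max_spaces = (indentation % tab_len) or tab_len   # distance back to the tab stop
    deletable = 0
    i = column - 1
    while i >= 0 and line[i] == ' ' and deletable < max_spaces:
        deletable += 1
        i -= 1
    return deletable

print(spaces_to_previous_tab_stop('      x = 1', column=6, tab_len=4))  # -> 2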
1,364
cyrus-/cypy
cypy/__init__.py
fn_kwargs
def fn_kwargs(callable): """Returns a dict with the kwargs from the provided function. Example >>> def x(a, b=0, *args, **kwargs): pass >>> func_kwargs(x) == { 'b': 0 } """ fn = get_fn(callable) (args, _, _, defaults) = _inspect.getargspec(fn) if defaults is None: return { } return dict(list(zip(reversed(args), reversed(defaults))))
python
def fn_kwargs(callable): """Returns a dict with the kwargs from the provided function. Example >>> def x(a, b=0, *args, **kwargs): pass >>> func_kwargs(x) == { 'b': 0 } """ fn = get_fn(callable) (args, _, _, defaults) = _inspect.getargspec(fn) if defaults is None: return { } return dict(list(zip(reversed(args), reversed(defaults))))
['def', 'fn_kwargs', '(', 'callable', ')', ':', 'fn', '=', 'get_fn', '(', 'callable', ')', '(', 'args', ',', '_', ',', '_', ',', 'defaults', ')', '=', '_inspect', '.', 'getargspec', '(', 'fn', ')', 'if', 'defaults', 'is', 'None', ':', 'return', '{', '}', 'return', 'dict', '(', 'list', '(', 'zip', '(', 'reversed', '(', 'args', ')', ',', 'reversed', '(', 'defaults', ')', ')', ')', ')']
Returns a dict with the kwargs from the provided function. Example >>> def x(a, b=0, *args, **kwargs): pass >>> func_kwargs(x) == { 'b': 0 }
['Returns', 'a', 'dict', 'with', 'the', 'kwargs', 'from', 'the', 'provided', 'function', '.']
train
https://github.com/cyrus-/cypy/blob/04bb59e91fa314e8cf987743189c77a9b6bc371d/cypy/__init__.py#L742-L754
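Illustrative sketch (my own helper name, not part of cypy): the same "collect a callable's keyword defaults" idea as fn_kwargs above, written against inspect.signature, the Python 3 replacement for the deprecated inspect.getargspec call used in the entry.

import inspect

def keyword_defaults(func):
    return {name: param.default
            for name, param in inspect.signature(func).parameters.items()
            if param.default is not inspect.Parameter.empty}

def x(a, b=0, *args, **kwargs):
    pass

print(keyword_defaults(x))  # -> {'b': 0}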
1,365
gabstopper/smc-python
smc/elements/network.py
IPList.update_or_create
def update_or_create(cls, append_lists=True, with_status=False, **kwargs): """ Update or create an IPList. :param bool append_lists: append to existing IP List :param dict kwargs: provide at minimum the name attribute and optionally match the create constructor values :raises FetchElementFailed: Reason for retrieval failure """ was_created, was_modified = False, False element = None try: element = cls.get(kwargs.get('name')) if append_lists: iplist = element.iplist diff = [i for i in kwargs.get('iplist', []) if i not in iplist] if diff: iplist.extend(diff) else: iplist = [] else: iplist = kwargs.get('iplist', []) if iplist: element.upload(json={'ip': iplist}, as_type='json') was_modified = True except ElementNotFound: element = cls.create( kwargs.get('name'), iplist = kwargs.get('iplist', [])) was_created = True if with_status: return element, was_modified, was_created return element
python
def update_or_create(cls, append_lists=True, with_status=False, **kwargs): """ Update or create an IPList. :param bool append_lists: append to existing IP List :param dict kwargs: provide at minimum the name attribute and optionally match the create constructor values :raises FetchElementFailed: Reason for retrieval failure """ was_created, was_modified = False, False element = None try: element = cls.get(kwargs.get('name')) if append_lists: iplist = element.iplist diff = [i for i in kwargs.get('iplist', []) if i not in iplist] if diff: iplist.extend(diff) else: iplist = [] else: iplist = kwargs.get('iplist', []) if iplist: element.upload(json={'ip': iplist}, as_type='json') was_modified = True except ElementNotFound: element = cls.create( kwargs.get('name'), iplist = kwargs.get('iplist', [])) was_created = True if with_status: return element, was_modified, was_created return element
['def', 'update_or_create', '(', 'cls', ',', 'append_lists', '=', 'True', ',', 'with_status', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'was_created', ',', 'was_modified', '=', 'False', ',', 'False', 'element', '=', 'None', 'try', ':', 'element', '=', 'cls', '.', 'get', '(', 'kwargs', '.', 'get', '(', "'name'", ')', ')', 'if', 'append_lists', ':', 'iplist', '=', 'element', '.', 'iplist', 'diff', '=', '[', 'i', 'for', 'i', 'in', 'kwargs', '.', 'get', '(', "'iplist'", ',', '[', ']', ')', 'if', 'i', 'not', 'in', 'iplist', ']', 'if', 'diff', ':', 'iplist', '.', 'extend', '(', 'diff', ')', 'else', ':', 'iplist', '=', '[', ']', 'else', ':', 'iplist', '=', 'kwargs', '.', 'get', '(', "'iplist'", ',', '[', ']', ')', 'if', 'iplist', ':', 'element', '.', 'upload', '(', 'json', '=', '{', "'ip'", ':', 'iplist', '}', ',', 'as_type', '=', "'json'", ')', 'was_modified', '=', 'True', 'except', 'ElementNotFound', ':', 'element', '=', 'cls', '.', 'create', '(', 'kwargs', '.', 'get', '(', "'name'", ')', ',', 'iplist', '=', 'kwargs', '.', 'get', '(', "'iplist'", ',', '[', ']', ')', ')', 'was_created', '=', 'True', 'if', 'with_status', ':', 'return', 'element', ',', 'was_modified', ',', 'was_created', 'return', 'element']
Update or create an IPList. :param bool append_lists: append to existing IP List :param dict kwargs: provide at minimum the name attribute and optionally match the create constructor values :raises FetchElementFailed: Reason for retrieval failure
['Update', 'or', 'create', 'an', 'IPList', '.', ':', 'param', 'bool', 'append_lists', ':', 'append', 'to', 'existing', 'IP', 'List', ':', 'param', 'dict', 'kwargs', ':', 'provide', 'at', 'minimum', 'the', 'name', 'attribute', 'and', 'optionally', 'match', 'the', 'create', 'constructor', 'values', ':', 'raises', 'FetchElementFailed', ':', 'Reason', 'for', 'retrieval', 'failure']
train
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/network.py#L458-L493
1,366
pytorch/text
torchtext/data/field.py
SubwordField.segment
def segment(self, *args): """Segment one or more datasets with this subword field. Arguments: Positional arguments: Dataset objects or other indexable mutable sequences to segment. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly. """ sources = [] for arg in args: if isinstance(arg, Dataset): sources += [getattr(arg, name) for name, field in arg.fields.items() if field is self] else: sources.append(arg) for data in sources: for x in tqdm(data, 'segmenting'): x[:] = self.vocab.segment(x)
python
def segment(self, *args): """Segment one or more datasets with this subword field. Arguments: Positional arguments: Dataset objects or other indexable mutable sequences to segment. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly. """ sources = [] for arg in args: if isinstance(arg, Dataset): sources += [getattr(arg, name) for name, field in arg.fields.items() if field is self] else: sources.append(arg) for data in sources: for x in tqdm(data, 'segmenting'): x[:] = self.vocab.segment(x)
['def', 'segment', '(', 'self', ',', '*', 'args', ')', ':', 'sources', '=', '[', ']', 'for', 'arg', 'in', 'args', ':', 'if', 'isinstance', '(', 'arg', ',', 'Dataset', ')', ':', 'sources', '+=', '[', 'getattr', '(', 'arg', ',', 'name', ')', 'for', 'name', ',', 'field', 'in', 'arg', '.', 'fields', '.', 'items', '(', ')', 'if', 'field', 'is', 'self', ']', 'else', ':', 'sources', '.', 'append', '(', 'arg', ')', 'for', 'data', 'in', 'sources', ':', 'for', 'x', 'in', 'tqdm', '(', 'data', ',', "'segmenting'", ')', ':', 'x', '[', ':', ']', '=', 'self', '.', 'vocab', '.', 'segment', '(', 'x', ')']
Segment one or more datasets with this subword field. Arguments: Positional arguments: Dataset objects or other indexable mutable sequences to segment. If a Dataset object is provided, all columns corresponding to this field are used; individual columns can also be provided directly.
['Segment', 'one', 'or', 'more', 'datasets', 'with', 'this', 'subword', 'field', '.']
train
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/data/field.py#L424-L442
1,367
DLR-RM/RAFCON
source/rafcon/core/states/state.py
State.get_uppermost_library_root_state
def get_uppermost_library_root_state(self): """Find state_copy of uppermost LibraryState Method checks if there is a parent library root state and assigns it to be the current library root state till there is no further parent library root state. """ library_root_state = self.get_next_upper_library_root_state() parent_library_root_state = library_root_state # initial a library root state has to be found and if there is no further parent root state # parent_library_root_state and library_root_state are no more identical while parent_library_root_state and library_root_state is parent_library_root_state: if library_root_state: parent_library_root_state = library_root_state.parent.get_next_upper_library_root_state() if parent_library_root_state: library_root_state = parent_library_root_state return library_root_state
python
def get_uppermost_library_root_state(self): """Find state_copy of uppermost LibraryState Method checks if there is a parent library root state and assigns it to be the current library root state till there is no further parent library root state. """ library_root_state = self.get_next_upper_library_root_state() parent_library_root_state = library_root_state # initial a library root state has to be found and if there is no further parent root state # parent_library_root_state and library_root_state are no more identical while parent_library_root_state and library_root_state is parent_library_root_state: if library_root_state: parent_library_root_state = library_root_state.parent.get_next_upper_library_root_state() if parent_library_root_state: library_root_state = parent_library_root_state return library_root_state
['def', 'get_uppermost_library_root_state', '(', 'self', ')', ':', 'library_root_state', '=', 'self', '.', 'get_next_upper_library_root_state', '(', ')', 'parent_library_root_state', '=', 'library_root_state', '# initial a library root state has to be found and if there is no further parent root state', '# parent_library_root_state and library_root_state are no more identical', 'while', 'parent_library_root_state', 'and', 'library_root_state', 'is', 'parent_library_root_state', ':', 'if', 'library_root_state', ':', 'parent_library_root_state', '=', 'library_root_state', '.', 'parent', '.', 'get_next_upper_library_root_state', '(', ')', 'if', 'parent_library_root_state', ':', 'library_root_state', '=', 'parent_library_root_state', 'return', 'library_root_state']
Find state_copy of uppermost LibraryState Method checks if there is a parent library root state and assigns it to be the current library root state till there is no further parent library root state.
['Find', 'state_copy', 'of', 'uppermost', 'LibraryState']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L1441-L1459
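Illustrative sketch (a loose, stand-alone abstraction, not the RAFCON state API): the "keep climbing until there is no further parent library root" idea from the docstring above, reduced to walking a plain parent chain and remembering the outermost node flagged as a library root.

class Node:
    def __init__(self, parent=None, is_library_root=False):
        self.parent = parent
        self.is_library_root = is_library_root

def uppermost_library_root(node):
    found = None
    current = node
    while current is not None:
        if current.is_library_root:
            found = current      # remember the outermost root seen so far
        current = current.parent
    return found

outer = Node(is_library_root=True)
inner = Node(parent=Node(parent=outer, is_library_root=True))
print(uppermost_library_root(inner) is outer)  # -> True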
1,368
StackStorm/pybind
pybind/slxos/v17s_1_02/__init__.py
brocade_mc_hms_operational._set_igmp_snooping_state
def _set_igmp_snooping_state(self, v, load=False): """ Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_igmp_snooping_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmp_snooping_state() directly. YANG Description: IGMP Snooping Root MO """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """igmp_snooping_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True)""", }) self.__igmp_snooping_state = t if hasattr(self, '_set'): self._set()
python
def _set_igmp_snooping_state(self, v, load=False): """ Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_igmp_snooping_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmp_snooping_state() directly. YANG Description: IGMP Snooping Root MO """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """igmp_snooping_state must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=igmp_snooping_state.igmp_snooping_state, is_container='container', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc-hms-igmp-snooping', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='container', is_config=True)""", }) self.__igmp_snooping_state = t if hasattr(self, '_set'): self._set()
['def', '_set_igmp_snooping_state', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'igmp_snooping_state', '.', 'igmp_snooping_state', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"igmp-snooping-state"', ',', 'rest_name', '=', '"igmp-snooping-state"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'callpoint'", ':', "u'mc-hms-igmp-snooping'", ',', "u'cli-suppress-show-path'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-mc-hms-operational'", ',', 'defining_module', '=', "'brocade-mc-hms-operational'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""igmp_snooping_state must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=igmp_snooping_state.igmp_snooping_state, is_container=\'container\', presence=False, yang_name="igmp-snooping-state", rest_name="igmp-snooping-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'callpoint\': u\'mc-hms-igmp-snooping\', u\'cli-suppress-show-path\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mc-hms-operational\', defining_module=\'brocade-mc-hms-operational\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__igmp_snooping_state', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for igmp_snooping_state, mapped from YANG variable /igmp_snooping_state (container) If this variable is read-only (config: false) in the source YANG file, then _set_igmp_snooping_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmp_snooping_state() directly. YANG Description: IGMP Snooping Root MO
['Setter', 'method', 'for', 'igmp_snooping_state', 'mapped', 'from', 'YANG', 'variable', '/', 'igmp_snooping_state', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_igmp_snooping_state', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_igmp_snooping_state', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L1023-L1046
1,369
ARMmbed/icetea
icetea_lib/build/build.py
BuildHttp.get_file
def get_file(self): """ Load data into a file and return file path. :return: path to file as string """ content = self._load() if not content: return None filename = "temporary_file.bin" with open(filename, "wb") as file_name: file_name.write(content) return filename
python
def get_file(self): """ Load data into a file and return file path. :return: path to file as string """ content = self._load() if not content: return None filename = "temporary_file.bin" with open(filename, "wb") as file_name: file_name.write(content) return filename
['def', 'get_file', '(', 'self', ')', ':', 'content', '=', 'self', '.', '_load', '(', ')', 'if', 'not', 'content', ':', 'return', 'None', 'filename', '=', '"temporary_file.bin"', 'with', 'open', '(', 'filename', ',', '"wb"', ')', 'as', 'file_name', ':', 'file_name', '.', 'write', '(', 'content', ')', 'return', 'filename']
Load data into a file and return file path. :return: path to file as string
['Load', 'data', 'into', 'a', 'file', 'and', 'return', 'file', 'path', '.']
train
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/build/build.py#L200-L212
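Illustrative sketch (hypothetical helper, not the icetea BuildHttp class): the same "dump the downloaded bytes to disk and hand back the path" step as get_file above, but writing to a NamedTemporaryFile so that two concurrent fetches cannot clobber each other's hard-coded temporary_file.bin.

import tempfile

def write_payload(content: bytes):
    if not content:
        return None
    with tempfile.NamedTemporaryFile(suffix='.bin', delete=False) as handle:
        handle.write(content)
        return handle.name       # a unique path, safe against concurrent writers

print(write_payload(b'\x00\x01\x02'))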
1,370
tensorpack/tensorpack
tensorpack/models/_old_batch_norm.py
BatchNorm
def BatchNorm(inputs, training=None, momentum=0.9, epsilon=1e-5, center=True, scale=True, gamma_initializer=tf.ones_initializer(), data_format='channels_last', internal_update=False): """ Mostly equivalent to `tf.layers.batch_normalization`, but difference in the following: 1. Accepts `data_format` rather than `axis`. For 2D input, this argument will be ignored. 2. Default value for `momentum` and `epsilon` is different. 3. Default value for `training` is automatically obtained from `TowerContext`. 4. Support the `internal_update` option. Args: internal_update (bool): if False, add EMA update ops to `tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer by control dependencies. Variable Names: * ``beta``: the bias term. Will be zero-inited by default. * ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``. * ``mean/EMA``: the moving average of mean. * ``variance/EMA``: the moving average of variance. Note: 1. About multi-GPU training: moving averages across GPUs are not aggregated. Batch statistics are computed independently. This is consistent with most frameworks. 2. Combinations of ``training`` and ``ctx.is_training``: * ``training == ctx.is_training``: standard BN, EMA are maintained during training and used during inference. This is the default. * ``training and not ctx.is_training``: still use batch statistics in inference. * ``not training and ctx.is_training``: use EMA to normalize in training. This is useful when you load a pre-trained BN and don't want to fine tune the EMA. EMA will not be updated in this case. """ data_format = get_data_format(data_format, keras_mode=False) shape = inputs.get_shape().as_list() ndims = len(shape) assert ndims in [2, 4] if ndims == 2: data_format = 'NHWC' if data_format == 'NCHW': n_out = shape[1] else: n_out = shape[-1] # channel assert n_out is not None, "Input to BatchNorm cannot have unknown channels!" beta, gamma, moving_mean, moving_var = get_bn_variables(n_out, scale, center, gamma_initializer) ctx = get_current_tower_context() use_local_stat = training if use_local_stat is None: use_local_stat = ctx.is_training use_local_stat = bool(use_local_stat) if use_local_stat: if ndims == 2: inputs = tf.reshape(inputs, [-1, 1, 1, n_out]) # fused_bn only takes 4D input # fused_bn has error using NCHW? (see #190) xn, batch_mean, batch_var = tf.nn.fused_batch_norm( inputs, gamma, beta, epsilon=epsilon, is_training=True, data_format=data_format) if ndims == 2: xn = tf.squeeze(xn, [1, 2]) else: if ctx.is_training: assert get_tf_version_tuple() >= (1, 4), \ "Fine tuning a BatchNorm model with fixed statistics is only " \ "supported after https://github.com/tensorflow/tensorflow/pull/12580 " if ctx.is_main_training_tower: # only warn in first tower logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.") # Using moving_mean/moving_variance in training, which means we # loaded a pre-trained BN and only fine-tuning the affine part. xn, _, _ = tf.nn.fused_batch_norm( inputs, gamma, beta, mean=moving_mean, variance=moving_var, epsilon=epsilon, data_format=data_format, is_training=False) else: if ndims == 4: xn, _, _ = tf.nn.fused_batch_norm( inputs, gamma, beta, mean=moving_mean, variance=moving_var, epsilon=epsilon, data_format=data_format, is_training=False) else: xn = tf.nn.batch_normalization( inputs, moving_mean, moving_var, beta, gamma, epsilon) # maintain EMA only on one GPU is OK, even in replicated mode. 
# because training time doesn't use EMA if ctx.is_main_training_tower: add_model_variable(moving_mean) add_model_variable(moving_var) if ctx.is_main_training_tower and use_local_stat: ret = update_bn_ema(xn, batch_mean, batch_var, moving_mean, moving_var, momentum, internal_update) else: ret = tf.identity(xn, name='output') vh = ret.variables = VariableHolder(mean=moving_mean, variance=moving_var) if scale: vh.gamma = gamma if center: vh.beta = beta return ret
python
def BatchNorm(inputs, training=None, momentum=0.9, epsilon=1e-5, center=True, scale=True, gamma_initializer=tf.ones_initializer(), data_format='channels_last', internal_update=False): """ Mostly equivalent to `tf.layers.batch_normalization`, but difference in the following: 1. Accepts `data_format` rather than `axis`. For 2D input, this argument will be ignored. 2. Default value for `momentum` and `epsilon` is different. 3. Default value for `training` is automatically obtained from `TowerContext`. 4. Support the `internal_update` option. Args: internal_update (bool): if False, add EMA update ops to `tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer by control dependencies. Variable Names: * ``beta``: the bias term. Will be zero-inited by default. * ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``. * ``mean/EMA``: the moving average of mean. * ``variance/EMA``: the moving average of variance. Note: 1. About multi-GPU training: moving averages across GPUs are not aggregated. Batch statistics are computed independently. This is consistent with most frameworks. 2. Combinations of ``training`` and ``ctx.is_training``: * ``training == ctx.is_training``: standard BN, EMA are maintained during training and used during inference. This is the default. * ``training and not ctx.is_training``: still use batch statistics in inference. * ``not training and ctx.is_training``: use EMA to normalize in training. This is useful when you load a pre-trained BN and don't want to fine tune the EMA. EMA will not be updated in this case. """ data_format = get_data_format(data_format, keras_mode=False) shape = inputs.get_shape().as_list() ndims = len(shape) assert ndims in [2, 4] if ndims == 2: data_format = 'NHWC' if data_format == 'NCHW': n_out = shape[1] else: n_out = shape[-1] # channel assert n_out is not None, "Input to BatchNorm cannot have unknown channels!" beta, gamma, moving_mean, moving_var = get_bn_variables(n_out, scale, center, gamma_initializer) ctx = get_current_tower_context() use_local_stat = training if use_local_stat is None: use_local_stat = ctx.is_training use_local_stat = bool(use_local_stat) if use_local_stat: if ndims == 2: inputs = tf.reshape(inputs, [-1, 1, 1, n_out]) # fused_bn only takes 4D input # fused_bn has error using NCHW? (see #190) xn, batch_mean, batch_var = tf.nn.fused_batch_norm( inputs, gamma, beta, epsilon=epsilon, is_training=True, data_format=data_format) if ndims == 2: xn = tf.squeeze(xn, [1, 2]) else: if ctx.is_training: assert get_tf_version_tuple() >= (1, 4), \ "Fine tuning a BatchNorm model with fixed statistics is only " \ "supported after https://github.com/tensorflow/tensorflow/pull/12580 " if ctx.is_main_training_tower: # only warn in first tower logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.") # Using moving_mean/moving_variance in training, which means we # loaded a pre-trained BN and only fine-tuning the affine part. xn, _, _ = tf.nn.fused_batch_norm( inputs, gamma, beta, mean=moving_mean, variance=moving_var, epsilon=epsilon, data_format=data_format, is_training=False) else: if ndims == 4: xn, _, _ = tf.nn.fused_batch_norm( inputs, gamma, beta, mean=moving_mean, variance=moving_var, epsilon=epsilon, data_format=data_format, is_training=False) else: xn = tf.nn.batch_normalization( inputs, moving_mean, moving_var, beta, gamma, epsilon) # maintain EMA only on one GPU is OK, even in replicated mode. 
# because training time doesn't use EMA if ctx.is_main_training_tower: add_model_variable(moving_mean) add_model_variable(moving_var) if ctx.is_main_training_tower and use_local_stat: ret = update_bn_ema(xn, batch_mean, batch_var, moving_mean, moving_var, momentum, internal_update) else: ret = tf.identity(xn, name='output') vh = ret.variables = VariableHolder(mean=moving_mean, variance=moving_var) if scale: vh.gamma = gamma if center: vh.beta = beta return ret
['def', 'BatchNorm', '(', 'inputs', ',', 'training', '=', 'None', ',', 'momentum', '=', '0.9', ',', 'epsilon', '=', '1e-5', ',', 'center', '=', 'True', ',', 'scale', '=', 'True', ',', 'gamma_initializer', '=', 'tf', '.', 'ones_initializer', '(', ')', ',', 'data_format', '=', "'channels_last'", ',', 'internal_update', '=', 'False', ')', ':', 'data_format', '=', 'get_data_format', '(', 'data_format', ',', 'keras_mode', '=', 'False', ')', 'shape', '=', 'inputs', '.', 'get_shape', '(', ')', '.', 'as_list', '(', ')', 'ndims', '=', 'len', '(', 'shape', ')', 'assert', 'ndims', 'in', '[', '2', ',', '4', ']', 'if', 'ndims', '==', '2', ':', 'data_format', '=', "'NHWC'", 'if', 'data_format', '==', "'NCHW'", ':', 'n_out', '=', 'shape', '[', '1', ']', 'else', ':', 'n_out', '=', 'shape', '[', '-', '1', ']', '# channel', 'assert', 'n_out', 'is', 'not', 'None', ',', '"Input to BatchNorm cannot have unknown channels!"', 'beta', ',', 'gamma', ',', 'moving_mean', ',', 'moving_var', '=', 'get_bn_variables', '(', 'n_out', ',', 'scale', ',', 'center', ',', 'gamma_initializer', ')', 'ctx', '=', 'get_current_tower_context', '(', ')', 'use_local_stat', '=', 'training', 'if', 'use_local_stat', 'is', 'None', ':', 'use_local_stat', '=', 'ctx', '.', 'is_training', 'use_local_stat', '=', 'bool', '(', 'use_local_stat', ')', 'if', 'use_local_stat', ':', 'if', 'ndims', '==', '2', ':', 'inputs', '=', 'tf', '.', 'reshape', '(', 'inputs', ',', '[', '-', '1', ',', '1', ',', '1', ',', 'n_out', ']', ')', '# fused_bn only takes 4D input', '# fused_bn has error using NCHW? (see #190)', 'xn', ',', 'batch_mean', ',', 'batch_var', '=', 'tf', '.', 'nn', '.', 'fused_batch_norm', '(', 'inputs', ',', 'gamma', ',', 'beta', ',', 'epsilon', '=', 'epsilon', ',', 'is_training', '=', 'True', ',', 'data_format', '=', 'data_format', ')', 'if', 'ndims', '==', '2', ':', 'xn', '=', 'tf', '.', 'squeeze', '(', 'xn', ',', '[', '1', ',', '2', ']', ')', 'else', ':', 'if', 'ctx', '.', 'is_training', ':', 'assert', 'get_tf_version_tuple', '(', ')', '>=', '(', '1', ',', '4', ')', ',', '"Fine tuning a BatchNorm model with fixed statistics is only "', '"supported after https://github.com/tensorflow/tensorflow/pull/12580 "', 'if', 'ctx', '.', 'is_main_training_tower', ':', '# only warn in first tower', 'logger', '.', 'warn', '(', '"[BatchNorm] Using moving_mean/moving_variance in training."', ')', '# Using moving_mean/moving_variance in training, which means we', '# loaded a pre-trained BN and only fine-tuning the affine part.', 'xn', ',', '_', ',', '_', '=', 'tf', '.', 'nn', '.', 'fused_batch_norm', '(', 'inputs', ',', 'gamma', ',', 'beta', ',', 'mean', '=', 'moving_mean', ',', 'variance', '=', 'moving_var', ',', 'epsilon', '=', 'epsilon', ',', 'data_format', '=', 'data_format', ',', 'is_training', '=', 'False', ')', 'else', ':', 'if', 'ndims', '==', '4', ':', 'xn', ',', '_', ',', '_', '=', 'tf', '.', 'nn', '.', 'fused_batch_norm', '(', 'inputs', ',', 'gamma', ',', 'beta', ',', 'mean', '=', 'moving_mean', ',', 'variance', '=', 'moving_var', ',', 'epsilon', '=', 'epsilon', ',', 'data_format', '=', 'data_format', ',', 'is_training', '=', 'False', ')', 'else', ':', 'xn', '=', 'tf', '.', 'nn', '.', 'batch_normalization', '(', 'inputs', ',', 'moving_mean', ',', 'moving_var', ',', 'beta', ',', 'gamma', ',', 'epsilon', ')', '# maintain EMA only on one GPU is OK, even in replicated mode.', "# because training time doesn't use EMA", 'if', 'ctx', '.', 'is_main_training_tower', ':', 'add_model_variable', '(', 'moving_mean', ')', 'add_model_variable', '(', 
'moving_var', ')', 'if', 'ctx', '.', 'is_main_training_tower', 'and', 'use_local_stat', ':', 'ret', '=', 'update_bn_ema', '(', 'xn', ',', 'batch_mean', ',', 'batch_var', ',', 'moving_mean', ',', 'moving_var', ',', 'momentum', ',', 'internal_update', ')', 'else', ':', 'ret', '=', 'tf', '.', 'identity', '(', 'xn', ',', 'name', '=', "'output'", ')', 'vh', '=', 'ret', '.', 'variables', '=', 'VariableHolder', '(', 'mean', '=', 'moving_mean', ',', 'variance', '=', 'moving_var', ')', 'if', 'scale', ':', 'vh', '.', 'gamma', '=', 'gamma', 'if', 'center', ':', 'vh', '.', 'beta', '=', 'beta', 'return', 'ret']
Mostly equivalent to `tf.layers.batch_normalization`, but difference in the following: 1. Accepts `data_format` rather than `axis`. For 2D input, this argument will be ignored. 2. Default value for `momentum` and `epsilon` is different. 3. Default value for `training` is automatically obtained from `TowerContext`. 4. Support the `internal_update` option. Args: internal_update (bool): if False, add EMA update ops to `tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer by control dependencies. Variable Names: * ``beta``: the bias term. Will be zero-inited by default. * ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``. * ``mean/EMA``: the moving average of mean. * ``variance/EMA``: the moving average of variance. Note: 1. About multi-GPU training: moving averages across GPUs are not aggregated. Batch statistics are computed independently. This is consistent with most frameworks. 2. Combinations of ``training`` and ``ctx.is_training``: * ``training == ctx.is_training``: standard BN, EMA are maintained during training and used during inference. This is the default. * ``training and not ctx.is_training``: still use batch statistics in inference. * ``not training and ctx.is_training``: use EMA to normalize in training. This is useful when you load a pre-trained BN and don't want to fine tune the EMA. EMA will not be updated in this case.
['Mostly', 'equivalent', 'to', 'tf', '.', 'layers', '.', 'batch_normalization', 'but', 'difference', 'in', 'the', 'following', ':', '1', '.', 'Accepts', 'data_format', 'rather', 'than', 'axis', '.', 'For', '2D', 'input', 'this', 'argument', 'will', 'be', 'ignored', '.', '2', '.', 'Default', 'value', 'for', 'momentum', 'and', 'epsilon', 'is', 'different', '.', '3', '.', 'Default', 'value', 'for', 'training', 'is', 'automatically', 'obtained', 'from', 'TowerContext', '.', '4', '.', 'Support', 'the', 'internal_update', 'option', '.', 'Args', ':', 'internal_update', '(', 'bool', ')', ':', 'if', 'False', 'add', 'EMA', 'update', 'ops', 'to', 'tf', '.', 'GraphKeys', '.', 'UPDATE_OPS', '.', 'If', 'True', 'update', 'EMA', 'inside', 'the', 'layer', 'by', 'control', 'dependencies', '.', 'Variable', 'Names', ':', '*', 'beta', ':', 'the', 'bias', 'term', '.', 'Will', 'be', 'zero', '-', 'inited', 'by', 'default', '.', '*', 'gamma', ':', 'the', 'scale', 'term', '.', 'Will', 'be', 'one', '-', 'inited', 'by', 'default', '.', 'Input', 'will', 'be', 'transformed', 'by', 'x', '*', 'gamma', '+', 'beta', '.', '*', 'mean', '/', 'EMA', ':', 'the', 'moving', 'average', 'of', 'mean', '.', '*', 'variance', '/', 'EMA', ':', 'the', 'moving', 'average', 'of', 'variance', '.', 'Note', ':', '1', '.', 'About', 'multi', '-', 'GPU', 'training', ':', 'moving', 'averages', 'across', 'GPUs', 'are', 'not', 'aggregated', '.', 'Batch', 'statistics', 'are', 'computed', 'independently', '.', 'This', 'is', 'consistent', 'with', 'most', 'frameworks', '.', '2', '.', 'Combinations', 'of', 'training', 'and', 'ctx', '.', 'is_training', ':', '*', 'training', '==', 'ctx', '.', 'is_training', ':', 'standard', 'BN', 'EMA', 'are', 'maintained', 'during', 'training', 'and', 'used', 'during', 'inference', '.', 'This', 'is', 'the', 'default', '.', '*', 'training', 'and', 'not', 'ctx', '.', 'is_training', ':', 'still', 'use', 'batch', 'statistics', 'in', 'inference', '.', '*', 'not', 'training', 'and', 'ctx', '.', 'is_training', ':', 'use', 'EMA', 'to', 'normalize', 'in', 'training', '.', 'This', 'is', 'useful', 'when', 'you', 'load', 'a', 'pre', '-', 'trained', 'BN', 'and', 'don', 't', 'want', 'to', 'fine', 'tune', 'the', 'EMA', '.', 'EMA', 'will', 'not', 'be', 'updated', 'in', 'this', 'case', '.']
train
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/models/_old_batch_norm.py#L67-L169
1,371
NORDUnet/python-norduniclient
norduniclient/core.py
create_location_relationship
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type): """ Makes relationship between the two nodes and returns the relationship. If a relationship is not possible NoRelationshipPossible exception is raised. """ other_meta_type = get_node_meta_type(manager, other_handle_id) if other_meta_type == 'Location' and rel_type == 'Has': return _create_relationship(manager, location_handle_id, other_handle_id, rel_type) raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
python
def create_location_relationship(manager, location_handle_id, other_handle_id, rel_type): """ Makes relationship between the two nodes and returns the relationship. If a relationship is not possible NoRelationshipPossible exception is raised. """ other_meta_type = get_node_meta_type(manager, other_handle_id) if other_meta_type == 'Location' and rel_type == 'Has': return _create_relationship(manager, location_handle_id, other_handle_id, rel_type) raise exceptions.NoRelationshipPossible(location_handle_id, 'Location', other_handle_id, other_meta_type, rel_type)
['def', 'create_location_relationship', '(', 'manager', ',', 'location_handle_id', ',', 'other_handle_id', ',', 'rel_type', ')', ':', 'other_meta_type', '=', 'get_node_meta_type', '(', 'manager', ',', 'other_handle_id', ')', 'if', 'other_meta_type', '==', "'Location'", 'and', 'rel_type', '==', "'Has'", ':', 'return', '_create_relationship', '(', 'manager', ',', 'location_handle_id', ',', 'other_handle_id', ',', 'rel_type', ')', 'raise', 'exceptions', '.', 'NoRelationshipPossible', '(', 'location_handle_id', ',', "'Location'", ',', 'other_handle_id', ',', 'other_meta_type', ',', 'rel_type', ')']
Makes relationship between the two nodes and returns the relationship. If a relationship is not possible NoRelationshipPossible exception is raised.
['Makes', 'relationship', 'between', 'the', 'two', 'nodes', 'and', 'returns', 'the', 'relationship', '.', 'If', 'a', 'relationship', 'is', 'not', 'possible', 'NoRelationshipPossible', 'exception', 'is', 'raised', '.']
train
https://github.com/NORDUnet/python-norduniclient/blob/ee5084a6f45caac614b4fda4a023749ca52f786c/norduniclient/core.py#L594-L603
1,372
talkincode/toughlib
toughlib/btforms/net.py
validip
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080): """Returns `(ip_address, port)` from string `ip_addr_port`""" addr = defaultaddr port = defaultport ip = ip.split(":", 1) if len(ip) == 1: if not ip[0]: pass elif validipaddr(ip[0]): addr = ip[0] elif validipport(ip[0]): port = int(ip[0]) else: raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' elif len(ip) == 2: addr, port = ip if not validipaddr(addr) and validipport(port): raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' port = int(port) else: raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' return (addr, port)
python
def validip(ip, defaultaddr="0.0.0.0", defaultport=8080): """Returns `(ip_address, port)` from string `ip_addr_port`""" addr = defaultaddr port = defaultport ip = ip.split(":", 1) if len(ip) == 1: if not ip[0]: pass elif validipaddr(ip[0]): addr = ip[0] elif validipport(ip[0]): port = int(ip[0]) else: raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' elif len(ip) == 2: addr, port = ip if not validipaddr(addr) and validipport(port): raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' port = int(port) else: raise ValueError, ':'.join(ip) + ' is not a valid IP address/port' return (addr, port)
['def', 'validip', '(', 'ip', ',', 'defaultaddr', '=', '"0.0.0.0"', ',', 'defaultport', '=', '8080', ')', ':', 'addr', '=', 'defaultaddr', 'port', '=', 'defaultport', 'ip', '=', 'ip', '.', 'split', '(', '":"', ',', '1', ')', 'if', 'len', '(', 'ip', ')', '==', '1', ':', 'if', 'not', 'ip', '[', '0', ']', ':', 'pass', 'elif', 'validipaddr', '(', 'ip', '[', '0', ']', ')', ':', 'addr', '=', 'ip', '[', '0', ']', 'elif', 'validipport', '(', 'ip', '[', '0', ']', ')', ':', 'port', '=', 'int', '(', 'ip', '[', '0', ']', ')', 'else', ':', 'raise', 'ValueError', ',', "':'", '.', 'join', '(', 'ip', ')', '+', "' is not a valid IP address/port'", 'elif', 'len', '(', 'ip', ')', '==', '2', ':', 'addr', ',', 'port', '=', 'ip', 'if', 'not', 'validipaddr', '(', 'addr', ')', 'and', 'validipport', '(', 'port', ')', ':', 'raise', 'ValueError', ',', "':'", '.', 'join', '(', 'ip', ')', '+', "' is not a valid IP address/port'", 'port', '=', 'int', '(', 'port', ')', 'else', ':', 'raise', 'ValueError', ',', "':'", '.', 'join', '(', 'ip', ')', '+', "' is not a valid IP address/port'", 'return', '(', 'addr', ',', 'port', ')']
Returns `(ip_address, port)` from string `ip_addr_port`
['Returns', '(', 'ip_address', 'port', ')', 'from', 'string', 'ip_addr_port']
train
https://github.com/talkincode/toughlib/blob/1c2f7dde3a7f101248f1b5f5d428cc85466995cf/toughlib/btforms/net.py#L54-L76
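Illustrative sketch (simplified, and written for Python 3 because the toughlib entry above still uses the Python 2 "raise ValueError, ..." statement syntax): parsing an "addr:port" string with defaults for whichever half is missing; unlike validip it does not accept a bare port with no colon.

def parse_ip_port(value, default_addr='0.0.0.0', default_port=8080):
    addr, _, port = value.partition(':')
    if not addr:
        addr = default_addr
    if not port:
        return addr, default_port
    if not port.isdigit():
        raise ValueError('%s is not a valid IP address/port' % value)
    return addr, int(port)

print(parse_ip_port('127.0.0.1:9000'))  # -> ('127.0.0.1', 9000)
print(parse_ip_port(':9000'))           # -> ('0.0.0.0', 9000)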
1,373
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/update/update.py
UpdateAPI.get_firmware_manifest
def get_firmware_manifest(self, manifest_id): """Get manifest with provided manifest_id. :param str manifest_id: ID of manifest to retrieve (Required) :return: FirmwareManifest """ api = self._get_api(update_service.DefaultApi) return FirmwareManifest(api.firmware_manifest_retrieve(manifest_id=manifest_id))
python
def get_firmware_manifest(self, manifest_id): """Get manifest with provided manifest_id. :param str manifest_id: ID of manifest to retrieve (Required) :return: FirmwareManifest """ api = self._get_api(update_service.DefaultApi) return FirmwareManifest(api.firmware_manifest_retrieve(manifest_id=manifest_id))
['def', 'get_firmware_manifest', '(', 'self', ',', 'manifest_id', ')', ':', 'api', '=', 'self', '.', '_get_api', '(', 'update_service', '.', 'DefaultApi', ')', 'return', 'FirmwareManifest', '(', 'api', '.', 'firmware_manifest_retrieve', '(', 'manifest_id', '=', 'manifest_id', ')', ')']
Get manifest with provided manifest_id. :param str manifest_id: ID of manifest to retrieve (Required) :return: FirmwareManifest
['Get', 'manifest', 'with', 'provided', 'manifest_id', '.']
train
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/update/update.py#L248-L255
1,374
locationlabs/mockredis
mockredis/script.py
Script._python_to_lua
def _python_to_lua(pval): """ Convert Python object(s) into Lua object(s), as at times Python object(s) are not compatible with Lua functions """ import lua if pval is None: # Python None --> Lua None return lua.eval("") if isinstance(pval, (list, tuple, set)): # Python list --> Lua table # e.g.: in lrange # in Python returns: [v1, v2, v3] # in Lua returns: {v1, v2, v3} lua_list = lua.eval("{}") lua_table = lua.eval("table") for item in pval: lua_table.insert(lua_list, Script._python_to_lua(item)) return lua_list elif isinstance(pval, dict): # Python dict --> Lua dict # e.g.: in hgetall # in Python returns: {k1:v1, k2:v2, k3:v3} # in Lua returns: {k1, v1, k2, v2, k3, v3} lua_dict = lua.eval("{}") lua_table = lua.eval("table") for k, v in pval.iteritems(): lua_table.insert(lua_dict, Script._python_to_lua(k)) lua_table.insert(lua_dict, Script._python_to_lua(v)) return lua_dict elif isinstance(pval, str): # Python string --> Lua userdata return pval elif isinstance(pval, bool): # Python bool--> Lua boolean return lua.eval(str(pval).lower()) elif isinstance(pval, (int, long, float)): # Python int --> Lua number lua_globals = lua.globals() return lua_globals.tonumber(str(pval)) raise RuntimeError("Invalid Python type: " + str(type(pval)))
python
def _python_to_lua(pval): """ Convert Python object(s) into Lua object(s), as at times Python object(s) are not compatible with Lua functions """ import lua if pval is None: # Python None --> Lua None return lua.eval("") if isinstance(pval, (list, tuple, set)): # Python list --> Lua table # e.g.: in lrange # in Python returns: [v1, v2, v3] # in Lua returns: {v1, v2, v3} lua_list = lua.eval("{}") lua_table = lua.eval("table") for item in pval: lua_table.insert(lua_list, Script._python_to_lua(item)) return lua_list elif isinstance(pval, dict): # Python dict --> Lua dict # e.g.: in hgetall # in Python returns: {k1:v1, k2:v2, k3:v3} # in Lua returns: {k1, v1, k2, v2, k3, v3} lua_dict = lua.eval("{}") lua_table = lua.eval("table") for k, v in pval.iteritems(): lua_table.insert(lua_dict, Script._python_to_lua(k)) lua_table.insert(lua_dict, Script._python_to_lua(v)) return lua_dict elif isinstance(pval, str): # Python string --> Lua userdata return pval elif isinstance(pval, bool): # Python bool--> Lua boolean return lua.eval(str(pval).lower()) elif isinstance(pval, (int, long, float)): # Python int --> Lua number lua_globals = lua.globals() return lua_globals.tonumber(str(pval)) raise RuntimeError("Invalid Python type: " + str(type(pval)))
['def', '_python_to_lua', '(', 'pval', ')', ':', 'import', 'lua', 'if', 'pval', 'is', 'None', ':', '# Python None --> Lua None', 'return', 'lua', '.', 'eval', '(', '""', ')', 'if', 'isinstance', '(', 'pval', ',', '(', 'list', ',', 'tuple', ',', 'set', ')', ')', ':', '# Python list --> Lua table', '# e.g.: in lrange', '# in Python returns: [v1, v2, v3]', '# in Lua returns: {v1, v2, v3}', 'lua_list', '=', 'lua', '.', 'eval', '(', '"{}"', ')', 'lua_table', '=', 'lua', '.', 'eval', '(', '"table"', ')', 'for', 'item', 'in', 'pval', ':', 'lua_table', '.', 'insert', '(', 'lua_list', ',', 'Script', '.', '_python_to_lua', '(', 'item', ')', ')', 'return', 'lua_list', 'elif', 'isinstance', '(', 'pval', ',', 'dict', ')', ':', '# Python dict --> Lua dict', '# e.g.: in hgetall', '# in Python returns: {k1:v1, k2:v2, k3:v3}', '# in Lua returns: {k1, v1, k2, v2, k3, v3}', 'lua_dict', '=', 'lua', '.', 'eval', '(', '"{}"', ')', 'lua_table', '=', 'lua', '.', 'eval', '(', '"table"', ')', 'for', 'k', ',', 'v', 'in', 'pval', '.', 'iteritems', '(', ')', ':', 'lua_table', '.', 'insert', '(', 'lua_dict', ',', 'Script', '.', '_python_to_lua', '(', 'k', ')', ')', 'lua_table', '.', 'insert', '(', 'lua_dict', ',', 'Script', '.', '_python_to_lua', '(', 'v', ')', ')', 'return', 'lua_dict', 'elif', 'isinstance', '(', 'pval', ',', 'str', ')', ':', '# Python string --> Lua userdata', 'return', 'pval', 'elif', 'isinstance', '(', 'pval', ',', 'bool', ')', ':', '# Python bool--> Lua boolean', 'return', 'lua', '.', 'eval', '(', 'str', '(', 'pval', ')', '.', 'lower', '(', ')', ')', 'elif', 'isinstance', '(', 'pval', ',', '(', 'int', ',', 'long', ',', 'float', ')', ')', ':', '# Python int --> Lua number', 'lua_globals', '=', 'lua', '.', 'globals', '(', ')', 'return', 'lua_globals', '.', 'tonumber', '(', 'str', '(', 'pval', ')', ')', 'raise', 'RuntimeError', '(', '"Invalid Python type: "', '+', 'str', '(', 'type', '(', 'pval', ')', ')', ')']
Convert Python object(s) into Lua object(s), as at times Python object(s) are not compatible with Lua functions
['Convert', 'Python', 'object', '(', 's', ')', 'into', 'Lua', 'object', '(', 's', ')', 'as', 'at', 'times', 'Python', 'object', '(', 's', ')', 'are', 'not', 'compatible', 'with', 'Lua', 'functions']
train
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/script.py#L138-L179
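The record above documents a Python-to-Lua bridge in which lists keep their order and dicts are flattened into alternating key/value entries. Below is a minimal, self-contained sketch (pure Python, no lunatic-python `lua` module) that reproduces only that documented flattening so the mapping is easy to see; it is an illustration, not the library's actual converter.

def flatten_for_lua(pval):
    # Illustration only: mimics the documented shape of the conversion
    # (lists stay ordered; dicts become [k1, v1, k2, v2, ...]).
    if isinstance(pval, (list, tuple, set)):
        return [flatten_for_lua(item) for item in pval]
    if isinstance(pval, dict):
        flat = []
        for k, v in pval.items():
            flat.append(flatten_for_lua(k))
            flat.append(flatten_for_lua(v))
        return flat
    return pval

print(flatten_for_lua({'k1': 'v1', 'k2': 'v2'}))  # ['k1', 'v1', 'k2', 'v2']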
1,375
secdev/scapy
scapy/modules/krack/automaton.py
KrackAP.build_GTK_KDE
def build_GTK_KDE(self): """Build the Key Data Encapsulation for GTK KeyID: 0 Ref: 802.11i p81 """ return b''.join([ b'\xdd', # Type KDE chb(len(self.gtk_full) + 6), b'\x00\x0f\xac', # OUI b'\x01', # GTK KDE b'\x00\x00', # KeyID - Tx - Reserved x2 self.gtk_full, ])
python
def build_GTK_KDE(self): """Build the Key Data Encapsulation for GTK KeyID: 0 Ref: 802.11i p81 """ return b''.join([ b'\xdd', # Type KDE chb(len(self.gtk_full) + 6), b'\x00\x0f\xac', # OUI b'\x01', # GTK KDE b'\x00\x00', # KeyID - Tx - Reserved x2 self.gtk_full, ])
['def', 'build_GTK_KDE', '(', 'self', ')', ':', 'return', "b''", '.', 'join', '(', '[', "b'\\xdd'", ',', '# Type KDE', 'chb', '(', 'len', '(', 'self', '.', 'gtk_full', ')', '+', '6', ')', ',', "b'\\x00\\x0f\\xac'", ',', '# OUI', "b'\\x01'", ',', '# GTK KDE', "b'\\x00\\x00'", ',', '# KeyID - Tx - Reserved x2', 'self', '.', 'gtk_full', ',', ']', ')']
Build the Key Data Encapsulation for GTK KeyID: 0 Ref: 802.11i p81
['Build', 'the', 'Key', 'Data', 'Encapsulation', 'for', 'GTK', 'KeyID', ':', '0', 'Ref', ':', '802', '.', '11i', 'p81']
train
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/modules/krack/automaton.py#L301-L313
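The GTK Key Data Encapsulation built above is a fixed byte layout: type 0xdd, a one-byte length, the 00-0f-ac OUI, data type 0x01, two KeyID/Tx/reserved bytes, then the GTK itself. A standalone sketch of that layout follows, using a 16-byte dummy GTK; it avoids scapy's chb() helper and is illustrative rather than the module's own code.

def build_gtk_kde(gtk_full: bytes) -> bytes:
    # Sketch of the 802.11i GTK KDE layout (illustrative).
    return b''.join([
        b'\xdd',                     # KDE type
        bytes([len(gtk_full) + 6]),  # length: OUI(3) + data type(1) + KeyID/Tx/reserved(2) + GTK
        b'\x00\x0f\xac',             # OUI
        b'\x01',                     # GTK KDE data type
        b'\x00\x00',                 # KeyID 0 - Tx - reserved
        gtk_full,
    ])

kde = build_gtk_kde(b'\x00' * 16)
print(len(kde), kde[:6].hex())  # 24 dd16000fac01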
1,376
hsolbrig/PyShEx
pyshex/shape_expressions_language/p5_context.py
Context._visit_shape_te
def _visit_shape_te(self, te: ShExJ.tripleExpr, visit_center: _VisitorCenter) -> None: """ Visit a triple expression that was reached through a shape. This, in turn, is used to visit additional shapes that are referenced by a TripleConstraint :param te: Triple expression reached through a Shape.expression :param visit_center: context used in shape visitor """ if isinstance(te, ShExJ.TripleConstraint) and te.valueExpr is not None: visit_center.f(visit_center.arg_cntxt, te.valueExpr, self)
python
def _visit_shape_te(self, te: ShExJ.tripleExpr, visit_center: _VisitorCenter) -> None: """ Visit a triple expression that was reached through a shape. This, in turn, is used to visit additional shapes that are referenced by a TripleConstraint :param te: Triple expression reached through a Shape.expression :param visit_center: context used in shape visitor """ if isinstance(te, ShExJ.TripleConstraint) and te.valueExpr is not None: visit_center.f(visit_center.arg_cntxt, te.valueExpr, self)
['def', '_visit_shape_te', '(', 'self', ',', 'te', ':', 'ShExJ', '.', 'tripleExpr', ',', 'visit_center', ':', '_VisitorCenter', ')', '->', 'None', ':', 'if', 'isinstance', '(', 'te', ',', 'ShExJ', '.', 'TripleConstraint', ')', 'and', 'te', '.', 'valueExpr', 'is', 'not', 'None', ':', 'visit_center', '.', 'f', '(', 'visit_center', '.', 'arg_cntxt', ',', 'te', '.', 'valueExpr', ',', 'self', ')']
Visit a triple expression that was reached through a shape. This, in turn, is used to visit additional shapes that are referenced by a TripleConstraint :param te: Triple expression reached through a Shape.expression :param visit_center: context used in shape visitor
['Visit', 'a', 'triple', 'expression', 'that', 'was', 'reached', 'through', 'a', 'shape', '.', 'This', 'in', 'turn', 'is', 'used', 'to', 'visit', 'additional', 'shapes', 'that', 'are', 'referenced', 'by', 'a', 'TripleConstraint', ':', 'param', 'te', ':', 'Triple', 'expression', 'reached', 'through', 'a', 'Shape', '.', 'expression', ':', 'param', 'visit_center', ':', 'context', 'used', 'in', 'shape', 'visitor']
train
https://github.com/hsolbrig/PyShEx/blob/9d659cc36e808afd66d4a6d60e8ea21cb12eb744/pyshex/shape_expressions_language/p5_context.py#L335-L343
1,377
merll/docker-map
dockermap/build/context.py
get_exclusions
def get_exclusions(path): """ Generates exclusion patterns from a ``.dockerignore`` file located in the given path. Returns ``None`` if the file does not exist. :param path: Path to look up the ``.dockerignore`` in. :type path: unicode | str :return: List of patterns, that can be passed into :func:`get_filter_func`. :rtype: list[(__RegEx, bool)] """ if not os.path.isdir(path): return None dockerignore_file = os.path.join(path, '.dockerignore') if not os.path.isfile(dockerignore_file): return None with open(dockerignore_file, 'rb') as dif: return list(preprocess_matches(dif.readlines()))
python
def get_exclusions(path): """ Generates exclusion patterns from a ``.dockerignore`` file located in the given path. Returns ``None`` if the file does not exist. :param path: Path to look up the ``.dockerignore`` in. :type path: unicode | str :return: List of patterns, that can be passed into :func:`get_filter_func`. :rtype: list[(__RegEx, bool)] """ if not os.path.isdir(path): return None dockerignore_file = os.path.join(path, '.dockerignore') if not os.path.isfile(dockerignore_file): return None with open(dockerignore_file, 'rb') as dif: return list(preprocess_matches(dif.readlines()))
['def', 'get_exclusions', '(', 'path', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'path', ')', ':', 'return', 'None', 'dockerignore_file', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', "'.dockerignore'", ')', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'dockerignore_file', ')', ':', 'return', 'None', 'with', 'open', '(', 'dockerignore_file', ',', "'rb'", ')', 'as', 'dif', ':', 'return', 'list', '(', 'preprocess_matches', '(', 'dif', '.', 'readlines', '(', ')', ')', ')']
Generates exclusion patterns from a ``.dockerignore`` file located in the given path. Returns ``None`` if the file does not exist. :param path: Path to look up the ``.dockerignore`` in. :type path: unicode | str :return: List of patterns, that can be passed into :func:`get_filter_func`. :rtype: list[(__RegEx, bool)]
['Generates', 'exclusion', 'patterns', 'from', 'a', '.', 'dockerignore', 'file', 'located', 'in', 'the', 'given', 'path', '.', 'Returns', 'None', 'if', 'the', 'file', 'does', 'not', 'exist', '.']
train
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/build/context.py#L45-L61
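A standalone analogue of the lookup above makes the None-vs-list behaviour easy to try without installing dockermap; it assumes preprocess_matches() essentially turns raw .dockerignore lines into patterns, so this sketch simply returns the stripped, non-empty lines.

import os
import tempfile

def read_dockerignore(path):
    # Simplified analogue of get_exclusions(): None when there is nothing to read.
    if not os.path.isdir(path):
        return None
    dockerignore_file = os.path.join(path, '.dockerignore')
    if not os.path.isfile(dockerignore_file):
        return None
    with open(dockerignore_file) as dif:
        return [line.strip() for line in dif if line.strip()]

with tempfile.TemporaryDirectory() as tmp:
    print(read_dockerignore(tmp))                       # None: no .dockerignore yet
    with open(os.path.join(tmp, '.dockerignore'), 'w') as fh:
        fh.write('*.pyc\n!keep.pyc\n')
    print(read_dockerignore(tmp))                       # ['*.pyc', '!keep.pyc']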
1,378
productml/blurr
blurr/core/field_complex.py
Map.set
def set(self, key: Any, value: Any) -> None: """ Sets the value of a key to a supplied value """ if key is not None: self[key] = value
python
def set(self, key: Any, value: Any) -> None: """ Sets the value of a key to a supplied value """ if key is not None: self[key] = value
['def', 'set', '(', 'self', ',', 'key', ':', 'Any', ',', 'value', ':', 'Any', ')', '->', 'None', ':', 'if', 'key', 'is', 'not', 'None', ':', 'self', '[', 'key', ']', '=', 'value']
Sets the value of a key to a supplied value
['Sets', 'the', 'value', 'of', 'a', 'key', 'to', 'a', 'supplied', 'value']
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/field_complex.py#L11-L14
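The set() helper above silently drops writes whose key is None. A tiny self-contained dict subclass reproduces that behaviour as a sketch:

class MapSketch(dict):
    def set(self, key, value):
        # Mirrors the documented behaviour: None keys are ignored.
        if key is not None:
            self[key] = value

m = MapSketch()
m.set('country', 'US')
m.set(None, 'ignored')
print(m)  # {'country': 'US'}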
1,379
Robpol86/terminaltables
terminaltables/base_table.py
BaseTable.gen_row_lines
def gen_row_lines(self, row, style, inner_widths, height): r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line. """ cells_in_row = list() # Resize row if it doesn't have enough cells. if len(row) != len(inner_widths): row = row + [''] * (len(inner_widths) - len(row)) # Pad and align each cell. Split each cell into lines to support multi-line cells. for i, cell in enumerate(row): align = (self.justify_columns.get(i),) inner_dimensions = (inner_widths[i], height) padding = (self.padding_left, self.padding_right, 0, 0) cells_in_row.append(align_and_pad_cell(cell, align, inner_dimensions, padding)) # Determine border characters. if style == 'heading': left = self.CHAR_H_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_H_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_H_OUTER_RIGHT_VERTICAL if self.outer_border else '' elif style == 'footing': left = self.CHAR_F_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_F_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_F_OUTER_RIGHT_VERTICAL if self.outer_border else '' else: left = self.CHAR_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_OUTER_RIGHT_VERTICAL if self.outer_border else '' # Yield each line. for line in build_row(cells_in_row, left, center, right): yield line
python
def gen_row_lines(self, row, style, inner_widths, height): r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line. """ cells_in_row = list() # Resize row if it doesn't have enough cells. if len(row) != len(inner_widths): row = row + [''] * (len(inner_widths) - len(row)) # Pad and align each cell. Split each cell into lines to support multi-line cells. for i, cell in enumerate(row): align = (self.justify_columns.get(i),) inner_dimensions = (inner_widths[i], height) padding = (self.padding_left, self.padding_right, 0, 0) cells_in_row.append(align_and_pad_cell(cell, align, inner_dimensions, padding)) # Determine border characters. if style == 'heading': left = self.CHAR_H_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_H_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_H_OUTER_RIGHT_VERTICAL if self.outer_border else '' elif style == 'footing': left = self.CHAR_F_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_F_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_F_OUTER_RIGHT_VERTICAL if self.outer_border else '' else: left = self.CHAR_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_OUTER_RIGHT_VERTICAL if self.outer_border else '' # Yield each line. for line in build_row(cells_in_row, left, center, right): yield line
['def', 'gen_row_lines', '(', 'self', ',', 'row', ',', 'style', ',', 'inner_widths', ',', 'height', ')', ':', 'cells_in_row', '=', 'list', '(', ')', "# Resize row if it doesn't have enough cells.", 'if', 'len', '(', 'row', ')', '!=', 'len', '(', 'inner_widths', ')', ':', 'row', '=', 'row', '+', '[', "''", ']', '*', '(', 'len', '(', 'inner_widths', ')', '-', 'len', '(', 'row', ')', ')', '# Pad and align each cell. Split each cell into lines to support multi-line cells.', 'for', 'i', ',', 'cell', 'in', 'enumerate', '(', 'row', ')', ':', 'align', '=', '(', 'self', '.', 'justify_columns', '.', 'get', '(', 'i', ')', ',', ')', 'inner_dimensions', '=', '(', 'inner_widths', '[', 'i', ']', ',', 'height', ')', 'padding', '=', '(', 'self', '.', 'padding_left', ',', 'self', '.', 'padding_right', ',', '0', ',', '0', ')', 'cells_in_row', '.', 'append', '(', 'align_and_pad_cell', '(', 'cell', ',', 'align', ',', 'inner_dimensions', ',', 'padding', ')', ')', '# Determine border characters.', 'if', 'style', '==', "'heading'", ':', 'left', '=', 'self', '.', 'CHAR_H_OUTER_LEFT_VERTICAL', 'if', 'self', '.', 'outer_border', 'else', "''", 'center', '=', 'self', '.', 'CHAR_H_INNER_VERTICAL', 'if', 'self', '.', 'inner_column_border', 'else', "''", 'right', '=', 'self', '.', 'CHAR_H_OUTER_RIGHT_VERTICAL', 'if', 'self', '.', 'outer_border', 'else', "''", 'elif', 'style', '==', "'footing'", ':', 'left', '=', 'self', '.', 'CHAR_F_OUTER_LEFT_VERTICAL', 'if', 'self', '.', 'outer_border', 'else', "''", 'center', '=', 'self', '.', 'CHAR_F_INNER_VERTICAL', 'if', 'self', '.', 'inner_column_border', 'else', "''", 'right', '=', 'self', '.', 'CHAR_F_OUTER_RIGHT_VERTICAL', 'if', 'self', '.', 'outer_border', 'else', "''", 'else', ':', 'left', '=', 'self', '.', 'CHAR_OUTER_LEFT_VERTICAL', 'if', 'self', '.', 'outer_border', 'else', "''", 'center', '=', 'self', '.', 'CHAR_INNER_VERTICAL', 'if', 'self', '.', 'inner_column_border', 'else', "''", 'right', '=', 'self', '.', 'CHAR_OUTER_RIGHT_VERTICAL', 'if', 'self', '.', 'outer_border', 'else', "''", '# Yield each line.', 'for', 'line', 'in', 'build_row', '(', 'cells_in_row', ',', 'left', ',', 'center', ',', 'right', ')', ':', 'yield', 'line']
r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line.
['r', 'Combine', 'cells', 'in', 'row', 'and', 'group', 'them', 'into', 'lines', 'with', 'vertical', 'borders', '.']
train
https://github.com/Robpol86/terminaltables/blob/ad8f46e50afdbaea377fc1f713bc0e7a31c4fccc/terminaltables/base_table.py#L112-L169
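gen_row_lines() is internal, but its effect (multi-line cells padded to a common height and joined with vertical border characters) is easiest to see through the public table classes. A usage sketch with AsciiTable, assuming the terminaltables package is installed:

from terminaltables import AsciiTable

rows = [
    ['Heading A', 'Heading B', 'Heading C'],
    ['Row One\nColumn One', 'Two', 'Three'],  # the multi-line cell from the docstring above
]
print(AsciiTable(rows).table)  # the second data row renders as two bordered lines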
1,380
pypa/pipenv
pipenv/vendor/distlib/locators.py
SimpleScrapingLocator._process_download
def _process_download(self, url): """ See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. """ if self.platform_check and self._is_platform_dependent(url): info = None else: info = self.convert_url_to_download_info(url, self.project_name) logger.debug('process_download: %s -> %s', url, info) if info: with self._lock: # needed because self.result is shared self._update_version_data(self.result, info) return info
python
def _process_download(self, url): """ See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. """ if self.platform_check and self._is_platform_dependent(url): info = None else: info = self.convert_url_to_download_info(url, self.project_name) logger.debug('process_download: %s -> %s', url, info) if info: with self._lock: # needed because self.result is shared self._update_version_data(self.result, info) return info
['def', '_process_download', '(', 'self', ',', 'url', ')', ':', 'if', 'self', '.', 'platform_check', 'and', 'self', '.', '_is_platform_dependent', '(', 'url', ')', ':', 'info', '=', 'None', 'else', ':', 'info', '=', 'self', '.', 'convert_url_to_download_info', '(', 'url', ',', 'self', '.', 'project_name', ')', 'logger', '.', 'debug', '(', "'process_download: %s -> %s'", ',', 'url', ',', 'info', ')', 'if', 'info', ':', 'with', 'self', '.', '_lock', ':', '# needed because self.result is shared', 'self', '.', '_update_version_data', '(', 'self', '.', 'result', ',', 'info', ')', 'return', 'info']
See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value.
['See', 'if', 'an', 'URL', 'is', 'a', 'suitable', 'download', 'for', 'a', 'project', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/locators.py#L673-L691
1,381
ansible/ansible-container
container/openshift/deploy.py
Deploy.get_route_templates
def get_route_templates(self): """ Generate Openshift route templates or playbook tasks. Each port on a service definition found in container.yml represents an externally exposed port. """ def _get_published_ports(service_config): result = [] for port in service_config.get('ports', []): protocol = 'TCP' if isinstance(port, string_types) and '/' in port: port, protocol = port.split('/') if isinstance(port, string_types) and ':' in port: host, container = port.split(':') else: host = port result.append({'port': host, 'protocol': protocol.lower()}) return result templates = [] for name, service_config in self._services.items(): state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present') force = service_config.get(self.CONFIG_KEY, {}).get('force', False) published_ports = _get_published_ports(service_config) if state != 'present': continue for port in published_ports: route_name = "%s-%s" % (name, port['port']) labels = dict( app=self._namespace_name, service=name ) template = CommentedMap() template['apiVersion'] = self.DEFAULT_API_VERSION template['kind'] = 'Route' template['force'] = force template['metadata'] = CommentedMap([ ('name', route_name), ('namespace', self._namespace_name), ('labels', labels.copy()) ]) template['spec'] = CommentedMap([ ('to', CommentedMap([ ('kind', 'Service'), ('name', name) ])), ('port', CommentedMap([ ('targetPort', 'port-{}-{}'.format(port['port'], port['protocol'])) ])) ]) if service_config.get(self.CONFIG_KEY, {}).get('routes'): for route in service_config[self.CONFIG_KEY]['routes']: if str(route.get('port')) == str(port['port']): for key, value in route.items(): if key not in ('force', 'port'): self.copy_attribute(template['spec'], key, value) templates.append(template) return templates
python
def get_route_templates(self): """ Generate Openshift route templates or playbook tasks. Each port on a service definition found in container.yml represents an externally exposed port. """ def _get_published_ports(service_config): result = [] for port in service_config.get('ports', []): protocol = 'TCP' if isinstance(port, string_types) and '/' in port: port, protocol = port.split('/') if isinstance(port, string_types) and ':' in port: host, container = port.split(':') else: host = port result.append({'port': host, 'protocol': protocol.lower()}) return result templates = [] for name, service_config in self._services.items(): state = service_config.get(self.CONFIG_KEY, {}).get('state', 'present') force = service_config.get(self.CONFIG_KEY, {}).get('force', False) published_ports = _get_published_ports(service_config) if state != 'present': continue for port in published_ports: route_name = "%s-%s" % (name, port['port']) labels = dict( app=self._namespace_name, service=name ) template = CommentedMap() template['apiVersion'] = self.DEFAULT_API_VERSION template['kind'] = 'Route' template['force'] = force template['metadata'] = CommentedMap([ ('name', route_name), ('namespace', self._namespace_name), ('labels', labels.copy()) ]) template['spec'] = CommentedMap([ ('to', CommentedMap([ ('kind', 'Service'), ('name', name) ])), ('port', CommentedMap([ ('targetPort', 'port-{}-{}'.format(port['port'], port['protocol'])) ])) ]) if service_config.get(self.CONFIG_KEY, {}).get('routes'): for route in service_config[self.CONFIG_KEY]['routes']: if str(route.get('port')) == str(port['port']): for key, value in route.items(): if key not in ('force', 'port'): self.copy_attribute(template['spec'], key, value) templates.append(template) return templates
['def', 'get_route_templates', '(', 'self', ')', ':', 'def', '_get_published_ports', '(', 'service_config', ')', ':', 'result', '=', '[', ']', 'for', 'port', 'in', 'service_config', '.', 'get', '(', "'ports'", ',', '[', ']', ')', ':', 'protocol', '=', "'TCP'", 'if', 'isinstance', '(', 'port', ',', 'string_types', ')', 'and', "'/'", 'in', 'port', ':', 'port', ',', 'protocol', '=', 'port', '.', 'split', '(', "'/'", ')', 'if', 'isinstance', '(', 'port', ',', 'string_types', ')', 'and', "':'", 'in', 'port', ':', 'host', ',', 'container', '=', 'port', '.', 'split', '(', "':'", ')', 'else', ':', 'host', '=', 'port', 'result', '.', 'append', '(', '{', "'port'", ':', 'host', ',', "'protocol'", ':', 'protocol', '.', 'lower', '(', ')', '}', ')', 'return', 'result', 'templates', '=', '[', ']', 'for', 'name', ',', 'service_config', 'in', 'self', '.', '_services', '.', 'items', '(', ')', ':', 'state', '=', 'service_config', '.', 'get', '(', 'self', '.', 'CONFIG_KEY', ',', '{', '}', ')', '.', 'get', '(', "'state'", ',', "'present'", ')', 'force', '=', 'service_config', '.', 'get', '(', 'self', '.', 'CONFIG_KEY', ',', '{', '}', ')', '.', 'get', '(', "'force'", ',', 'False', ')', 'published_ports', '=', '_get_published_ports', '(', 'service_config', ')', 'if', 'state', '!=', "'present'", ':', 'continue', 'for', 'port', 'in', 'published_ports', ':', 'route_name', '=', '"%s-%s"', '%', '(', 'name', ',', 'port', '[', "'port'", ']', ')', 'labels', '=', 'dict', '(', 'app', '=', 'self', '.', '_namespace_name', ',', 'service', '=', 'name', ')', 'template', '=', 'CommentedMap', '(', ')', 'template', '[', "'apiVersion'", ']', '=', 'self', '.', 'DEFAULT_API_VERSION', 'template', '[', "'kind'", ']', '=', "'Route'", 'template', '[', "'force'", ']', '=', 'force', 'template', '[', "'metadata'", ']', '=', 'CommentedMap', '(', '[', '(', "'name'", ',', 'route_name', ')', ',', '(', "'namespace'", ',', 'self', '.', '_namespace_name', ')', ',', '(', "'labels'", ',', 'labels', '.', 'copy', '(', ')', ')', ']', ')', 'template', '[', "'spec'", ']', '=', 'CommentedMap', '(', '[', '(', "'to'", ',', 'CommentedMap', '(', '[', '(', "'kind'", ',', "'Service'", ')', ',', '(', "'name'", ',', 'name', ')', ']', ')', ')', ',', '(', "'port'", ',', 'CommentedMap', '(', '[', '(', "'targetPort'", ',', "'port-{}-{}'", '.', 'format', '(', 'port', '[', "'port'", ']', ',', 'port', '[', "'protocol'", ']', ')', ')', ']', ')', ')', ']', ')', 'if', 'service_config', '.', 'get', '(', 'self', '.', 'CONFIG_KEY', ',', '{', '}', ')', '.', 'get', '(', "'routes'", ')', ':', 'for', 'route', 'in', 'service_config', '[', 'self', '.', 'CONFIG_KEY', ']', '[', "'routes'", ']', ':', 'if', 'str', '(', 'route', '.', 'get', '(', "'port'", ')', ')', '==', 'str', '(', 'port', '[', "'port'", ']', ')', ':', 'for', 'key', ',', 'value', 'in', 'route', '.', 'items', '(', ')', ':', 'if', 'key', 'not', 'in', '(', "'force'", ',', "'port'", ')', ':', 'self', '.', 'copy_attribute', '(', 'template', '[', "'spec'", ']', ',', 'key', ',', 'value', ')', 'templates', '.', 'append', '(', 'template', ')', 'return', 'templates']
Generate Openshift route templates or playbook tasks. Each port on a service definition found in container.yml represents an externally exposed port.
['Generate', 'Openshift', 'route', 'templates', 'or', 'playbook', 'tasks', '.', 'Each', 'port', 'on', 'a', 'service', 'definition', 'found', 'in', 'container', '.', 'yml', 'represents', 'an', 'externally', 'exposed', 'port', '.']
train
https://github.com/ansible/ansible-container/blob/d031c1a6133d5482a5d054fcbdbecafb923f8b4b/container/openshift/deploy.py#L56-L117
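The nested _get_published_ports() helper above accepts ports written as '80', '8080:80', or '8080:80/udp'. A standalone copy of just that parsing step (using plain str checks instead of six's string_types) shows what each form becomes:

def published_ports(ports):
    # Standalone copy of the port-parsing logic above (illustrative).
    result = []
    for port in ports:
        protocol = 'TCP'
        if isinstance(port, str) and '/' in port:
            port, protocol = port.split('/')
        if isinstance(port, str) and ':' in port:
            host, _container = port.split(':')
        else:
            host = port
        result.append({'port': host, 'protocol': protocol.lower()})
    return result

print(published_ports(['80', '8080:80', '8443:443/udp']))
# [{'port': '80', 'protocol': 'tcp'}, {'port': '8080', 'protocol': 'tcp'}, {'port': '8443', 'protocol': 'udp'}]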
1,382
studionow/pybrightcove
pybrightcove/video.py
Video.add_custom_metadata
def add_custom_metadata(self, key, value, meta_type=None): """ Add custom metadata to the Video. meta_type is required for XML API. """ self.metadata.append({'key': key, 'value': value, 'type': meta_type})
python
def add_custom_metadata(self, key, value, meta_type=None): """ Add custom metadata to the Video. meta_type is required for XML API. """ self.metadata.append({'key': key, 'value': value, 'type': meta_type})
['def', 'add_custom_metadata', '(', 'self', ',', 'key', ',', 'value', ',', 'meta_type', '=', 'None', ')', ':', 'self', '.', 'metadata', '.', 'append', '(', '{', "'key'", ':', 'key', ',', "'value'", ':', 'value', ',', "'type'", ':', 'meta_type', '}', ')']
Add custom metadata to the Video. meta_type is required for XML API.
['Add', 'custom', 'metadata', 'to', 'the', 'Video', '.', 'meta_type', 'is', 'required', 'for', 'XML', 'API', '.']
train
https://github.com/studionow/pybrightcove/blob/19c946b689a80156e070fe9bc35589c4b768e614/pybrightcove/video.py#L487-L491
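add_custom_metadata() only appends a small dict to the video's metadata list; a minimal stand-in class (not the real Video constructor) makes the resulting shape explicit:

class VideoSketch:
    def __init__(self):
        self.metadata = []

    def add_custom_metadata(self, key, value, meta_type=None):
        self.metadata.append({'key': key, 'value': value, 'type': meta_type})

v = VideoSketch()
v.add_custom_metadata('genre', 'documentary', meta_type='string')
print(v.metadata)  # [{'key': 'genre', 'value': 'documentary', 'type': 'string'}]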
1,383
shidenggui/easytrader
easytrader/yh_clienttrader.py
YHClientTrader.login
def login(self, user, password, exe_path, comm_password=None, **kwargs): """ 登陆客户端 :param user: 账号 :param password: 明文密码 :param exe_path: 客户端路径类似 'C:\\中国银河证券双子星3.2\\Binarystar.exe', 默认 'C:\\中国银河证券双子星3.2\\Binarystar.exe' :param comm_password: 通讯密码, 华泰需要,可不设 :return: """ try: self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=1 ) # pylint: disable=broad-except except Exception: self._app = pywinauto.Application().start(exe_path) is_xiadan = True if "xiadan.exe" in exe_path else False # wait login window ready while True: try: self._app.top_window().Edit1.wait("ready") break except RuntimeError: pass self._app.top_window().Edit1.type_keys(user) self._app.top_window().Edit2.type_keys(password) while True: self._app.top_window().Edit3.type_keys( self._handle_verify_code(is_xiadan) ) self._app.top_window()["确定" if is_xiadan else "登录"].click() # detect login is success or not try: self._app.top_window().wait_not("exists visible", 10) break # pylint: disable=broad-except except Exception: if is_xiadan: self._app.top_window()["确定"].click() self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=10 ) self._close_prompt_windows() self._main = self._app.window(title="网上股票交易系统5.0") try: self._main.child_window( control_id=129, class_name="SysTreeView32" ).wait("ready", 2) # pylint: disable=broad-except except Exception: self.wait(2) self._switch_window_to_normal_mode()
python
def login(self, user, password, exe_path, comm_password=None, **kwargs): """ 登陆客户端 :param user: 账号 :param password: 明文密码 :param exe_path: 客户端路径类似 'C:\\中国银河证券双子星3.2\\Binarystar.exe', 默认 'C:\\中国银河证券双子星3.2\\Binarystar.exe' :param comm_password: 通讯密码, 华泰需要,可不设 :return: """ try: self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=1 ) # pylint: disable=broad-except except Exception: self._app = pywinauto.Application().start(exe_path) is_xiadan = True if "xiadan.exe" in exe_path else False # wait login window ready while True: try: self._app.top_window().Edit1.wait("ready") break except RuntimeError: pass self._app.top_window().Edit1.type_keys(user) self._app.top_window().Edit2.type_keys(password) while True: self._app.top_window().Edit3.type_keys( self._handle_verify_code(is_xiadan) ) self._app.top_window()["确定" if is_xiadan else "登录"].click() # detect login is success or not try: self._app.top_window().wait_not("exists visible", 10) break # pylint: disable=broad-except except Exception: if is_xiadan: self._app.top_window()["确定"].click() self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=10 ) self._close_prompt_windows() self._main = self._app.window(title="网上股票交易系统5.0") try: self._main.child_window( control_id=129, class_name="SysTreeView32" ).wait("ready", 2) # pylint: disable=broad-except except Exception: self.wait(2) self._switch_window_to_normal_mode()
['def', 'login', '(', 'self', ',', 'user', ',', 'password', ',', 'exe_path', ',', 'comm_password', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'self', '.', '_app', '=', 'pywinauto', '.', 'Application', '(', ')', '.', 'connect', '(', 'path', '=', 'self', '.', '_run_exe_path', '(', 'exe_path', ')', ',', 'timeout', '=', '1', ')', '# pylint: disable=broad-except', 'except', 'Exception', ':', 'self', '.', '_app', '=', 'pywinauto', '.', 'Application', '(', ')', '.', 'start', '(', 'exe_path', ')', 'is_xiadan', '=', 'True', 'if', '"xiadan.exe"', 'in', 'exe_path', 'else', 'False', '# wait login window ready', 'while', 'True', ':', 'try', ':', 'self', '.', '_app', '.', 'top_window', '(', ')', '.', 'Edit1', '.', 'wait', '(', '"ready"', ')', 'break', 'except', 'RuntimeError', ':', 'pass', 'self', '.', '_app', '.', 'top_window', '(', ')', '.', 'Edit1', '.', 'type_keys', '(', 'user', ')', 'self', '.', '_app', '.', 'top_window', '(', ')', '.', 'Edit2', '.', 'type_keys', '(', 'password', ')', 'while', 'True', ':', 'self', '.', '_app', '.', 'top_window', '(', ')', '.', 'Edit3', '.', 'type_keys', '(', 'self', '.', '_handle_verify_code', '(', 'is_xiadan', ')', ')', 'self', '.', '_app', '.', 'top_window', '(', ')', '[', '"确定" if ', 's_', 'iadan els', ' "登录', '].click(', ')', '', '', '', '', '# detect login is success or not', 'try', ':', 'self', '.', '_app', '.', 'top_window', '(', ')', '.', 'wait_not', '(', '"exists visible"', ',', '10', ')', 'break', '# pylint: disable=broad-except', 'except', 'Exception', ':', 'if', 'is_xiadan', ':', 'self', '.', '_app', '.', 'top_window', '(', ')', '[', '"确定"].cl', 'i', 'c', 'k()', '', '', 'self', '.', '_app', '=', 'pywinauto', '.', 'Application', '(', ')', '.', 'connect', '(', 'path', '=', 'self', '.', '_run_exe_path', '(', 'exe_path', ')', ',', 'timeout', '=', '10', ')', 'self', '.', '_close_prompt_windows', '(', ')', 'self', '.', '_main', '=', 'self', '.', '_app', '.', 'window', '(', 'title', '=', '"网上股票交易系统5.0")', '', 'try', ':', 'self', '.', '_main', '.', 'child_window', '(', 'control_id', '=', '129', ',', 'class_name', '=', '"SysTreeView32"', ')', '.', 'wait', '(', '"ready"', ',', '2', ')', '# pylint: disable=broad-except', 'except', 'Exception', ':', 'self', '.', 'wait', '(', '2', ')', 'self', '.', '_switch_window_to_normal_mode', '(', ')']
登陆客户端 :param user: 账号 :param password: 明文密码 :param exe_path: 客户端路径类似 'C:\\中国银河证券双子星3.2\\Binarystar.exe', 默认 'C:\\中国银河证券双子星3.2\\Binarystar.exe' :param comm_password: 通讯密码, 华泰需要,可不设 :return:
['登陆客户端', ':', 'param', 'user', ':', '账号', ':', 'param', 'password', ':', '明文密码', ':', 'param', 'exe_path', ':', '客户端路径类似', 'C', ':', '\\\\', '中国银河证券双子星3', '.', '2', '\\\\', 'Binarystar', '.', 'exe', '默认', 'C', ':', '\\\\', '中国银河证券双子星3', '.', '2', '\\\\', 'Binarystar', '.', 'exe', ':', 'param', 'comm_password', ':', '通讯密码', '华泰需要,可不设', ':', 'return', ':']
train
https://github.com/shidenggui/easytrader/blob/e5ae4daeda4ea125763a95b280dd694c7f68257d/easytrader/yh_clienttrader.py#L25-L80
1,384
Vito2015/pyextend
pyextend/formula/lbstools.py
calc_distance
def calc_distance(lng1, lat1, lng2, lat2): """Calc distance (km) by geo-coordinates. @:param lng1: first coordinate.lng @:param lat1: first coordinate.lat @:param lng2: second coordinate.lng @:param lat2: second coordinate.lat @:return distance: km """ ra = 6378.140 # 赤道半径 (km) rb = 6356.755 # 极半径 (km) flatten = (ra - rb) / ra # 地球扁率 rad_lat_1 = math.radians(lat1) rad_lng_1 = math.radians(lng1) rad_lat_2 = math.radians(lat2) rad_lng_2 = math.radians(lng2) p1 = math.atan(rb / ra * math.tan(rad_lat_1)) p2 = math.atan(rb / ra * math.tan(rad_lat_2)) xx = math.acos(math.sin(p1) * math.sin(p2) + math.cos(p1) * math.cos(p2) * math.cos(rad_lng_1 - rad_lng_2)) c1 = (math.sin(xx) - xx) * (math.sin(p1) + math.sin(p2)) ** 2 / math.cos(xx / 2) ** 2 c2 = (math.sin(xx) + xx) * (math.sin(p1) - math.sin(p2)) ** 2 / math.sin(xx / 2) ** 2 dr = flatten / 8 * (c1 - c2) distance = ra * (xx + dr) return distance
python
def calc_distance(lng1, lat1, lng2, lat2): """Calc distance (km) by geo-coordinates. @:param lng1: first coordinate.lng @:param lat1: first coordinate.lat @:param lng2: second coordinate.lng @:param lat2: second coordinate.lat @:return distance: km """ ra = 6378.140 # 赤道半径 (km) rb = 6356.755 # 极半径 (km) flatten = (ra - rb) / ra # 地球扁率 rad_lat_1 = math.radians(lat1) rad_lng_1 = math.radians(lng1) rad_lat_2 = math.radians(lat2) rad_lng_2 = math.radians(lng2) p1 = math.atan(rb / ra * math.tan(rad_lat_1)) p2 = math.atan(rb / ra * math.tan(rad_lat_2)) xx = math.acos(math.sin(p1) * math.sin(p2) + math.cos(p1) * math.cos(p2) * math.cos(rad_lng_1 - rad_lng_2)) c1 = (math.sin(xx) - xx) * (math.sin(p1) + math.sin(p2)) ** 2 / math.cos(xx / 2) ** 2 c2 = (math.sin(xx) + xx) * (math.sin(p1) - math.sin(p2)) ** 2 / math.sin(xx / 2) ** 2 dr = flatten / 8 * (c1 - c2) distance = ra * (xx + dr) return distance
['def', 'calc_distance', '(', 'lng1', ',', 'lat1', ',', 'lng2', ',', 'lat2', ')', ':', 'ra', '=', '6378.140', '# 赤道半径 (km)', 'rb', '=', '6356.755', '# 极半径 (km)', 'flatten', '=', '(', 'ra', '-', 'rb', ')', '/', 'ra', '# 地球扁率', 'rad_lat_1', '=', 'math', '.', 'radians', '(', 'lat1', ')', 'rad_lng_1', '=', 'math', '.', 'radians', '(', 'lng1', ')', 'rad_lat_2', '=', 'math', '.', 'radians', '(', 'lat2', ')', 'rad_lng_2', '=', 'math', '.', 'radians', '(', 'lng2', ')', 'p1', '=', 'math', '.', 'atan', '(', 'rb', '/', 'ra', '*', 'math', '.', 'tan', '(', 'rad_lat_1', ')', ')', 'p2', '=', 'math', '.', 'atan', '(', 'rb', '/', 'ra', '*', 'math', '.', 'tan', '(', 'rad_lat_2', ')', ')', 'xx', '=', 'math', '.', 'acos', '(', 'math', '.', 'sin', '(', 'p1', ')', '*', 'math', '.', 'sin', '(', 'p2', ')', '+', 'math', '.', 'cos', '(', 'p1', ')', '*', 'math', '.', 'cos', '(', 'p2', ')', '*', 'math', '.', 'cos', '(', 'rad_lng_1', '-', 'rad_lng_2', ')', ')', 'c1', '=', '(', 'math', '.', 'sin', '(', 'xx', ')', '-', 'xx', ')', '*', '(', 'math', '.', 'sin', '(', 'p1', ')', '+', 'math', '.', 'sin', '(', 'p2', ')', ')', '**', '2', '/', 'math', '.', 'cos', '(', 'xx', '/', '2', ')', '**', '2', 'c2', '=', '(', 'math', '.', 'sin', '(', 'xx', ')', '+', 'xx', ')', '*', '(', 'math', '.', 'sin', '(', 'p1', ')', '-', 'math', '.', 'sin', '(', 'p2', ')', ')', '**', '2', '/', 'math', '.', 'sin', '(', 'xx', '/', '2', ')', '**', '2', 'dr', '=', 'flatten', '/', '8', '*', '(', 'c1', '-', 'c2', ')', 'distance', '=', 'ra', '*', '(', 'xx', '+', 'dr', ')', 'return', 'distance']
Calc distance (km) by geo-coordinates. @:param lng1: first coordinate.lng @:param lat1: first coordinate.lat @:param lng2: second coordinate.lng @:param lat2: second coordinate.lat @:return distance: km
['Calc', 'distance', '(', 'km', ')', 'by', 'geo', '-', 'coordinates', '.']
train
https://github.com/Vito2015/pyextend/blob/36861dfe1087e437ffe9b5a1da9345c85b4fa4a1/pyextend/formula/lbstools.py#L32-L54
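A usage sketch for the distance helper above, assuming the module is importable as pyextend.formula.lbstools (matching the file path in this record); arguments are longitude/latitude in degrees and the result is in kilometres, so Beijing to Shanghai comes out on the order of 1.1e3 km.

from pyextend.formula.lbstools import calc_distance  # assumed import path

beijing = (116.40, 39.90)    # (lng, lat)
shanghai = (121.47, 31.23)
print(round(calc_distance(beijing[0], beijing[1], shanghai[0], shanghai[1])))  # on the order of 1.1e3 km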
1,385
edoburu/django-any-urlfield
any_urlfield/templatetags/any_urlfield_tags.py
WithDictNode.render
def render(self, context): """ Render the tag, with extra context layer. """ extra_context = self.context_expr.resolve(context) if not isinstance(extra_context, dict): raise TemplateSyntaxError("{% withdict %} expects the argument to be a dictionary.") with context.push(**extra_context): return self.nodelist.render(context)
python
def render(self, context): """ Render the tag, with extra context layer. """ extra_context = self.context_expr.resolve(context) if not isinstance(extra_context, dict): raise TemplateSyntaxError("{% withdict %} expects the argument to be a dictionary.") with context.push(**extra_context): return self.nodelist.render(context)
['def', 'render', '(', 'self', ',', 'context', ')', ':', 'extra_context', '=', 'self', '.', 'context_expr', '.', 'resolve', '(', 'context', ')', 'if', 'not', 'isinstance', '(', 'extra_context', ',', 'dict', ')', ':', 'raise', 'TemplateSyntaxError', '(', '"{% withdict %} expects the argument to be a dictionary."', ')', 'with', 'context', '.', 'push', '(', '*', '*', 'extra_context', ')', ':', 'return', 'self', '.', 'nodelist', '.', 'render', '(', 'context', ')']
Render the tag, with extra context layer.
['Render', 'the', 'tag', 'with', 'extra', 'context', 'layer', '.']
train
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/templatetags/any_urlfield_tags.py#L18-L27
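The node above backs a {% withdict %}-style tag that pushes a whole dictionary onto the template context for the enclosed block. The core mechanism is Context.push(**extra_context); a small sketch of just that mechanism, assuming a recent Django where push() accepts keyword arguments and acts as a context manager (the tag registration and its closing tag are not shown in this record):

from django.template import Context

context = Context({'title': 'outer'})
extra = {'title': 'inner', 'count': 3}
with context.push(**extra):
    print(context['title'], context['count'])  # inner 3
print(context['title'])                         # outer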
1,386
markovmodel/PyEMMA
pyemma/coordinates/estimation/koopman.py
_compute_u
def _compute_u(K): """ Estimate an approximation of the ratio of stationary over empirical distribution from the basis. Parameters: ----------- K0, ndarray(M+1, M+1), time-lagged correlation matrix for the whitened and padded data set. Returns: -------- u : ndarray(M,) coefficients of the ratio stationary / empirical dist. from the whitened and expanded basis. """ M = K.shape[0] - 1 # Compute right and left eigenvectors: l, U = scl.eig(K.T) l, U = sort_by_norm(l, U) # Extract the eigenvector for eigenvalue one and normalize: u = np.real(U[:, 0]) v = np.zeros(M+1) v[M] = 1.0 u = u / np.dot(u, v) return u
python
def _compute_u(K): """ Estimate an approximation of the ratio of stationary over empirical distribution from the basis. Parameters: ----------- K0, ndarray(M+1, M+1), time-lagged correlation matrix for the whitened and padded data set. Returns: -------- u : ndarray(M,) coefficients of the ratio stationary / empirical dist. from the whitened and expanded basis. """ M = K.shape[0] - 1 # Compute right and left eigenvectors: l, U = scl.eig(K.T) l, U = sort_by_norm(l, U) # Extract the eigenvector for eigenvalue one and normalize: u = np.real(U[:, 0]) v = np.zeros(M+1) v[M] = 1.0 u = u / np.dot(u, v) return u
['def', '_compute_u', '(', 'K', ')', ':', 'M', '=', 'K', '.', 'shape', '[', '0', ']', '-', '1', '# Compute right and left eigenvectors:', 'l', ',', 'U', '=', 'scl', '.', 'eig', '(', 'K', '.', 'T', ')', 'l', ',', 'U', '=', 'sort_by_norm', '(', 'l', ',', 'U', ')', '# Extract the eigenvector for eigenvalue one and normalize:', 'u', '=', 'np', '.', 'real', '(', 'U', '[', ':', ',', '0', ']', ')', 'v', '=', 'np', '.', 'zeros', '(', 'M', '+', '1', ')', 'v', '[', 'M', ']', '=', '1.0', 'u', '=', 'u', '/', 'np', '.', 'dot', '(', 'u', ',', 'v', ')', 'return', 'u']
Estimate an approximation of the ratio of stationary over empirical distribution from the basis. Parameters: ----------- K0, ndarray(M+1, M+1), time-lagged correlation matrix for the whitened and padded data set. Returns: -------- u : ndarray(M,) coefficients of the ratio stationary / empirical dist. from the whitened and expanded basis.
['Estimate', 'an', 'approximation', 'of', 'the', 'ratio', 'of', 'stationary', 'over', 'empirical', 'distribution', 'from', 'the', 'basis', '.', 'Parameters', ':', '-----------', 'K0', 'ndarray', '(', 'M', '+', '1', 'M', '+', '1', ')', 'time', '-', 'lagged', 'correlation', 'matrix', 'for', 'the', 'whitened', 'and', 'padded', 'data', 'set', '.', 'Returns', ':', '--------', 'u', ':', 'ndarray', '(', 'M', ')', 'coefficients', 'of', 'the', 'ratio', 'stationary', '/', 'empirical', 'dist', '.', 'from', 'the', 'whitened', 'and', 'expanded', 'basis', '.']
train
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/estimation/koopman.py#L29-L50
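A self-contained numpy/scipy sketch of the same computation: take the left eigenvector of K for the dominant (approximately 1) eigenvalue and normalize it so its last component is 1; picking the largest-magnitude eigenvalue stands in for the sort_by_norm helper used above.

import numpy as np
import scipy.linalg as scl

def compute_u_sketch(K):
    # Left eigenvector of K at the dominant eigenvalue, normalized so u[-1] == 1.
    l, U = scl.eig(K.T)
    idx = np.argsort(-np.abs(l))   # stand-in for sort_by_norm
    u = np.real(U[:, idx[0]])
    return u / u[-1]               # dot(u, v) with v = e_M is just the last entry

K = np.array([[0.9, 0.1],
              [0.2, 0.8]])
print(compute_u_sketch(K))  # [2. 1.]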
1,387
ZELLMECHANIK-DRESDEN/dclab
dclab/isoelastics/__init__.py
Isoelastics.load_data
def load_data(self, path): """Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file """ path = pathlib.Path(path).resolve() # Get metadata meta = {} with path.open() as fd: while True: line = fd.readline().strip() if line.startswith("# - "): line = line.strip("#- ") var, val = line.split(":") if val.strip().replace(".", "").isdigit(): # channel width, flow rate, viscosity val = float(val) else: # columns, calculation val = val.strip().lower() meta[var.strip()] = val elif line and not line.startswith("#"): break assert meta["column 1"] in dfn.scalar_feature_names assert meta["column 2"] in dfn.scalar_feature_names assert meta["column 3"] == "emodulus" assert meta["method"] in VALID_METHODS # Load isoelasics with path.open("rb") as isfd: isodata = np.loadtxt(isfd) # Slice out individual isoelastics emoduli = np.unique(isodata[:, 2]) isoel = [] for emod in emoduli: where = isodata[:, 2] == emod isoel.append(isodata[where]) # Add isoelastics to instance self.add(isoel=isoel, col1=meta["column 1"], col2=meta["column 2"], channel_width=meta["channel width [um]"], flow_rate=meta["flow rate [ul/s]"], viscosity=meta["viscosity [mPa*s]"], method=meta["method"])
python
def load_data(self, path): """Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file """ path = pathlib.Path(path).resolve() # Get metadata meta = {} with path.open() as fd: while True: line = fd.readline().strip() if line.startswith("# - "): line = line.strip("#- ") var, val = line.split(":") if val.strip().replace(".", "").isdigit(): # channel width, flow rate, viscosity val = float(val) else: # columns, calculation val = val.strip().lower() meta[var.strip()] = val elif line and not line.startswith("#"): break assert meta["column 1"] in dfn.scalar_feature_names assert meta["column 2"] in dfn.scalar_feature_names assert meta["column 3"] == "emodulus" assert meta["method"] in VALID_METHODS # Load isoelasics with path.open("rb") as isfd: isodata = np.loadtxt(isfd) # Slice out individual isoelastics emoduli = np.unique(isodata[:, 2]) isoel = [] for emod in emoduli: where = isodata[:, 2] == emod isoel.append(isodata[where]) # Add isoelastics to instance self.add(isoel=isoel, col1=meta["column 1"], col2=meta["column 2"], channel_width=meta["channel width [um]"], flow_rate=meta["flow rate [ul/s]"], viscosity=meta["viscosity [mPa*s]"], method=meta["method"])
['def', 'load_data', '(', 'self', ',', 'path', ')', ':', 'path', '=', 'pathlib', '.', 'Path', '(', 'path', ')', '.', 'resolve', '(', ')', '# Get metadata', 'meta', '=', '{', '}', 'with', 'path', '.', 'open', '(', ')', 'as', 'fd', ':', 'while', 'True', ':', 'line', '=', 'fd', '.', 'readline', '(', ')', '.', 'strip', '(', ')', 'if', 'line', '.', 'startswith', '(', '"# - "', ')', ':', 'line', '=', 'line', '.', 'strip', '(', '"#- "', ')', 'var', ',', 'val', '=', 'line', '.', 'split', '(', '":"', ')', 'if', 'val', '.', 'strip', '(', ')', '.', 'replace', '(', '"."', ',', '""', ')', '.', 'isdigit', '(', ')', ':', '# channel width, flow rate, viscosity', 'val', '=', 'float', '(', 'val', ')', 'else', ':', '# columns, calculation', 'val', '=', 'val', '.', 'strip', '(', ')', '.', 'lower', '(', ')', 'meta', '[', 'var', '.', 'strip', '(', ')', ']', '=', 'val', 'elif', 'line', 'and', 'not', 'line', '.', 'startswith', '(', '"#"', ')', ':', 'break', 'assert', 'meta', '[', '"column 1"', ']', 'in', 'dfn', '.', 'scalar_feature_names', 'assert', 'meta', '[', '"column 2"', ']', 'in', 'dfn', '.', 'scalar_feature_names', 'assert', 'meta', '[', '"column 3"', ']', '==', '"emodulus"', 'assert', 'meta', '[', '"method"', ']', 'in', 'VALID_METHODS', '# Load isoelasics', 'with', 'path', '.', 'open', '(', '"rb"', ')', 'as', 'isfd', ':', 'isodata', '=', 'np', '.', 'loadtxt', '(', 'isfd', ')', '# Slice out individual isoelastics', 'emoduli', '=', 'np', '.', 'unique', '(', 'isodata', '[', ':', ',', '2', ']', ')', 'isoel', '=', '[', ']', 'for', 'emod', 'in', 'emoduli', ':', 'where', '=', 'isodata', '[', ':', ',', '2', ']', '==', 'emod', 'isoel', '.', 'append', '(', 'isodata', '[', 'where', ']', ')', '# Add isoelastics to instance', 'self', '.', 'add', '(', 'isoel', '=', 'isoel', ',', 'col1', '=', 'meta', '[', '"column 1"', ']', ',', 'col2', '=', 'meta', '[', '"column 2"', ']', ',', 'channel_width', '=', 'meta', '[', '"channel width [um]"', ']', ',', 'flow_rate', '=', 'meta', '[', '"flow rate [ul/s]"', ']', ',', 'viscosity', '=', 'meta', '[', '"viscosity [mPa*s]"', ']', ',', 'method', '=', 'meta', '[', '"method"', ']', ')']
Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file
['Load', 'isoelastics', 'from', 'a', 'text', 'file']
train
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/isoelastics/__init__.py#L348-L417
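The loader above expects a commented header with "# - key: value" metadata lines followed by a three-column array. The standalone sketch below writes such a file and re-reads the metadata with the same parsing rule (purely numeric values become floats, everything else is lower-cased text); it does not import dclab and the numbers are illustrative only.

import numpy as np

header = (
    "# Example isoelastics file (illustrative values)\n"
    "#\n"
    "# - column 1: area_um\n"
    "# - column 2: deform\n"
    "# - column 3: emodulus\n"
    "# - channel width [um]: 20\n"
    "# - flow rate [ul/s]: 0.04\n"
    "# - viscosity [mPa*s]: 15\n"
    "# - method: analytical\n"
    "#\n"
)
data = np.array([[50.0, 0.01, 1.0],
                 [60.0, 0.02, 1.0]])
with open("isoel_example.txt", "w") as fh:
    fh.write(header)
    np.savetxt(fh, data)

meta = {}
with open("isoel_example.txt") as fd:
    for line in fd:
        line = line.strip()
        if line.startswith("# - "):
            var, val = line.strip("#- ").split(":")
            val = float(val) if val.strip().replace(".", "").isdigit() else val.strip().lower()
            meta[var.strip()] = val
print(meta["channel width [um]"], meta["method"])  # 20.0 analytical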
1,388
pettarin/ipapy
ipapy/compatibility.py
is_unicode_string
def is_unicode_string(string): """ Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool """ if string is None: return None if PY2: return isinstance(string, unicode) return isinstance(string, str)
python
def is_unicode_string(string): """ Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool """ if string is None: return None if PY2: return isinstance(string, unicode) return isinstance(string, str)
['def', 'is_unicode_string', '(', 'string', ')', ':', 'if', 'string', 'is', 'None', ':', 'return', 'None', 'if', 'PY2', ':', 'return', 'isinstance', '(', 'string', ',', 'unicode', ')', 'return', 'isinstance', '(', 'string', ',', 'str', ')']
Return ``True`` if the given string is a Unicode string, that is, of type ``unicode`` in Python 2 or ``str`` in Python 3. Return ``None`` if ``string`` is ``None``. :param str string: the string to be checked :rtype: bool
['Return', 'True', 'if', 'the', 'given', 'string', 'is', 'a', 'Unicode', 'string', 'that', 'is', 'of', 'type', 'unicode', 'in', 'Python', '2', 'or', 'str', 'in', 'Python', '3', '.']
train
https://github.com/pettarin/ipapy/blob/ede4b3c40636f6eb90068369d31a2e75c7115324/ipapy/compatibility.py#L25-L39
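On Python 3 the check above reduces to isinstance(string, str); a quick usage sketch, with the import path assumed from the file path in this record:

from ipapy.compatibility import is_unicode_string  # assumed import path

print(is_unicode_string("kaet"))    # True  (str)
print(is_unicode_string(b"kaet"))   # False (bytes)
print(is_unicode_string(None))      # None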
1,389
zsethna/OLGA
olga/generate_sequences.py
main
def main(): """ Generate sequences.""" parser = OptionParser(conflict_handler="resolve") parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)') parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)') parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)') parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)') parser.add_option('--VDJ_model_folder', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model') parser.add_option('--VJ_model_folder', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model') parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences to PATH/TO/FILE') parser.add_option('-n', '--num_seqs', type='float', metavar='N', default = 0, dest='num_seqs_to_generate', help='specify the number of sequences to generate.') parser.add_option('--seed', type='int', dest='seed', help='set seed for pseudorandom number generator. Default is to not set a seed.') parser.add_option('--seqs_per_time_update', type='float', default = 100000, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5') parser.add_option('--conserved_J_residues', type='string', default = 'FVW', dest='conserved_J_residues', help="specify conserved J residues. Default is 'FVW'.") parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off.') parser.add_option('--seq_type', type='choice', default = 'all', dest='seq_type', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="declare sequence type for output sequences. Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'") parser.add_option('--record_genes_off', action='store_false', dest="record_genes", default=True, help='turn off recording V and J gene info.') parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. 
Choices: 'tab', 'space', ',', ';', ':'") parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare delimiter choice as a raw string.") (options, args) = parser.parse_args() main_folder = os.path.dirname(__file__) default_models = {} default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ'] default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ'] default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ'] default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ'] num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)]) if num_models_specified == 1: #exactly one model specified try: d_model = [x for x in default_models.keys() if getattr(options, x)][0] model_folder = default_models[d_model][0] recomb_type = default_models[d_model][1] except IndexError: if options.vdj_model_folder: #custom VDJ model specified model_folder = options.vdj_model_folder recomb_type = 'VDJ' elif options.vj_model_folder: #custom VJ model specified model_folder = options.vj_model_folder recomb_type = 'VJ' elif num_models_specified == 0: print 'Need to indicate generative model.' print 'Exiting...' return -1 elif num_models_specified > 1: print 'Only specify one model' print 'Exiting...' return -1 #Check that all model and genomic files exist in the indicated model folder if not os.path.isdir(model_folder): print 'Check pathing... cannot find the model folder: ' + model_folder print 'Exiting...' return -1 params_file_name = os.path.join(model_folder,'model_params.txt') marginals_file_name = os.path.join(model_folder,'model_marginals.txt') V_anchor_pos_file = os.path.join(model_folder,'V_gene_CDR3_anchors.csv') J_anchor_pos_file = os.path.join(model_folder,'J_gene_CDR3_anchors.csv') for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]: if not os.path.isfile(x): print 'Cannot find: ' + x print 'Please check the files (and naming conventions) in the model folder ' + model_folder print 'Exiting...' return -1 if options.outfile_name is not None: outfile_name = options.outfile_name if os.path.isfile(outfile_name): if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']: print 'Exiting...' return -1 #Parse arguments num_seqs_to_generate = int(options.num_seqs_to_generate) if num_seqs_to_generate <= 0: print 'Need to specify num_seqs (number of sequences to generate).' print 'Exiting...' return -1 #Parse default delimiter delimiter = options.delimiter if delimiter is None: delimiter = '\t' if options.outfile_name is not None: if outfile_name.endswith('.tsv'): delimiter = '\t' elif outfile_name.endswith('.csv'): delimiter = ',' else: try: delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter] except KeyError: pass #Other raw string. 
#Optional flags seq_type = {'all': 'all', 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[options.seq_type] record_genes = options.record_genes seqs_per_time_update = int(options.seqs_per_time_update) time_updates = options.time_updates conserved_J_residues = options.conserved_J_residues if options.seed is not None: np.random.seed(options.seed) #VDJ recomb case --- used for TCRB and IGH if recomb_type == 'VDJ': genomic_data = load_model.GenomicDataVDJ() genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file) generative_model = load_model.GenerativeModelVDJ() generative_model.load_and_process_igor_model(marginals_file_name) seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data) #VJ recomb case --- used for TCRA and light chain elif recomb_type == 'VJ': genomic_data = load_model.GenomicDataVJ() genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file) generative_model = load_model.GenerativeModelVJ() generative_model.load_and_process_igor_model(marginals_file_name) seq_gen = sequence_generation.SequenceGenerationVJ(generative_model, genomic_data) V_gene_names = [V[0].split('*')[0] for V in genomic_data.genV] J_gene_names = [J[0].split('*')[0] for J in genomic_data.genJ] if options.outfile_name is not None: outfile = open(outfile_name, 'w') print 'Starting sequence generation... ' start_time = time.time() for i in range(num_seqs_to_generate): ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues) if seq_type == 'all': #default, include both ntseq and aaseq current_line_out = ntseq + delimiter + aaseq elif seq_type == 'ntseq': #only record ntseq current_line_out = ntseq elif seq_type == 'aaseq': #only record aaseq current_line_out = aaseq if record_genes: current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in] outfile.write(current_line_out + '\n') if (i+1)%seqs_per_time_update == 0 and time_updates: c_time = time.time() - start_time eta = ((num_seqs_to_generate - (i+1))/float(i+1))*c_time if c_time > 86400: #more than a day c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 3600: #more than an hr c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 60: #more than a min c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60) else: c_time_str = '%.2f seconds.'%(c_time) if eta > 86400: #more than a day eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60) elif eta > 3600: #more than an hr eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60) elif eta > 60: #more than a min eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60) else: eta_str = '%.2f seconds.'%(eta) print '%d sequences generated in %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str) c_time = time.time() - start_time if c_time > 86400: #more than a day c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 3600: #more than an hr c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 60: #more than a min c_time_str = '%d minutes and %.2f 
seconds.'%((int(c_time)/60)%60, c_time%60) else: c_time_str = '%.2f seconds.'%(c_time) print 'Completed generating all %d sequences in %s'%(num_seqs_to_generate, c_time_str) outfile.close() else: #print to stdout for i in range(num_seqs_to_generate): ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues) if seq_type == 'all': #default, include both ntseq and aaseq current_line_out = ntseq + delimiter + aaseq elif seq_type == 'ntseq': #only record ntseq current_line_out = ntseq elif seq_type == 'aaseq': #only record aaseq current_line_out = aaseq if record_genes: current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in] print current_line_out
python
def main(): """ Generate sequences.""" parser = OptionParser(conflict_handler="resolve") parser.add_option('--humanTRA', '--human_T_alpha', action='store_true', dest='humanTRA', default=False, help='use default human TRA model (T cell alpha chain)') parser.add_option('--humanTRB', '--human_T_beta', action='store_true', dest='humanTRB', default=False, help='use default human TRB model (T cell beta chain)') parser.add_option('--mouseTRB', '--mouse_T_beta', action='store_true', dest='mouseTRB', default=False, help='use default mouse TRB model (T cell beta chain)') parser.add_option('--humanIGH', '--human_B_heavy', action='store_true', dest='humanIGH', default=False, help='use default human IGH model (B cell heavy chain)') parser.add_option('--VDJ_model_folder', dest='vdj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VDJ generative model') parser.add_option('--VJ_model_folder', dest='vj_model_folder', metavar='PATH/TO/FOLDER/', help='specify PATH/TO/FOLDER/ for a custom VJ generative model') parser.add_option('-o', '--outfile', dest = 'outfile_name', metavar='PATH/TO/FILE', help='write CDR3 sequences to PATH/TO/FILE') parser.add_option('-n', '--num_seqs', type='float', metavar='N', default = 0, dest='num_seqs_to_generate', help='specify the number of sequences to generate.') parser.add_option('--seed', type='int', dest='seed', help='set seed for pseudorandom number generator. Default is to not set a seed.') parser.add_option('--seqs_per_time_update', type='float', default = 100000, dest='seqs_per_time_update', help='specify the number of sequences between time updates. Default is 1e5') parser.add_option('--conserved_J_residues', type='string', default = 'FVW', dest='conserved_J_residues', help="specify conserved J residues. Default is 'FVW'.") parser.add_option('--time_updates_off', action='store_false', dest='time_updates', default=True, help='turn time updates off.') parser.add_option('--seq_type', type='choice', default = 'all', dest='seq_type', choices=['all', 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'], help="declare sequence type for output sequences. Choices: 'all' [default], 'ntseq', 'nucleotide', 'aaseq', 'amino_acid'") parser.add_option('--record_genes_off', action='store_false', dest="record_genes", default=True, help='turn off recording V and J gene info.') parser.add_option('-d', '--delimiter', type='choice', dest='delimiter', choices=['tab', 'space', ',', ';', ':'], help="declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. 
Choices: 'tab', 'space', ',', ';', ':'") parser.add_option('--raw_delimiter', type='str', dest='delimiter', help="declare delimiter choice as a raw string.") (options, args) = parser.parse_args() main_folder = os.path.dirname(__file__) default_models = {} default_models['humanTRA'] = [os.path.join(main_folder, 'default_models', 'human_T_alpha'), 'VJ'] default_models['humanTRB'] = [os.path.join(main_folder, 'default_models', 'human_T_beta'), 'VDJ'] default_models['mouseTRB'] = [os.path.join(main_folder, 'default_models', 'mouse_T_beta'), 'VDJ'] default_models['humanIGH'] = [os.path.join(main_folder, 'default_models', 'human_B_heavy'), 'VDJ'] num_models_specified = sum([1 for x in default_models.keys() + ['vj_model_folder', 'vdj_model_folder'] if getattr(options, x)]) if num_models_specified == 1: #exactly one model specified try: d_model = [x for x in default_models.keys() if getattr(options, x)][0] model_folder = default_models[d_model][0] recomb_type = default_models[d_model][1] except IndexError: if options.vdj_model_folder: #custom VDJ model specified model_folder = options.vdj_model_folder recomb_type = 'VDJ' elif options.vj_model_folder: #custom VJ model specified model_folder = options.vj_model_folder recomb_type = 'VJ' elif num_models_specified == 0: print 'Need to indicate generative model.' print 'Exiting...' return -1 elif num_models_specified > 1: print 'Only specify one model' print 'Exiting...' return -1 #Check that all model and genomic files exist in the indicated model folder if not os.path.isdir(model_folder): print 'Check pathing... cannot find the model folder: ' + model_folder print 'Exiting...' return -1 params_file_name = os.path.join(model_folder,'model_params.txt') marginals_file_name = os.path.join(model_folder,'model_marginals.txt') V_anchor_pos_file = os.path.join(model_folder,'V_gene_CDR3_anchors.csv') J_anchor_pos_file = os.path.join(model_folder,'J_gene_CDR3_anchors.csv') for x in [params_file_name, marginals_file_name, V_anchor_pos_file, J_anchor_pos_file]: if not os.path.isfile(x): print 'Cannot find: ' + x print 'Please check the files (and naming conventions) in the model folder ' + model_folder print 'Exiting...' return -1 if options.outfile_name is not None: outfile_name = options.outfile_name if os.path.isfile(outfile_name): if not raw_input(outfile_name + ' already exists. Overwrite (y/n)? ').strip().lower() in ['y', 'yes']: print 'Exiting...' return -1 #Parse arguments num_seqs_to_generate = int(options.num_seqs_to_generate) if num_seqs_to_generate <= 0: print 'Need to specify num_seqs (number of sequences to generate).' print 'Exiting...' return -1 #Parse default delimiter delimiter = options.delimiter if delimiter is None: delimiter = '\t' if options.outfile_name is not None: if outfile_name.endswith('.tsv'): delimiter = '\t' elif outfile_name.endswith('.csv'): delimiter = ',' else: try: delimiter = {'tab': '\t', 'space': ' ', ',': ',', ';': ';', ':': ':'}[delimiter] except KeyError: pass #Other raw string. 
#Optional flags seq_type = {'all': 'all', 'ntseq': 'ntseq', 'nucleotide': 'ntseq', 'aaseq': 'aaseq', 'amino_acid': 'aaseq'}[options.seq_type] record_genes = options.record_genes seqs_per_time_update = int(options.seqs_per_time_update) time_updates = options.time_updates conserved_J_residues = options.conserved_J_residues if options.seed is not None: np.random.seed(options.seed) #VDJ recomb case --- used for TCRB and IGH if recomb_type == 'VDJ': genomic_data = load_model.GenomicDataVDJ() genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file) generative_model = load_model.GenerativeModelVDJ() generative_model.load_and_process_igor_model(marginals_file_name) seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data) #VJ recomb case --- used for TCRA and light chain elif recomb_type == 'VJ': genomic_data = load_model.GenomicDataVJ() genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file) generative_model = load_model.GenerativeModelVJ() generative_model.load_and_process_igor_model(marginals_file_name) seq_gen = sequence_generation.SequenceGenerationVJ(generative_model, genomic_data) V_gene_names = [V[0].split('*')[0] for V in genomic_data.genV] J_gene_names = [J[0].split('*')[0] for J in genomic_data.genJ] if options.outfile_name is not None: outfile = open(outfile_name, 'w') print 'Starting sequence generation... ' start_time = time.time() for i in range(num_seqs_to_generate): ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues) if seq_type == 'all': #default, include both ntseq and aaseq current_line_out = ntseq + delimiter + aaseq elif seq_type == 'ntseq': #only record ntseq current_line_out = ntseq elif seq_type == 'aaseq': #only record aaseq current_line_out = aaseq if record_genes: current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in] outfile.write(current_line_out + '\n') if (i+1)%seqs_per_time_update == 0 and time_updates: c_time = time.time() - start_time eta = ((num_seqs_to_generate - (i+1))/float(i+1))*c_time if c_time > 86400: #more than a day c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 3600: #more than an hr c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 60: #more than a min c_time_str = '%d minutes and %.2f seconds.'%((int(c_time)/60)%60, c_time%60) else: c_time_str = '%.2f seconds.'%(c_time) if eta > 86400: #more than a day eta_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(eta)/86400, (int(eta)/3600)%24, (int(eta)/60)%60, eta%60) elif eta > 3600: #more than an hr eta_str = '%d hours, %d minutes, and %.2f seconds.'%((int(eta)/3600)%24, (int(eta)/60)%60, eta%60) elif eta > 60: #more than a min eta_str = '%d minutes and %.2f seconds.'%((int(eta)/60)%60, eta%60) else: eta_str = '%.2f seconds.'%(eta) print '%d sequences generated in %s Estimated time remaining: %s'%(i+1, c_time_str, eta_str) c_time = time.time() - start_time if c_time > 86400: #more than a day c_time_str = '%d days, %d hours, %d minutes, and %.2f seconds.'%(int(c_time)/86400, (int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 3600: #more than an hr c_time_str = '%d hours, %d minutes, and %.2f seconds.'%((int(c_time)/3600)%24, (int(c_time)/60)%60, c_time%60) elif c_time > 60: #more than a min c_time_str = '%d minutes and %.2f 
seconds.'%((int(c_time)/60)%60, c_time%60) else: c_time_str = '%.2f seconds.'%(c_time) print 'Completed generating all %d sequences in %s'%(num_seqs_to_generate, c_time_str) outfile.close() else: #print to stdout for i in range(num_seqs_to_generate): ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3(conserved_J_residues) if seq_type == 'all': #default, include both ntseq and aaseq current_line_out = ntseq + delimiter + aaseq elif seq_type == 'ntseq': #only record ntseq current_line_out = ntseq elif seq_type == 'aaseq': #only record aaseq current_line_out = aaseq if record_genes: current_line_out += delimiter + V_gene_names[V_in] + delimiter + J_gene_names[J_in] print current_line_out
['def', 'main', '(', ')', ':', 'parser', '=', 'OptionParser', '(', 'conflict_handler', '=', '"resolve"', ')', 'parser', '.', 'add_option', '(', "'--humanTRA'", ',', "'--human_T_alpha'", ',', 'action', '=', "'store_true'", ',', 'dest', '=', "'humanTRA'", ',', 'default', '=', 'False', ',', 'help', '=', "'use default human TRA model (T cell alpha chain)'", ')', 'parser', '.', 'add_option', '(', "'--humanTRB'", ',', "'--human_T_beta'", ',', 'action', '=', "'store_true'", ',', 'dest', '=', "'humanTRB'", ',', 'default', '=', 'False', ',', 'help', '=', "'use default human TRB model (T cell beta chain)'", ')', 'parser', '.', 'add_option', '(', "'--mouseTRB'", ',', "'--mouse_T_beta'", ',', 'action', '=', "'store_true'", ',', 'dest', '=', "'mouseTRB'", ',', 'default', '=', 'False', ',', 'help', '=', "'use default mouse TRB model (T cell beta chain)'", ')', 'parser', '.', 'add_option', '(', "'--humanIGH'", ',', "'--human_B_heavy'", ',', 'action', '=', "'store_true'", ',', 'dest', '=', "'humanIGH'", ',', 'default', '=', 'False', ',', 'help', '=', "'use default human IGH model (B cell heavy chain)'", ')', 'parser', '.', 'add_option', '(', "'--VDJ_model_folder'", ',', 'dest', '=', "'vdj_model_folder'", ',', 'metavar', '=', "'PATH/TO/FOLDER/'", ',', 'help', '=', "'specify PATH/TO/FOLDER/ for a custom VDJ generative model'", ')', 'parser', '.', 'add_option', '(', "'--VJ_model_folder'", ',', 'dest', '=', "'vj_model_folder'", ',', 'metavar', '=', "'PATH/TO/FOLDER/'", ',', 'help', '=', "'specify PATH/TO/FOLDER/ for a custom VJ generative model'", ')', 'parser', '.', 'add_option', '(', "'-o'", ',', "'--outfile'", ',', 'dest', '=', "'outfile_name'", ',', 'metavar', '=', "'PATH/TO/FILE'", ',', 'help', '=', "'write CDR3 sequences to PATH/TO/FILE'", ')', 'parser', '.', 'add_option', '(', "'-n'", ',', "'--num_seqs'", ',', 'type', '=', "'float'", ',', 'metavar', '=', "'N'", ',', 'default', '=', '0', ',', 'dest', '=', "'num_seqs_to_generate'", ',', 'help', '=', "'specify the number of sequences to generate.'", ')', 'parser', '.', 'add_option', '(', "'--seed'", ',', 'type', '=', "'int'", ',', 'dest', '=', "'seed'", ',', 'help', '=', "'set seed for pseudorandom number generator. Default is to not set a seed.'", ')', 'parser', '.', 'add_option', '(', "'--seqs_per_time_update'", ',', 'type', '=', "'float'", ',', 'default', '=', '100000', ',', 'dest', '=', "'seqs_per_time_update'", ',', 'help', '=', "'specify the number of sequences between time updates. Default is 1e5'", ')', 'parser', '.', 'add_option', '(', "'--conserved_J_residues'", ',', 'type', '=', "'string'", ',', 'default', '=', "'FVW'", ',', 'dest', '=', "'conserved_J_residues'", ',', 'help', '=', '"specify conserved J residues. Default is \'FVW\'."', ')', 'parser', '.', 'add_option', '(', "'--time_updates_off'", ',', 'action', '=', "'store_false'", ',', 'dest', '=', "'time_updates'", ',', 'default', '=', 'True', ',', 'help', '=', "'turn time updates off.'", ')', 'parser', '.', 'add_option', '(', "'--seq_type'", ',', 'type', '=', "'choice'", ',', 'default', '=', "'all'", ',', 'dest', '=', "'seq_type'", ',', 'choices', '=', '[', "'all'", ',', "'ntseq'", ',', "'nucleotide'", ',', "'aaseq'", ',', "'amino_acid'", ']', ',', 'help', '=', '"declare sequence type for output sequences. 
Choices: \'all\' [default], \'ntseq\', \'nucleotide\', \'aaseq\', \'amino_acid\'"', ')', 'parser', '.', 'add_option', '(', "'--record_genes_off'", ',', 'action', '=', "'store_false'", ',', 'dest', '=', '"record_genes"', ',', 'default', '=', 'True', ',', 'help', '=', "'turn off recording V and J gene info.'", ')', 'parser', '.', 'add_option', '(', "'-d'", ',', "'--delimiter'", ',', 'type', '=', "'choice'", ',', 'dest', '=', "'delimiter'", ',', 'choices', '=', '[', "'tab'", ',', "'space'", ',', "','", ',', "';'", ',', "':'", ']', ',', 'help', '=', '"declare delimiter choice. Default is tab for .tsv output files, comma for .csv files, and tab for all others. Choices: \'tab\', \'space\', \',\', \';\', \':\'"', ')', 'parser', '.', 'add_option', '(', "'--raw_delimiter'", ',', 'type', '=', "'str'", ',', 'dest', '=', "'delimiter'", ',', 'help', '=', '"declare delimiter choice as a raw string."', ')', '(', 'options', ',', 'args', ')', '=', 'parser', '.', 'parse_args', '(', ')', 'main_folder', '=', 'os', '.', 'path', '.', 'dirname', '(', '__file__', ')', 'default_models', '=', '{', '}', 'default_models', '[', "'humanTRA'", ']', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'main_folder', ',', "'default_models'", ',', "'human_T_alpha'", ')', ',', "'VJ'", ']', 'default_models', '[', "'humanTRB'", ']', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'main_folder', ',', "'default_models'", ',', "'human_T_beta'", ')', ',', "'VDJ'", ']', 'default_models', '[', "'mouseTRB'", ']', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'main_folder', ',', "'default_models'", ',', "'mouse_T_beta'", ')', ',', "'VDJ'", ']', 'default_models', '[', "'humanIGH'", ']', '=', '[', 'os', '.', 'path', '.', 'join', '(', 'main_folder', ',', "'default_models'", ',', "'human_B_heavy'", ')', ',', "'VDJ'", ']', 'num_models_specified', '=', 'sum', '(', '[', '1', 'for', 'x', 'in', 'default_models', '.', 'keys', '(', ')', '+', '[', "'vj_model_folder'", ',', "'vdj_model_folder'", ']', 'if', 'getattr', '(', 'options', ',', 'x', ')', ']', ')', 'if', 'num_models_specified', '==', '1', ':', '#exactly one model specified', 'try', ':', 'd_model', '=', '[', 'x', 'for', 'x', 'in', 'default_models', '.', 'keys', '(', ')', 'if', 'getattr', '(', 'options', ',', 'x', ')', ']', '[', '0', ']', 'model_folder', '=', 'default_models', '[', 'd_model', ']', '[', '0', ']', 'recomb_type', '=', 'default_models', '[', 'd_model', ']', '[', '1', ']', 'except', 'IndexError', ':', 'if', 'options', '.', 'vdj_model_folder', ':', '#custom VDJ model specified', 'model_folder', '=', 'options', '.', 'vdj_model_folder', 'recomb_type', '=', "'VDJ'", 'elif', 'options', '.', 'vj_model_folder', ':', '#custom VJ model specified', 'model_folder', '=', 'options', '.', 'vj_model_folder', 'recomb_type', '=', "'VJ'", 'elif', 'num_models_specified', '==', '0', ':', 'print', "'Need to indicate generative model.'", 'print', "'Exiting...'", 'return', '-', '1', 'elif', 'num_models_specified', '>', '1', ':', 'print', "'Only specify one model'", 'print', "'Exiting...'", 'return', '-', '1', '#Check that all model and genomic files exist in the indicated model folder', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'model_folder', ')', ':', 'print', "'Check pathing... 
cannot find the model folder: '", '+', 'model_folder', 'print', "'Exiting...'", 'return', '-', '1', 'params_file_name', '=', 'os', '.', 'path', '.', 'join', '(', 'model_folder', ',', "'model_params.txt'", ')', 'marginals_file_name', '=', 'os', '.', 'path', '.', 'join', '(', 'model_folder', ',', "'model_marginals.txt'", ')', 'V_anchor_pos_file', '=', 'os', '.', 'path', '.', 'join', '(', 'model_folder', ',', "'V_gene_CDR3_anchors.csv'", ')', 'J_anchor_pos_file', '=', 'os', '.', 'path', '.', 'join', '(', 'model_folder', ',', "'J_gene_CDR3_anchors.csv'", ')', 'for', 'x', 'in', '[', 'params_file_name', ',', 'marginals_file_name', ',', 'V_anchor_pos_file', ',', 'J_anchor_pos_file', ']', ':', 'if', 'not', 'os', '.', 'path', '.', 'isfile', '(', 'x', ')', ':', 'print', "'Cannot find: '", '+', 'x', 'print', "'Please check the files (and naming conventions) in the model folder '", '+', 'model_folder', 'print', "'Exiting...'", 'return', '-', '1', 'if', 'options', '.', 'outfile_name', 'is', 'not', 'None', ':', 'outfile_name', '=', 'options', '.', 'outfile_name', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'outfile_name', ')', ':', 'if', 'not', 'raw_input', '(', 'outfile_name', '+', "' already exists. Overwrite (y/n)? '", ')', '.', 'strip', '(', ')', '.', 'lower', '(', ')', 'in', '[', "'y'", ',', "'yes'", ']', ':', 'print', "'Exiting...'", 'return', '-', '1', '#Parse arguments', 'num_seqs_to_generate', '=', 'int', '(', 'options', '.', 'num_seqs_to_generate', ')', 'if', 'num_seqs_to_generate', '<=', '0', ':', 'print', "'Need to specify num_seqs (number of sequences to generate).'", 'print', "'Exiting...'", 'return', '-', '1', '#Parse default delimiter', 'delimiter', '=', 'options', '.', 'delimiter', 'if', 'delimiter', 'is', 'None', ':', 'delimiter', '=', "'\\t'", 'if', 'options', '.', 'outfile_name', 'is', 'not', 'None', ':', 'if', 'outfile_name', '.', 'endswith', '(', "'.tsv'", ')', ':', 'delimiter', '=', "'\\t'", 'elif', 'outfile_name', '.', 'endswith', '(', "'.csv'", ')', ':', 'delimiter', '=', "','", 'else', ':', 'try', ':', 'delimiter', '=', '{', "'tab'", ':', "'\\t'", ',', "'space'", ':', "' '", ',', "','", ':', "','", ',', "';'", ':', "';'", ',', "':'", ':', "':'", '}', '[', 'delimiter', ']', 'except', 'KeyError', ':', 'pass', '#Other raw string.', '#Optional flags', 'seq_type', '=', '{', "'all'", ':', "'all'", ',', "'ntseq'", ':', "'ntseq'", ',', "'nucleotide'", ':', "'ntseq'", ',', "'aaseq'", ':', "'aaseq'", ',', "'amino_acid'", ':', "'aaseq'", '}', '[', 'options', '.', 'seq_type', ']', 'record_genes', '=', 'options', '.', 'record_genes', 'seqs_per_time_update', '=', 'int', '(', 'options', '.', 'seqs_per_time_update', ')', 'time_updates', '=', 'options', '.', 'time_updates', 'conserved_J_residues', '=', 'options', '.', 'conserved_J_residues', 'if', 'options', '.', 'seed', 'is', 'not', 'None', ':', 'np', '.', 'random', '.', 'seed', '(', 'options', '.', 'seed', ')', '#VDJ recomb case --- used for TCRB and IGH', 'if', 'recomb_type', '==', "'VDJ'", ':', 'genomic_data', '=', 'load_model', '.', 'GenomicDataVDJ', '(', ')', 'genomic_data', '.', 'load_igor_genomic_data', '(', 'params_file_name', ',', 'V_anchor_pos_file', ',', 'J_anchor_pos_file', ')', 'generative_model', '=', 'load_model', '.', 'GenerativeModelVDJ', '(', ')', 'generative_model', '.', 'load_and_process_igor_model', '(', 'marginals_file_name', ')', 'seq_gen', '=', 'sequence_generation', '.', 'SequenceGenerationVDJ', '(', 'generative_model', ',', 'genomic_data', ')', '#VJ recomb case --- used for TCRA and light chain', 'elif', 
'recomb_type', '==', "'VJ'", ':', 'genomic_data', '=', 'load_model', '.', 'GenomicDataVJ', '(', ')', 'genomic_data', '.', 'load_igor_genomic_data', '(', 'params_file_name', ',', 'V_anchor_pos_file', ',', 'J_anchor_pos_file', ')', 'generative_model', '=', 'load_model', '.', 'GenerativeModelVJ', '(', ')', 'generative_model', '.', 'load_and_process_igor_model', '(', 'marginals_file_name', ')', 'seq_gen', '=', 'sequence_generation', '.', 'SequenceGenerationVJ', '(', 'generative_model', ',', 'genomic_data', ')', 'V_gene_names', '=', '[', 'V', '[', '0', ']', '.', 'split', '(', "'*'", ')', '[', '0', ']', 'for', 'V', 'in', 'genomic_data', '.', 'genV', ']', 'J_gene_names', '=', '[', 'J', '[', '0', ']', '.', 'split', '(', "'*'", ')', '[', '0', ']', 'for', 'J', 'in', 'genomic_data', '.', 'genJ', ']', 'if', 'options', '.', 'outfile_name', 'is', 'not', 'None', ':', 'outfile', '=', 'open', '(', 'outfile_name', ',', "'w'", ')', 'print', "'Starting sequence generation... '", 'start_time', '=', 'time', '.', 'time', '(', ')', 'for', 'i', 'in', 'range', '(', 'num_seqs_to_generate', ')', ':', 'ntseq', ',', 'aaseq', ',', 'V_in', ',', 'J_in', '=', 'seq_gen', '.', 'gen_rnd_prod_CDR3', '(', 'conserved_J_residues', ')', 'if', 'seq_type', '==', "'all'", ':', '#default, include both ntseq and aaseq', 'current_line_out', '=', 'ntseq', '+', 'delimiter', '+', 'aaseq', 'elif', 'seq_type', '==', "'ntseq'", ':', '#only record ntseq', 'current_line_out', '=', 'ntseq', 'elif', 'seq_type', '==', "'aaseq'", ':', '#only record aaseq', 'current_line_out', '=', 'aaseq', 'if', 'record_genes', ':', 'current_line_out', '+=', 'delimiter', '+', 'V_gene_names', '[', 'V_in', ']', '+', 'delimiter', '+', 'J_gene_names', '[', 'J_in', ']', 'outfile', '.', 'write', '(', 'current_line_out', '+', "'\\n'", ')', 'if', '(', 'i', '+', '1', ')', '%', 'seqs_per_time_update', '==', '0', 'and', 'time_updates', ':', 'c_time', '=', 'time', '.', 'time', '(', ')', '-', 'start_time', 'eta', '=', '(', '(', 'num_seqs_to_generate', '-', '(', 'i', '+', '1', ')', ')', '/', 'float', '(', 'i', '+', '1', ')', ')', '*', 'c_time', 'if', 'c_time', '>', '86400', ':', '#more than a day', 'c_time_str', '=', "'%d days, %d hours, %d minutes, and %.2f seconds.'", '%', '(', 'int', '(', 'c_time', ')', '/', '86400', ',', '(', 'int', '(', 'c_time', ')', '/', '3600', ')', '%', '24', ',', '(', 'int', '(', 'c_time', ')', '/', '60', ')', '%', '60', ',', 'c_time', '%', '60', ')', 'elif', 'c_time', '>', '3600', ':', '#more than an hr', 'c_time_str', '=', "'%d hours, %d minutes, and %.2f seconds.'", '%', '(', '(', 'int', '(', 'c_time', ')', '/', '3600', ')', '%', '24', ',', '(', 'int', '(', 'c_time', ')', '/', '60', ')', '%', '60', ',', 'c_time', '%', '60', ')', 'elif', 'c_time', '>', '60', ':', '#more than a min', 'c_time_str', '=', "'%d minutes and %.2f seconds.'", '%', '(', '(', 'int', '(', 'c_time', ')', '/', '60', ')', '%', '60', ',', 'c_time', '%', '60', ')', 'else', ':', 'c_time_str', '=', "'%.2f seconds.'", '%', '(', 'c_time', ')', 'if', 'eta', '>', '86400', ':', '#more than a day', 'eta_str', '=', "'%d days, %d hours, %d minutes, and %.2f seconds.'", '%', '(', 'int', '(', 'eta', ')', '/', '86400', ',', '(', 'int', '(', 'eta', ')', '/', '3600', ')', '%', '24', ',', '(', 'int', '(', 'eta', ')', '/', '60', ')', '%', '60', ',', 'eta', '%', '60', ')', 'elif', 'eta', '>', '3600', ':', '#more than an hr', 'eta_str', '=', "'%d hours, %d minutes, and %.2f seconds.'", '%', '(', '(', 'int', '(', 'eta', ')', '/', '3600', ')', '%', '24', ',', '(', 'int', '(', 'eta', ')', '/', '60', ')', 
'%', '60', ',', 'eta', '%', '60', ')', 'elif', 'eta', '>', '60', ':', '#more than a min', 'eta_str', '=', "'%d minutes and %.2f seconds.'", '%', '(', '(', 'int', '(', 'eta', ')', '/', '60', ')', '%', '60', ',', 'eta', '%', '60', ')', 'else', ':', 'eta_str', '=', "'%.2f seconds.'", '%', '(', 'eta', ')', 'print', "'%d sequences generated in %s Estimated time remaining: %s'", '%', '(', 'i', '+', '1', ',', 'c_time_str', ',', 'eta_str', ')', 'c_time', '=', 'time', '.', 'time', '(', ')', '-', 'start_time', 'if', 'c_time', '>', '86400', ':', '#more than a day', 'c_time_str', '=', "'%d days, %d hours, %d minutes, and %.2f seconds.'", '%', '(', 'int', '(', 'c_time', ')', '/', '86400', ',', '(', 'int', '(', 'c_time', ')', '/', '3600', ')', '%', '24', ',', '(', 'int', '(', 'c_time', ')', '/', '60', ')', '%', '60', ',', 'c_time', '%', '60', ')', 'elif', 'c_time', '>', '3600', ':', '#more than an hr', 'c_time_str', '=', "'%d hours, %d minutes, and %.2f seconds.'", '%', '(', '(', 'int', '(', 'c_time', ')', '/', '3600', ')', '%', '24', ',', '(', 'int', '(', 'c_time', ')', '/', '60', ')', '%', '60', ',', 'c_time', '%', '60', ')', 'elif', 'c_time', '>', '60', ':', '#more than a min', 'c_time_str', '=', "'%d minutes and %.2f seconds.'", '%', '(', '(', 'int', '(', 'c_time', ')', '/', '60', ')', '%', '60', ',', 'c_time', '%', '60', ')', 'else', ':', 'c_time_str', '=', "'%.2f seconds.'", '%', '(', 'c_time', ')', 'print', "'Completed generating all %d sequences in %s'", '%', '(', 'num_seqs_to_generate', ',', 'c_time_str', ')', 'outfile', '.', 'close', '(', ')', 'else', ':', '#print to stdout', 'for', 'i', 'in', 'range', '(', 'num_seqs_to_generate', ')', ':', 'ntseq', ',', 'aaseq', ',', 'V_in', ',', 'J_in', '=', 'seq_gen', '.', 'gen_rnd_prod_CDR3', '(', 'conserved_J_residues', ')', 'if', 'seq_type', '==', "'all'", ':', '#default, include both ntseq and aaseq', 'current_line_out', '=', 'ntseq', '+', 'delimiter', '+', 'aaseq', 'elif', 'seq_type', '==', "'ntseq'", ':', '#only record ntseq', 'current_line_out', '=', 'ntseq', 'elif', 'seq_type', '==', "'aaseq'", ':', '#only record aaseq', 'current_line_out', '=', 'aaseq', 'if', 'record_genes', ':', 'current_line_out', '+=', 'delimiter', '+', 'V_gene_names', '[', 'V_in', ']', '+', 'delimiter', '+', 'J_gene_names', '[', 'J_in', ']', 'print', 'current_line_out']
Generate sequences.
['Generate', 'sequences', '.']
train
https://github.com/zsethna/OLGA/blob/e825c333f0f9a4eb02132e0bcf86f0dca9123114/olga/generate_sequences.py#L144-L345
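A condensed usage sketch of the VDJ pipeline that the command-line driver above wires together, assuming the olga package is importable as olga.load_model / olga.sequence_generation and that the bundled human_T_beta model files are present locally; the model path, sequence count, and output handling are illustrative, not part of the record.

import os
import olga.load_model as load_model
import olga.sequence_generation as sequence_generation

model_folder = 'olga/default_models/human_T_beta'  # assumed local layout of the default model
params_file_name = os.path.join(model_folder, 'model_params.txt')
marginals_file_name = os.path.join(model_folder, 'model_marginals.txt')
V_anchor_pos_file = os.path.join(model_folder, 'V_gene_CDR3_anchors.csv')
J_anchor_pos_file = os.path.join(model_folder, 'J_gene_CDR3_anchors.csv')

# Same load/generate sequence as the VDJ branch of main() above.
genomic_data = load_model.GenomicDataVDJ()
genomic_data.load_igor_genomic_data(params_file_name, V_anchor_pos_file, J_anchor_pos_file)
generative_model = load_model.GenerativeModelVDJ()
generative_model.load_and_process_igor_model(marginals_file_name)
seq_gen = sequence_generation.SequenceGenerationVDJ(generative_model, genomic_data)

for _ in range(10):  # generate a handful of productive CDR3s
    ntseq, aaseq, V_in, J_in = seq_gen.gen_rnd_prod_CDR3('FVW')
    print(ntseq + '\t' + aaseq)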
1,390
tyarkoni/pliers
pliers/diagnostics/diagnostics.py
Diagnostics.flag_all
def flag_all(self, thresh_dict=None, include=None, exclude=None):
        ''' Returns indices of (rows, columns) that satisfy flag()
        on any diagnostic. Uses user-provided thresholds in thresh_dict/

        Args:
            thresh_dict (dict): dictionary of diagnostic->threshold functions
            include (list): optional sublist of diagnostics to flag
            exclude (list): optional sublist of diagnostics to not flag
        '''
        if thresh_dict is None:
            thresh_dict = {}
        row_idx = set()
        col_idx = set()

        include = self.results if include is None else include
        include = list(
            set(include) - set(exclude)) if exclude is not None else include
        for diagnostic in include:
            if diagnostic in thresh_dict:
                flagged = self.flag(diagnostic, thresh_dict[diagnostic])
            else:
                flagged = self.flag(diagnostic)

            if diagnostic == 'RowMahalanobisDistances':
                row_idx = row_idx.union(flagged)
            else:
                col_idx = col_idx.union(flagged)

        return sorted(list(row_idx)), sorted(list(col_idx))
python
def flag_all(self, thresh_dict=None, include=None, exclude=None):
        ''' Returns indices of (rows, columns) that satisfy flag()
        on any diagnostic. Uses user-provided thresholds in thresh_dict/

        Args:
            thresh_dict (dict): dictionary of diagnostic->threshold functions
            include (list): optional sublist of diagnostics to flag
            exclude (list): optional sublist of diagnostics to not flag
        '''
        if thresh_dict is None:
            thresh_dict = {}
        row_idx = set()
        col_idx = set()

        include = self.results if include is None else include
        include = list(
            set(include) - set(exclude)) if exclude is not None else include
        for diagnostic in include:
            if diagnostic in thresh_dict:
                flagged = self.flag(diagnostic, thresh_dict[diagnostic])
            else:
                flagged = self.flag(diagnostic)

            if diagnostic == 'RowMahalanobisDistances':
                row_idx = row_idx.union(flagged)
            else:
                col_idx = col_idx.union(flagged)

        return sorted(list(row_idx)), sorted(list(col_idx))
['def', 'flag_all', '(', 'self', ',', 'thresh_dict', '=', 'None', ',', 'include', '=', 'None', ',', 'exclude', '=', 'None', ')', ':', 'if', 'thresh_dict', 'is', 'None', ':', 'thresh_dict', '=', '{', '}', 'row_idx', '=', 'set', '(', ')', 'col_idx', '=', 'set', '(', ')', 'include', '=', 'self', '.', 'results', 'if', 'include', 'is', 'None', 'else', 'include', 'include', '=', 'list', '(', 'set', '(', 'include', ')', '-', 'set', '(', 'exclude', ')', ')', 'if', 'exclude', 'is', 'not', 'None', 'else', 'include', 'for', 'diagnostic', 'in', 'include', ':', 'if', 'diagnostic', 'in', 'thresh_dict', ':', 'flagged', '=', 'self', '.', 'flag', '(', 'diagnostic', ',', 'thresh_dict', '[', 'diagnostic', ']', ')', 'else', ':', 'flagged', '=', 'self', '.', 'flag', '(', 'diagnostic', ')', 'if', 'diagnostic', '==', "'RowMahalanobisDistances'", ':', 'row_idx', '=', 'row_idx', '.', 'union', '(', 'flagged', ')', 'else', ':', 'col_idx', '=', 'col_idx', '.', 'union', '(', 'flagged', ')', 'return', 'sorted', '(', 'list', '(', 'row_idx', ')', ')', ',', 'sorted', '(', 'list', '(', 'col_idx', ')', ')']
Returns indices of (rows, columns) that satisfy flag() on any diagnostic. Uses user-provided thresholds in thresh_dict/ Args: thresh_dict (dict): dictionary of diagnostic->threshold functions include (list): optional sublist of diagnostics to flag exclude (list): optional sublist of diagnostics to not flag
['Returns', 'indices', 'of', '(', 'rows', 'columns', ')', 'that', 'satisfy', 'flag', '()', 'on', 'any', 'diagnostic', '.', 'Uses', 'user', '-', 'provided', 'thresholds', 'in', 'thresh_dict', '/']
train
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/diagnostics/diagnostics.py#L186-L214
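A minimal usage sketch for flag_all, assuming the import path matches the file path above and that Diagnostics can be built directly from a pandas DataFrame of extracted features; the data and threshold value are illustrative.

import pandas as pd
from pliers.diagnostics import Diagnostics  # assumed import path

df = pd.DataFrame({'f1': [0.1, 0.2, 9.9], 'f2': [1.0, 1.1, 1.2]})
diag = Diagnostics(df)  # assumption: constructed from a feature DataFrame
rows, cols = diag.flag_all(thresh_dict={'RowMahalanobisDistances': 3.0})
# rows: indices flagged by the row-wise Mahalanobis diagnostic
# cols: column indices flagged by any of the other diagnostics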
1,391
contentful/contentful-management.py
contentful_management/space.py
Space.create_attributes
def create_attributes(klass, attributes, previous_object=None):
        """Attributes for space creation."""
        if previous_object is not None:
            return {'name': attributes.get('name', previous_object.name)}

        return {
            'name': attributes.get('name', ''),
            'defaultLocale': attributes['default_locale']
        }
python
def create_attributes(klass, attributes, previous_object=None):
        """Attributes for space creation."""
        if previous_object is not None:
            return {'name': attributes.get('name', previous_object.name)}

        return {
            'name': attributes.get('name', ''),
            'defaultLocale': attributes['default_locale']
        }
['def', 'create_attributes', '(', 'klass', ',', 'attributes', ',', 'previous_object', '=', 'None', ')', ':', 'if', 'previous_object', 'is', 'not', 'None', ':', 'return', '{', "'name'", ':', 'attributes', '.', 'get', '(', "'name'", ',', 'previous_object', '.', 'name', ')', '}', 'return', '{', "'name'", ':', 'attributes', '.', 'get', '(', "'name'", ',', "''", ')', ',', "'defaultLocale'", ':', 'attributes', '[', "'default_locale'", ']', '}']
Attributes for space creation.
['Attributes', 'for', 'space', 'creation', '.']
train
https://github.com/contentful/contentful-management.py/blob/707dd30883b98a10c7ff0f7f5bdb8edbdc1d8df0/contentful_management/space.py#L54-L62
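A small sketch of the payload create_attributes builds, assuming it is exposed as a classmethod on Space (as its klass parameter suggests) and importable per the file path above; the name and locale are illustrative.

from contentful_management.space import Space  # assumed import path

# Without a previous space, both the name and the default locale are required.
attrs = Space.create_attributes({'name': 'Example Space', 'default_locale': 'en-US'})
# -> {'name': 'Example Space', 'defaultLocale': 'en-US'}

# With a previous space, only the (possibly unchanged) name is sent:
# Space.create_attributes({}, previous_object=existing_space)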
1,392
inveniosoftware/invenio-files-rest
invenio_files_rest/models.py
MultipartObject.merge_parts
def merge_parts(self, version_id=None, **kwargs):
        """Merge parts into object version."""
        self.file.update_checksum(**kwargs)
        with db.session.begin_nested():
            obj = ObjectVersion.create(
                self.bucket,
                self.key,
                _file_id=self.file_id,
                version_id=version_id
            )
            self.delete()
        return obj
python
def merge_parts(self, version_id=None, **kwargs):
        """Merge parts into object version."""
        self.file.update_checksum(**kwargs)
        with db.session.begin_nested():
            obj = ObjectVersion.create(
                self.bucket,
                self.key,
                _file_id=self.file_id,
                version_id=version_id
            )
            self.delete()
        return obj
['def', 'merge_parts', '(', 'self', ',', 'version_id', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'self', '.', 'file', '.', 'update_checksum', '(', '*', '*', 'kwargs', ')', 'with', 'db', '.', 'session', '.', 'begin_nested', '(', ')', ':', 'obj', '=', 'ObjectVersion', '.', 'create', '(', 'self', '.', 'bucket', ',', 'self', '.', 'key', ',', '_file_id', '=', 'self', '.', 'file_id', ',', 'version_id', '=', 'version_id', ')', 'self', '.', 'delete', '(', ')', 'return', 'obj']
Merge parts into object version.
['Merge', 'parts', 'into', 'object', 'version', '.']
train
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L1490-L1501
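A hedged sketch of finishing a multipart upload with merge_parts, assuming mp is an invenio-files-rest MultipartObject whose parts have all been uploaded and that the call runs inside a Flask application context; committing the surrounding session is treated here as the caller's job, which is an assumption rather than documented behaviour.

from invenio_db import db  # assumed to be the session the models above use

# mp: a MultipartObject with every Part already uploaded
obj = mp.merge_parts()   # creates the ObjectVersion and removes the multipart row
db.session.commit()
print(obj.key, obj.file_id)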
1,393
fabioz/PyDev.Debugger
third_party/pep8/pycodestyle.py
whitespace_around_comma
def whitespace_around_comma(logical_line):
    r"""Avoid extraneous whitespace after a comma or a colon.

    Note: these checks are disabled by default

    Okay: a = (1, 2)
    E241: a = (1,  2)
    E242: a = (1,\t2)
    """
    line = logical_line
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
        found = m.start() + 1
        if '\t' in m.group():
            yield found, "E242 tab after '%s'" % m.group()[0]
        else:
            yield found, "E241 multiple spaces after '%s'" % m.group()[0]
python
def whitespace_around_comma(logical_line):
    r"""Avoid extraneous whitespace after a comma or a colon.

    Note: these checks are disabled by default

    Okay: a = (1, 2)
    E241: a = (1,  2)
    E242: a = (1,\t2)
    """
    line = logical_line
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
        found = m.start() + 1
        if '\t' in m.group():
            yield found, "E242 tab after '%s'" % m.group()[0]
        else:
            yield found, "E241 multiple spaces after '%s'" % m.group()[0]
['def', 'whitespace_around_comma', '(', 'logical_line', ')', ':', 'line', '=', 'logical_line', 'for', 'm', 'in', 'WHITESPACE_AFTER_COMMA_REGEX', '.', 'finditer', '(', 'line', ')', ':', 'found', '=', 'm', '.', 'start', '(', ')', '+', '1', 'if', "'\\t'", 'in', 'm', '.', 'group', '(', ')', ':', 'yield', 'found', ',', '"E242 tab after \'%s\'"', '%', 'm', '.', 'group', '(', ')', '[', '0', ']', 'else', ':', 'yield', 'found', ',', '"E241 multiple spaces after \'%s\'"', '%', 'm', '.', 'group', '(', ')', '[', '0', ']']
r"""Avoid extraneous whitespace after a comma or a colon. Note: these checks are disabled by default Okay: a = (1, 2) E241: a = (1, 2) E242: a = (1,\t2)
['r', 'Avoid', 'extraneous', 'whitespace', 'after', 'a', 'comma', 'or', 'a', 'colon', '.']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/third_party/pep8/pycodestyle.py#L791-L806
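A quick sketch of calling the checker directly, assuming the standalone pycodestyle package exposes the same module-level function as this vendored copy; in normal use the checker framework feeds it logical lines.

import pycodestyle  # assumption: standalone package mirrors this vendored module

for offset, message in pycodestyle.whitespace_around_comma("a = (1,  2)"):
    print(offset, message)  # an E241 "multiple spaces after ','" report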
1,394
Accelize/pycosio
pycosio/storage/s3.py
_S3System._remove
def _remove(self, client_kwargs):
        """
        Remove an object.

        args:
            client_kwargs (dict): Client arguments.
        """
        with _handle_client_error():
            # Object
            if 'Key' in client_kwargs:
                return self.client.delete_object(**client_kwargs)

            # Bucket
            return self.client.delete_bucket(Bucket=client_kwargs['Bucket'])
python
def _remove(self, client_kwargs):
        """
        Remove an object.

        args:
            client_kwargs (dict): Client arguments.
        """
        with _handle_client_error():
            # Object
            if 'Key' in client_kwargs:
                return self.client.delete_object(**client_kwargs)

            # Bucket
            return self.client.delete_bucket(Bucket=client_kwargs['Bucket'])
['def', '_remove', '(', 'self', ',', 'client_kwargs', ')', ':', 'with', '_handle_client_error', '(', ')', ':', '# Object', 'if', "'Key'", 'in', 'client_kwargs', ':', 'return', 'self', '.', 'client', '.', 'delete_object', '(', '*', '*', 'client_kwargs', ')', '# Bucket', 'return', 'self', '.', 'client', '.', 'delete_bucket', '(', 'Bucket', '=', 'client_kwargs', '[', "'Bucket'", ']', ')']
Remove an object. args: client_kwargs (dict): Client arguments.
['Remove', 'an', 'object', '.']
train
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/s3.py#L247-L260
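A hedged sketch of the two client_kwargs shapes _remove accepts; system is assumed to be an already-initialized pycosio S3 system object, and the bucket and key names are illustrative. The method is internal, so this only illustrates the dispatch, not a public API.

# system: an initialized pycosio S3 _S3System instance (assumed available)
system._remove({'Bucket': 'my-bucket', 'Key': 'path/to/object'})  # deletes a single object
system._remove({'Bucket': 'my-bucket'})                           # deletes the (empty) bucket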
1,395
aws/sagemaker-containers
src/sagemaker_containers/_env.py
read_hyperparameters
def read_hyperparameters():  # type: () -> dict
    """Read the hyperparameters from /opt/ml/input/config/hyperparameters.json.

    For more information about hyperparameters.json:
    https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters

    Returns:
        (dict[string, object]): a dictionary containing the hyperparameters.
    """
    hyperparameters = _read_json(hyperparameters_file_dir)

    deserialized_hps = {}

    for k, v in hyperparameters.items():
        try:
            v = json.loads(v)
        except (ValueError, TypeError):
            logger.info("Failed to parse hyperparameter %s value %s to Json.\n"
                        "Returning the value itself", k, v)

        deserialized_hps[k] = v

    return deserialized_hps
python
def read_hyperparameters():  # type: () -> dict
    """Read the hyperparameters from /opt/ml/input/config/hyperparameters.json.

    For more information about hyperparameters.json:
    https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters

    Returns:
        (dict[string, object]): a dictionary containing the hyperparameters.
    """
    hyperparameters = _read_json(hyperparameters_file_dir)

    deserialized_hps = {}

    for k, v in hyperparameters.items():
        try:
            v = json.loads(v)
        except (ValueError, TypeError):
            logger.info("Failed to parse hyperparameter %s value %s to Json.\n"
                        "Returning the value itself", k, v)

        deserialized_hps[k] = v

    return deserialized_hps
['def', 'read_hyperparameters', '(', ')', ':', '# type: () -> dict', 'hyperparameters', '=', '_read_json', '(', 'hyperparameters_file_dir', ')', 'deserialized_hps', '=', '{', '}', 'for', 'k', ',', 'v', 'in', 'hyperparameters', '.', 'items', '(', ')', ':', 'try', ':', 'v', '=', 'json', '.', 'loads', '(', 'v', ')', 'except', '(', 'ValueError', ',', 'TypeError', ')', ':', 'logger', '.', 'info', '(', '"Failed to parse hyperparameter %s value %s to Json.\\n"', '"Returning the value itself"', ',', 'k', ',', 'v', ')', 'deserialized_hps', '[', 'k', ']', '=', 'v', 'return', 'deserialized_hps']
Read the hyperparameters from /opt/ml/input/config/hyperparameters.json. For more information about hyperparameters.json: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters Returns: (dict[string, object]): a dictionary containing the hyperparameters.
['Read', 'the', 'hyperparameters', 'from', '/', 'opt', '/', 'ml', '/', 'input', '/', 'config', '/', 'hyperparameters', '.', 'json', '.']
train
https://github.com/aws/sagemaker-containers/blob/0030f07abbaf22a55d986d97274d7a8d1aa1f10c/src/sagemaker_containers/_env.py#L197-L219
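A short sketch of what read_hyperparameters returns inside a SageMaker training container, assuming the internal _env module is importable as below; the file contents shown in the comment are illustrative.

from sagemaker_containers import _env  # internal module, per the file path above

# /opt/ml/input/config/hyperparameters.json (illustrative):
#   {"epochs": "10", "lr": "0.01", "optimizer": "\"adam\""}
hps = _env.read_hyperparameters()
# -> {'epochs': 10, 'lr': 0.01, 'optimizer': 'adam'}  (values json-decoded where possible)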
1,396
googledatalab/pydatalab
solutionbox/ml_workbench/xgboost/trainer/feature_transforms.py
get_transformed_feature_indices
def get_transformed_feature_indices(features, stats):
  """Returns information about the transformed features.

  Returns:
    List in the from
    [(transformed_feature_name, {size: int, index_start: int})]
  """
  feature_indices = []
  index_start = 1
  for name, transform in sorted(six.iteritems(features)):
    transform_name = transform['transform']
    source_column = transform['source_column']

    info = {}
    if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
      info['size'] = 1
    elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
      info['size'] = stats['column_stats'][source_column]['vocab_size']
    elif transform_name == IMAGE_TRANSFORM:
      info['size'] = IMAGE_BOTTLENECK_TENSOR_SIZE
    elif transform_name == TARGET_TRANSFORM:
      info['size'] = 0
    else:
      raise ValueError('xgboost does not support transform "%s"' % transform)

    info['index_start'] = index_start
    index_start += info['size']
    feature_indices.append((name, info))
  return feature_indices
python
def get_transformed_feature_indices(features, stats):
  """Returns information about the transformed features.

  Returns:
    List in the from
    [(transformed_feature_name, {size: int, index_start: int})]
  """
  feature_indices = []
  index_start = 1
  for name, transform in sorted(six.iteritems(features)):
    transform_name = transform['transform']
    source_column = transform['source_column']

    info = {}
    if transform_name in [IDENTITY_TRANSFORM, SCALE_TRANSFORM]:
      info['size'] = 1
    elif transform_name in [ONE_HOT_TRANSFORM, MULTI_HOT_TRANSFORM]:
      info['size'] = stats['column_stats'][source_column]['vocab_size']
    elif transform_name == IMAGE_TRANSFORM:
      info['size'] = IMAGE_BOTTLENECK_TENSOR_SIZE
    elif transform_name == TARGET_TRANSFORM:
      info['size'] = 0
    else:
      raise ValueError('xgboost does not support transform "%s"' % transform)

    info['index_start'] = index_start
    index_start += info['size']
    feature_indices.append((name, info))
  return feature_indices
['def', 'get_transformed_feature_indices', '(', 'features', ',', 'stats', ')', ':', 'feature_indices', '=', '[', ']', 'index_start', '=', '1', 'for', 'name', ',', 'transform', 'in', 'sorted', '(', 'six', '.', 'iteritems', '(', 'features', ')', ')', ':', 'transform_name', '=', 'transform', '[', "'transform'", ']', 'source_column', '=', 'transform', '[', "'source_column'", ']', 'info', '=', '{', '}', 'if', 'transform_name', 'in', '[', 'IDENTITY_TRANSFORM', ',', 'SCALE_TRANSFORM', ']', ':', 'info', '[', "'size'", ']', '=', '1', 'elif', 'transform_name', 'in', '[', 'ONE_HOT_TRANSFORM', ',', 'MULTI_HOT_TRANSFORM', ']', ':', 'info', '[', "'size'", ']', '=', 'stats', '[', "'column_stats'", ']', '[', 'source_column', ']', '[', "'vocab_size'", ']', 'elif', 'transform_name', '==', 'IMAGE_TRANSFORM', ':', 'info', '[', "'size'", ']', '=', 'IMAGE_BOTTLENECK_TENSOR_SIZE', 'elif', 'transform_name', '==', 'TARGET_TRANSFORM', ':', 'info', '[', "'size'", ']', '=', '0', 'else', ':', 'raise', 'ValueError', '(', '\'xgboost does not support transform "%s"\'', '%', 'transform', ')', 'info', '[', "'index_start'", ']', '=', 'index_start', 'index_start', '+=', 'info', '[', "'size'", ']', 'feature_indices', '.', 'append', '(', '(', 'name', ',', 'info', ')', ')', 'return', 'feature_indices']
Returns information about the transformed features. Returns: List in the from [(transformed_feature_name, {size: int, index_start: int})]
['Returns', 'information', 'about', 'the', 'transformed', 'features', '.']
train
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/ml_workbench/xgboost/trainer/feature_transforms.py#L415-L444
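An illustrative call, assuming the function is importable from the trainer's feature_transforms module; the literal 'identity' and 'one_hot' strings stand in for the module's IDENTITY_TRANSFORM and ONE_HOT_TRANSFORM constants, and the stats dict mimics the analysis output normally loaded from disk.

from trainer.feature_transforms import get_transformed_feature_indices  # assumed import path

features = {
    'age':   {'transform': 'identity', 'source_column': 'age'},
    'color': {'transform': 'one_hot',  'source_column': 'color'},
}
stats = {'column_stats': {'color': {'vocab_size': 3}}}

indices = get_transformed_feature_indices(features, stats)
# -> [('age', {'size': 1, 'index_start': 1}), ('color', {'size': 3, 'index_start': 2})]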
1,397
angr/angr
angr/state_plugins/heap/heap_ptmalloc.py
SimHeapPTMalloc._find_bck
def _find_bck(self, chunk):
        """
        Simply finds the free chunk that would be the backwards chunk relative to the chunk at ptr.
        Hence, the free head and all other metadata are unaltered by this function.
        """
        cur = self.free_head_chunk
        if cur is None:
            return None
        fwd = cur.fwd_chunk()
        if cur == fwd:
            return cur

        # At this point there should be at least two free chunks in the heap
        if cur < chunk:
            while cur < fwd < chunk:
                cur = fwd
                fwd = cur.fwd_chunk()
            return cur
        else:
            while fwd != self.free_head_chunk:
                cur = fwd
                fwd = cur.fwd_chunk()
            return cur
python
def _find_bck(self, chunk):
        """
        Simply finds the free chunk that would be the backwards chunk relative to the chunk at ptr.
        Hence, the free head and all other metadata are unaltered by this function.
        """
        cur = self.free_head_chunk
        if cur is None:
            return None
        fwd = cur.fwd_chunk()
        if cur == fwd:
            return cur

        # At this point there should be at least two free chunks in the heap
        if cur < chunk:
            while cur < fwd < chunk:
                cur = fwd
                fwd = cur.fwd_chunk()
            return cur
        else:
            while fwd != self.free_head_chunk:
                cur = fwd
                fwd = cur.fwd_chunk()
            return cur
['def', '_find_bck', '(', 'self', ',', 'chunk', ')', ':', 'cur', '=', 'self', '.', 'free_head_chunk', 'if', 'cur', 'is', 'None', ':', 'return', 'None', 'fwd', '=', 'cur', '.', 'fwd_chunk', '(', ')', 'if', 'cur', '==', 'fwd', ':', 'return', 'cur', '# At this point there should be at least two free chunks in the heap', 'if', 'cur', '<', 'chunk', ':', 'while', 'cur', '<', 'fwd', '<', 'chunk', ':', 'cur', '=', 'fwd', 'fwd', '=', 'cur', '.', 'fwd_chunk', '(', ')', 'return', 'cur', 'else', ':', 'while', 'fwd', '!=', 'self', '.', 'free_head_chunk', ':', 'cur', '=', 'fwd', 'fwd', '=', 'cur', '.', 'fwd_chunk', '(', ')', 'return', 'cur']
Simply finds the free chunk that would be the backwards chunk relative to the chunk at ptr. Hence, the free head and all other metadata are unaltered by this function.
['Simply', 'finds', 'the', 'free', 'chunk', 'that', 'would', 'be', 'the', 'backwards', 'chunk', 'relative', 'to', 'the', 'chunk', 'at', 'ptr', '.', 'Hence', 'the', 'free', 'head', 'and', 'all', 'other', 'metadata', 'are', 'unaltered', 'by', 'this', 'function', '.']
train
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/heap/heap_ptmalloc.py#L281-L302
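A hedged sketch of reaching the PTMalloc heap model in angr; _find_bck itself is internal and is exercised indirectly when chunks are freed back into the free list. The target binary and allocation size are illustrative.

import angr

proj = angr.Project('/bin/true', auto_load_libs=False)  # illustrative target
state = proj.factory.entry_state()
state.register_plugin('heap', angr.SimHeapPTMalloc())   # opt in to the PTMalloc-style heap
addr = state.heap.malloc(0x20)
state.heap.free(addr)  # free-list maintenance is where _find_bck gets consulted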
1,398
tomnor/channelpack
channelpack/pulltxt.py
PatternPull.file_rows
def file_rows(self, fo):
        """Return the lines in the file as a list.

        fo is the open file object."""

        rows = []
        for i in range(NUMROWS):
            line = fo.readline()
            if not line:
                break
            rows += [line]

        return rows
python
def file_rows(self, fo):
        """Return the lines in the file as a list.

        fo is the open file object."""

        rows = []
        for i in range(NUMROWS):
            line = fo.readline()
            if not line:
                break
            rows += [line]

        return rows
['def', 'file_rows', '(', 'self', ',', 'fo', ')', ':', 'rows', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'NUMROWS', ')', ':', 'line', '=', 'fo', '.', 'readline', '(', ')', 'if', 'not', 'line', ':', 'break', 'rows', '+=', '[', 'line', ']', 'return', 'rows']
Return the lines in the file as a list. fo is the open file object.
['Return', 'the', 'lines', 'in', 'the', 'file', 'as', 'a', 'list', '.']
train
https://github.com/tomnor/channelpack/blob/9ad3cd11c698aed4c0fc178385b2ba38a7d0efae/channelpack/pulltxt.py#L101-L113
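A hedged sketch of file_rows, assuming PatternPull can be built from a text data file (the constructor argument and file name are guesses for illustration); the method simply returns up to NUMROWS leading lines used for pattern detection.

from channelpack.pulltxt import PatternPull  # assumed import path, per the record

pp = PatternPull('measurement.txt')  # assumption about the constructor argument
with open('measurement.txt') as fo:
    rows = pp.file_rows(fo)
print(len(rows))  # at most NUMROWS lines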
1,399
larryng/narwal
narwal/reddit.py
Reddit.flair
def flair(self, r, name, text, css_class):
        """Login required.  Sets flair for a user.  See https://github.com/reddit/reddit/wiki/API%3A-flair.
        Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.

        URL: ``http://www.reddit.com/api/flair``

        :param r: name of subreddit
        :param name: name of the user
        :param text: flair text to assign
        :param css_class: CSS class to assign to flair text
        """
        data = dict(r=r, name=name, text=text, css_class=css_class)
        j = self.post('api', 'flair', data=data)
        return assert_truthy(j)
python
def flair(self, r, name, text, css_class):
        """Login required.  Sets flair for a user.  See https://github.com/reddit/reddit/wiki/API%3A-flair.
        Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.

        URL: ``http://www.reddit.com/api/flair``

        :param r: name of subreddit
        :param name: name of the user
        :param text: flair text to assign
        :param css_class: CSS class to assign to flair text
        """
        data = dict(r=r, name=name, text=text, css_class=css_class)
        j = self.post('api', 'flair', data=data)
        return assert_truthy(j)
['def', 'flair', '(', 'self', ',', 'r', ',', 'name', ',', 'text', ',', 'css_class', ')', ':', 'data', '=', 'dict', '(', 'r', '=', 'r', ',', 'name', '=', 'name', ',', 'text', '=', 'text', ',', 'css_class', '=', 'css_class', ')', 'j', '=', 'self', '.', 'post', '(', "'api'", ',', "'flair'", ',', 'data', '=', 'data', ')', 'return', 'assert_truthy', '(', 'j', ')']
Login required. Sets flair for a user. See https://github.com/reddit/reddit/wiki/API%3A-flair. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response. URL: ``http://www.reddit.com/api/flair`` :param r: name of subreddit :param name: name of the user :param text: flair text to assign :param css_class: CSS class to assign to flair text
['Login', 'required', '.', 'Sets', 'flair', 'for', 'a', 'user', '.', 'See', 'https', ':', '//', 'github', '.', 'com', '/', 'reddit', '/', 'reddit', '/', 'wiki', '/', 'API%3A', '-', 'flair', '.', 'Returns', 'True', 'or', 'raises', ':', 'class', ':', 'exceptions', '.', 'UnexpectedResponse', 'if', 'non', '-', 'truthy', 'value', 'in', 'response', '.', 'URL', ':', 'http', ':', '//', 'www', '.', 'reddit', '.', 'com', '/', 'api', '/', 'flair', ':', 'param', 'r', ':', 'name', 'of', 'subreddit', ':', 'param', 'name', ':', 'name', 'of', 'the', 'user', ':', 'param', 'text', ':', 'flair', 'text', 'to', 'assign', ':', 'param', 'css_class', ':', 'CSS', 'class', 'to', 'assign', 'to', 'flair', 'text']
train
https://github.com/larryng/narwal/blob/58c409a475c8ed865579a61d7010162ed8cef597/narwal/reddit.py#L877-L889
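A hedged sketch of setting flair through narwal, assuming narwal.connect is the way to obtain a logged-in Reddit session (its exact signature is an assumption); subreddit, user, and flair values are illustrative.

import narwal

reddit = narwal.connect('my_bot', 'hunter2', user_agent='flair example')  # assumed login helper
reddit.flair(r='pics', name='some_user', text='photographer', css_class='flair-blue')
# returns True on success, raises UnexpectedResponse otherwise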