Unnamed: 0 (int64, 0-10k) | repository_name (string, 7-54) | func_path_in_repository (string, 5-223) | func_name (string, 1-134) | whole_func_string (string, 100-30.3k) | language (1 class) | func_code_string (string, 100-30.3k) | func_code_tokens (string, 138-33.2k) | func_documentation_string (string, 1-15k) | func_documentation_tokens (string, 5-5.14k) | split_name (1 class) | func_code_url (string, 91-315) |
---|---|---|---|---|---|---|---|---|---|---|---|
9,400 | bxlab/bx-python | lib/bx/seq/seq.py | SeqFile.get | def get(self, start, length):
"""
Fetch subsequence starting at position `start` with length `length`.
This method is picky about parameters: the requested interval must
have non-negative length and fit entirely inside the NIB sequence,
and the returned string will contain exactly 'length' characters;
otherwise an AssertionError is raised.
"""
# Check parameters
assert length >= 0, "Length must be non-negative (got %d)" % length
assert start >= 0, "Start must be non-negative (got %d)" % start
assert start + length <= self.length, \
"Interval beyond end of sequence (%s..%s > %s)" % ( start, start + length, self.length )
# Fetch sequence and reverse complement if necessary
if not self.revcomp:
return self.raw_fetch( start, length )
if self.revcomp == "-3'":
return self.reverse_complement(self.raw_fetch(start,length))
assert self.revcomp == "-5'", "unrecognized reverse complement scheme"
start = self.length - (start+length)
return self.reverse_complement(self.raw_fetch(start,length)) | python | train | https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/seq/seq.py#L74-L94 |
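The "-5'" branch above remaps the requested interval onto the far end of the sequence before reverse-complementing. A minimal standalone sketch of that coordinate flip, using a plain string in place of a NIB-backed sequence (the sequence literal and the simplified fetch are illustrative, not bx-python's API):

```python
# Illustrative stand-in: fetching from the "-5'" strand mirrors the interval
# to the opposite end of the sequence before reverse-complementing it.
COMPLEMENT = str.maketrans("ACGTacgt", "TGCAtgca")

def reverse_complement(seq):
    return seq.translate(COMPLEMENT)[::-1]

def get(seq, start, length, revcomp=None):
    assert length >= 0 and start >= 0 and start + length <= len(seq)
    if not revcomp:
        return seq[start:start + length]
    if revcomp == "-3'":
        return reverse_complement(seq[start:start + length])
    assert revcomp == "-5'", "unrecognized reverse complement scheme"
    start = len(seq) - (start + length)          # mirror the interval
    return reverse_complement(seq[start:start + length])

print(get("ACGTACGT", 0, 3))                     # ACG
print(get("ACGTACGT", 0, 3, revcomp="-5'"))      # reverse complement of the last 3 bases
```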
9,401 | sosy-lab/benchexec | benchexec/check_cgroups.py | check_cgroup_availability_in_thread | def check_cgroup_availability_in_thread(options):
"""
Run check_cgroup_availability() in a separate thread to detect the following problem:
If "cgexec --sticky" is used to tell cgrulesengd to not interfere
with our child processes, the sticky flag unfortunately works only
for processes spawned by the main thread, not those spawned by other threads
(and this will happen if "benchexec -N" is used).
"""
thread = _CheckCgroupsThread(options)
thread.start()
thread.join()
if thread.error:
raise thread.error | python | train | https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/check_cgroups.py#L83-L95 |
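The same pattern, running a check in a worker thread and re-raising any captured exception in the caller, can be sketched without the cgroups specifics; the placeholder check below stands in for BenchExec's check_cgroup_availability():

```python
import threading

class _CheckThread(threading.Thread):
    """Run a callable in a separate thread and remember any exception it raised."""
    def __init__(self, check):
        super().__init__()
        self.check = check
        self.error = None

    def run(self):
        try:
            self.check()
        except Exception as e:      # captured here, re-raised by the caller
            self.error = e

def run_check_in_thread(check):
    thread = _CheckThread(check)
    thread.start()
    thread.join()
    if thread.error:
        raise thread.error

run_check_in_thread(lambda: None)   # passes silently; a failing check would raise here
```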
9,402 | f3at/feat | src/feat/extern/log/log.py | Loggable.warning | def warning(self, *args):
"""Log a warning. Used for non-fatal problems."""
if _canShortcutLogging(self.logCategory, WARN):
return
warningObject(self.logObjectName(), self.logCategory,
*self.logFunction(*args)) | python | train | https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/extern/log/log.py#L805-L810 |
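The warning() wrapper is a level-gated logging helper; a compact sketch of the same shortcut idea using the standard logging module (the category name and message are placeholders, not feat's actual logging backend):

```python
import logging

class Loggable:
    logCategory = "default"

    def logObjectName(self):
        return type(self).__name__

    def warning(self, msg, *args):
        logger = logging.getLogger(self.logCategory)
        if not logger.isEnabledFor(logging.WARNING):   # cheap shortcut check
            return
        logger.warning("%s: " + msg, self.logObjectName(), *args)

logging.basicConfig(level=logging.WARNING)
Loggable().warning("disk usage at %d%%", 93)
```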
9,403 | tensorflow/datasets | tensorflow_datasets/core/download/kaggle.py | KaggleCompetitionDownloader.download_file | def download_file(self, fname, output_dir):
"""Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test
raise ValueError("%s is not one of the competition's "
"files: %s" % (fname, self.competition_files))
command = [
"kaggle",
"competitions",
"download",
"--file",
fname,
"--path",
output_dir,
"-c",
self._competition_name,
]
_run_kaggle_command(command, self._competition_name)
return os.path.join(output_dir, fname) | python | train | https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/download/kaggle.py#L118-L135 |
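The download itself is a shell-out to the Kaggle CLI; a hedged sketch of the equivalent subprocess call (assumes the kaggle CLI is installed and authenticated; the competition and file names in the commented call are placeholders):

```python
import os
import subprocess

def download_competition_file(competition, fname, output_dir):
    """Invoke the Kaggle CLI to fetch a single competition file."""
    command = [
        "kaggle", "competitions", "download",
        "--file", fname,
        "--path", output_dir,
        "-c", competition,
    ]
    subprocess.run(command, check=True)   # raises CalledProcessError on failure
    return os.path.join(output_dir, fname)

# download_competition_file("titanic", "train.csv", "/tmp/kaggle")
```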
9,404 | pandas-dev/pandas | pandas/core/computation/eval.py | eval | def eval(expr, parser='pandas', engine=None, truediv=True,
local_dict=None, global_dict=None, resolvers=(), level=0,
target=None, inplace=False):
"""Evaluate a Python expression as a string using various backends.
The following arithmetic operations are supported: ``+``, ``-``, ``*``,
``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
:keyword:`or`, and :keyword:`not` with the same semantics as the
corresponding bitwise operators. :class:`~pandas.Series` and
:class:`~pandas.DataFrame` objects are supported and behave as they would
with plain ol' Python evaluation.
Parameters
----------
expr : str or unicode
The expression to evaluate. This string cannot contain any Python
`statements
<https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,
only Python `expressions
<https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.
parser : string, default 'pandas', {'pandas', 'python'}
The parser to use to construct the syntax tree from the expression. The
default of ``'pandas'`` parses code slightly different than standard
Python. Alternatively, you can parse an expression using the
``'python'`` parser to retain strict Python semantics. See the
:ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
engine : string or None, default 'numexpr', {'python', 'numexpr'}
The engine used to evaluate the expression. Supported engines are
- None : tries to use ``numexpr``, falls back to ``python``
- ``'numexpr'``: This default engine evaluates pandas objects using
numexpr for large speed ups in complex expressions
with large frames.
- ``'python'``: Performs operations as if you had ``eval``'d in top
level python. This engine is generally not that useful.
More backends may be available in the future.
truediv : bool, optional
Whether to use true division, like in Python >= 3
local_dict : dict or None, optional
A dictionary of local variables, taken from locals() by default.
global_dict : dict or None, optional
A dictionary of global variables, taken from globals() by default.
resolvers : list of dict-like or None, optional
A list of objects implementing the ``__getitem__`` special method that
you can use to inject an additional collection of namespaces to use for
variable lookup. For example, this is used in the
:meth:`~DataFrame.query` method to inject the
``DataFrame.index`` and ``DataFrame.columns``
variables that refer to their respective :class:`~pandas.DataFrame`
instance attributes.
level : int, optional
The number of prior stack frames to traverse and add to the current
scope. Most users will **not** need to change this parameter.
target : object, optional, default None
This is the target object for assignment. It is used when there is
variable assignment in the expression. If so, then `target` must
support item assignment with string keys, and if a copy is being
returned, it must also support `.copy()`.
inplace : bool, default False
If `target` is provided, and the expression mutates `target`, whether
to modify `target` inplace. Otherwise, return a copy of `target` with
the mutation.
Returns
-------
ndarray, numeric scalar, DataFrame, Series
Raises
------
ValueError
There are many instances where such an error can be raised:
- `target=None`, but the expression is multiline.
- The expression is multiline, but not all them have item assignment.
An example of such an arrangement is this:
a = b + 1
a + 2
Here, there are expressions on different lines, making it multiline,
but the last line has no variable assigned to the output of `a + 2`.
- `inplace=True`, but the expression is missing item assignment.
- Item assignment is provided, but the `target` does not support
string item assignment.
- Item assignment is provided and `inplace=False`, but the `target`
does not support the `.copy()` method
See Also
--------
DataFrame.query
DataFrame.eval
Notes
-----
The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
recursively cast to ``float64``.
See the :ref:`enhancing performance <enhancingperf.eval>` documentation for
more details.
"""
from pandas.core.computation.expr import Expr
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(expr, str):
_check_expression(expr)
exprs = [e.strip() for e in expr.splitlines() if e.strip() != '']
else:
exprs = [expr]
multi_line = len(exprs) > 1
if multi_line and target is None:
raise ValueError("multi-line expressions are only valid in the "
"context of data, use DataFrame.eval")
ret = None
first_expr = True
target_modified = False
for expr in exprs:
expr = _convert_expression(expr)
engine = _check_engine(engine)
_check_parser(parser)
_check_resolvers(resolvers)
_check_for_locals(expr, level, parser)
# get our (possibly passed-in) scope
env = _ensure_scope(level + 1, global_dict=global_dict,
local_dict=local_dict, resolvers=resolvers,
target=target)
parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,
truediv=truediv)
# construct the engine and evaluate the parsed expression
eng = _engines[engine]
eng_inst = eng(parsed_expr)
ret = eng_inst.evaluate()
if parsed_expr.assigner is None:
if multi_line:
raise ValueError("Multi-line expressions are only valid"
" if all expressions contain an assignment")
elif inplace:
raise ValueError("Cannot operate inplace "
"if there is no assignment")
# assign if needed
assigner = parsed_expr.assigner
if env.target is not None and assigner is not None:
target_modified = True
# if returning a copy, copy only on the first assignment
if not inplace and first_expr:
try:
target = env.target.copy()
except AttributeError:
raise ValueError("Cannot return a copy of the target")
else:
target = env.target
# TypeError is most commonly raised (e.g. int, list), but you
# get IndexError if you try to do this assignment on np.ndarray.
# we will ignore numpy warnings here; e.g. if trying
# to use a non-numeric indexer
try:
with warnings.catch_warnings(record=True):
# TODO: Filter the warnings we actually care about here.
target[assigner] = ret
except (TypeError, IndexError):
raise ValueError("Cannot assign expression output to target")
if not resolvers:
resolvers = ({assigner: ret},)
else:
# existing resolver needs updated to handle
# case of mutating existing column in copy
for resolver in resolvers:
if assigner in resolver:
resolver[assigner] = ret
break
else:
resolvers += ({assigner: ret},)
ret = None
first_expr = False
# We want to exclude `inplace=None` as being False.
if inplace is False:
return target if target_modified else ret | python | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/computation/eval.py#L155-L350 |
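A short usage sketch of pandas.eval covering the plain-expression and assignment/target paths described in the docstring (assumes pandas is installed; numexpr is optional and only affects speed):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [10, 20, 30]})
a, b = df["a"], df["b"]

# Single expression: evaluates against local variables by default.
print(pd.eval("a + b"))                     # values 11, 22, 33

# Assignment with a target: returns a modified copy unless inplace=True.
result = pd.eval("c = a + b", target=df)
print(result.columns.tolist())              # ['a', 'b', 'c']

# Multi-line form: every line must assign, and a target is required.
result = pd.eval("c = a + b\nd = c * 2", target=df)
print(result["d"].tolist())                 # [22, 44, 66]
```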
9,405 | tobami/littlechef | littlechef/runner.py | plugin | def plugin(name):
"""Executes the selected plugin
Plugins are expected to be found in the kitchen's 'plugins' directory
"""
env.host_string = lib.get_env_host_string()
plug = lib.import_plugin(name)
lib.print_header("Executing plugin '{0}' on "
"{1}".format(name, env.host_string))
node = lib.get_node(env.host_string)
if node == {'run_list': []}:
node['name'] = env.host_string
plug.execute(node)
print("Finished executing plugin") | python | def plugin(name):
"""Executes the selected plugin
Plugins are expected to be found in the kitchen's 'plugins' directory
"""
env.host_string = lib.get_env_host_string()
plug = lib.import_plugin(name)
lib.print_header("Executing plugin '{0}' on "
"{1}".format(name, env.host_string))
node = lib.get_node(env.host_string)
if node == {'run_list': []}:
node['name'] = env.host_string
plug.execute(node)
print("Finished executing plugin") | ['def', 'plugin', '(', 'name', ')', ':', 'env', '.', 'host_string', '=', 'lib', '.', 'get_env_host_string', '(', ')', 'plug', '=', 'lib', '.', 'import_plugin', '(', 'name', ')', 'lib', '.', 'print_header', '(', '"Executing plugin \'{0}\' on "', '"{1}"', '.', 'format', '(', 'name', ',', 'env', '.', 'host_string', ')', ')', 'node', '=', 'lib', '.', 'get_node', '(', 'env', '.', 'host_string', ')', 'if', 'node', '==', '{', "'run_list'", ':', '[', ']', '}', ':', 'node', '[', "'name'", ']', '=', 'env', '.', 'host_string', 'plug', '.', 'execute', '(', 'node', ')', 'print', '(', '"Finished executing plugin"', ')'] | Executes the selected plugin
Plugins are expected to be found in the kitchen's 'plugins' directory | ['Executes', 'the', 'selected', 'plugin', 'Plugins', 'are', 'expected', 'to', 'be', 'found', 'in', 'the', 'kitchen', 's', 'plugins', 'directory'] | train | https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/runner.py#L269-L282 |
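The plugin lookup reduces to importing a module by name from the kitchen's plugins directory and calling its execute(node); a generic sketch with importlib (the directory layout and the execute contract are assumptions for illustration, not littlechef's exact loader):

```python
import importlib.util
import os

def import_plugin(name, plugin_dir="plugins"):
    """Load <plugin_dir>/<name>.py as a module and return it."""
    path = os.path.join(plugin_dir, name + ".py")
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# plug = import_plugin("save_xml")
# plug.execute({"name": "node1.example.com", "run_list": []})
```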
9,406 | pavelsof/ipatok | ipatok/tokens.py | normalise | def normalise(string):
"""
Convert each character of the string to the normal form in which it was
defined in the IPA spec. This would be normal form D, except for the
voiceless palatal fricative (ç) which should be in normal form C.
Helper for tokenise_word(string, ..).
"""
string = unicodedata.normalize('NFD', string)
for char_c in ipa.get_precomposed_chars():
char_d = unicodedata.normalize('NFD', char_c)
if char_d in string:
string = string.replace(char_d, char_c)
return string | python | train | https://github.com/pavelsof/ipatok/blob/fde3c334b8573315fd1073f14341b71f50f7f006/ipatok/tokens.py#L7-L22 |
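The special-casing exists because IPA defines most symbols in decomposed (NFD) form but a few, such as ç, precomposed; a standalone illustration with unicodedata, where the one-element list stands in for ipa.get_precomposed_chars():

```python
import unicodedata

PRECOMPOSED = ["\u00e7"]  # ç, the voiceless palatal fricative

def normalise(string):
    """NFD-normalise, then restore the characters IPA defines precomposed."""
    string = unicodedata.normalize("NFD", string)
    for char_c in PRECOMPOSED:
        char_d = unicodedata.normalize("NFD", char_c)
        if char_d in string:
            string = string.replace(char_d, char_c)
    return string

word = "\u00e7a"                                       # 'ça'
assert len(unicodedata.normalize("NFD", word)) == 3    # c + combining cedilla + a
assert len(normalise(word)) == 2                       # ç restored to one code point
```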
9,407 | Duke-GCB/DukeDSClient | ddsc/ddsclient.py | ShareCommand.run | def run(self, args):
"""
Gives user permission based on auth_role arg and sends email to that user.
:param args Namespace arguments parsed from the command line
"""
email = args.email # email of person to send email to
username = args.username # username of person to send email to, will be None if email is specified
force_send = args.resend # is this a resend so we should force sending
auth_role = args.auth_role # authorization role(project permissions) to give to the user
msg_file = args.msg_file # message file who's contents will be sent with the share
message = read_argument_file_contents(msg_file)
print("Sharing project.")
to_user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username)
try:
project = self.fetch_project(args, must_exist=True, include_children=False)
dest_email = self.service.share(project, to_user, force_send, auth_role, message)
print("Share email message sent to " + dest_email)
except D4S2Error as ex:
if ex.warning:
print(ex.message)
else:
raise | python | train | https://github.com/Duke-GCB/DukeDSClient/blob/117f68fb9bae82e4c81ea487ad5d61ac350f3726/ddsc/ddsclient.py#L267-L288 |
9,408 | SuryaSankar/flask-sqlalchemy-booster | flask_sqlalchemy_booster/model_booster/queryable_mixin.py | QueryableMixin.find_or_create | def find_or_create(cls, **kwargs):
"""Checks if an instance already exists by filtering with the
kwargs. If yes, returns that instance. If not, creates a new
instance with kwargs and returns it
Args:
**kwargs: The keyword arguments which are used for filtering
and initialization.
keys(list, optional): A special keyword argument.
If passed, only the set of keys mentioned here will be used
for filtering. Useful when we want to 'find' based on a subset
of the keys and create with all the keys
Examples:
>>> customer = Customer.find_or_create(
... name="vicky", email="[email protected]", country="India")
>>> customer.id
45
>>> customer1 = Customer.find_or_create(
... name="vicky", email="[email protected]", country="India")
>>> customer1==customer
True
>>> customer2 = Customer.find_or_create(
... name="vicky", email="[email protected]", country="Russia")
>>> customer2==customer
False
>>> customer3 = Customer.find_or_create(
... name="vicky", email="[email protected]", country="Russia",
... keys=['name', 'email'])
>>> customer3==customer
True
"""
keys = kwargs.pop('keys') if 'keys' in kwargs else []
return cls.first(**subdict(kwargs, keys)) or cls.create(**kwargs) | python | train | https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/model_booster/queryable_mixin.py#L580-L615 |
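The find-or-create idiom, including the keys-subset trick, can be sketched independently of Flask-SQLAlchemy-Booster; the in-memory Store below is a stand-in for a model's first()/create() classmethods:

```python
def subdict(d, keys):
    """Restrict d to the given keys; an empty key list means 'use everything'."""
    return {k: v for k, v in d.items() if k in keys} if keys else dict(d)

class Store:
    """Minimal stand-in for a model with first()/create() classmethods."""
    rows = []

    @classmethod
    def first(cls, **filters):
        return next((r for r in cls.rows
                     if all(r.get(k) == v for k, v in filters.items())), None)

    @classmethod
    def create(cls, **kwargs):
        cls.rows.append(kwargs)
        return kwargs

    @classmethod
    def find_or_create(cls, **kwargs):
        keys = kwargs.pop("keys", [])
        return cls.first(**subdict(kwargs, keys)) or cls.create(**kwargs)

c1 = Store.find_or_create(name="vicky", country="India")
c2 = Store.find_or_create(name="vicky", country="Russia", keys=["name"])
assert c1 is c2        # matched on 'name' only, so no new row was created
```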
9,409 | CI-WATER/gsshapy | gsshapy/lib/db_tools.py | init_sqlite_db | def init_sqlite_db(path, initTime=False):
"""
Initialize SQLite Database
Args:
path(str): Path to database (Ex. '/home/username/my_sqlite.db').
initTime(Optional[bool]): If True, it will print the amount of time to generate database.
Example::
from gsshapy.lib.db_tools import init_sqlite_db, create_session
sqlite_db_path = '/home/username/my_sqlite.db'
sqlalchemy_url = init_sqlite_db(path=sqlite_db_path)
db_work_sessionmaker = get_sessionmaker(sqlalchemy_url)
db_work_session = db_work_sessionmaker()
##DO WORK
db_work_session.close()
"""
sqlite_base_url = 'sqlite:///'
sqlalchemy_url = sqlite_base_url + path
init_time = init_db(sqlalchemy_url)
if initTime:
print('TIME: {0} seconds'.format(init_time))
return sqlalchemy_url | python | train | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/db_tools.py#L82-L117 |
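The helper just assembles a sqlite:/// URL and hands it to the project's init_db(); a hedged sketch of the equivalent plain-SQLAlchemy steps (assumes SQLAlchemy is installed; table creation is omitted because gsshapy's metadata is not shown here):

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

path = "/tmp/my_sqlite.db"
sqlalchemy_url = "sqlite:///" + path      # same URL string the helper returns

engine = create_engine(sqlalchemy_url)
Session = sessionmaker(bind=engine)

session = Session()
# ... do work against the database ...
session.close()
```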
9,410 | saltstack/salt | salt/modules/ps.py | _get_proc_status | def _get_proc_status(proc):
'''
Returns the status of a Process instance.
It's backward compatible with < 2.0 versions of psutil.
'''
try:
return salt.utils.data.decode(proc.status() if PSUTIL2 else proc.status)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return None | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L86-L95 |
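The PSUTIL2 flag bridges psutil's 2.0 change from attributes to methods; a small sketch of one way to probe for it (the callable() check is an illustrative heuristic, not how salt actually sets PSUTIL2):

```python
import psutil

# psutil >= 2.0 exposes Process.status() as a method; older releases used an attribute.
PSUTIL2 = callable(getattr(psutil.Process(), "status", None))

def get_proc_status(proc):
    try:
        return proc.status() if PSUTIL2 else proc.status
    except (psutil.NoSuchProcess, psutil.AccessDenied):
        return None

print(get_proc_status(psutil.Process()))   # e.g. 'running'
```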
9,411 | bwhite/hadoopy | hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py | Manifest.todom | def todom(self):
""" Return the manifest as DOM tree """
doc = Document()
docE = doc.cE(self.manifestType)
if self.manifestType == "assemblyBinding":
cfg = doc.cE("configuration")
win = doc.cE("windows")
win.aChild(docE)
cfg.aChild(win)
doc.aChild(cfg)
else:
doc.aChild(docE)
if self.manifestType != "dependentAssembly":
docE.setA("xmlns", "urn:schemas-microsoft-com:asm.v1")
if self.manifestType != "assemblyBinding":
docE.setA("manifestVersion",
".".join([str(i) for i in self.manifestVersion]))
if self.noInheritable:
docE.aChild(doc.cE("noInheritable"))
if self.noInherit:
docE.aChild(doc.cE("noInherit"))
aId = doc.cE("assemblyIdentity")
if self.type:
aId.setAttribute("type", self.type)
if self.name:
aId.setAttribute("name", self.name)
if self.language:
aId.setAttribute("language", self.language)
if self.processorArchitecture:
aId.setAttribute("processorArchitecture",
self.processorArchitecture)
if self.version:
aId.setAttribute("version",
".".join([str(i) for i in self.version]))
if self.publicKeyToken:
aId.setAttribute("publicKeyToken", self.publicKeyToken)
if aId.hasAttributes():
docE.aChild(aId)
else:
aId.unlink()
if self.applyPublisherPolicy != None:
ppE = doc.cE("publisherPolicy")
if self.applyPublisherPolicy:
ppE.setA("apply", "yes")
else:
ppE.setA("apply", "no")
docE.aChild(ppE)
if self.description:
descE = doc.cE("description")
descE.aChild(doc.cT(self.description))
docE.aChild(descE)
if self.requestedExecutionLevel in ("asInvoker", "highestAvailable",
"requireAdministrator"):
tE = doc.cE("trustInfo")
tE.setA("xmlns", "urn:schemas-microsoft-com:asm.v3")
sE = doc.cE("security")
rpE = doc.cE("requestedPrivileges")
relE = doc.cE("requestedExecutionLevel")
relE.setA("level", self.requestedExecutionLevel)
if self.uiAccess:
relE.setA("uiAccess", "true")
else:
relE.setA("uiAccess", "false")
rpE.aChild(relE)
sE.aChild(rpE)
tE.aChild(sE)
docE.aChild(tE)
if self.dependentAssemblies:
for assembly in self.dependentAssemblies:
if self.manifestType != "assemblyBinding":
dE = doc.cE("dependency")
if assembly.optional:
dE.setAttribute("optional", "yes")
daE = doc.cE("dependentAssembly")
adom = assembly.todom()
for child in adom.documentElement.childNodes:
daE.aChild(child.cloneNode(False))
adom.unlink()
if self.manifestType != "assemblyBinding":
dE.aChild(daE)
docE.aChild(dE)
else:
docE.aChild(daE)
if self.bindingRedirects:
for bindingRedirect in self.bindingRedirects:
brE = doc.cE("bindingRedirect")
brE.setAttribute("oldVersion",
"-".join([".".join([str(i)
for i in
part])
for part in
bindingRedirect[0]]))
brE.setAttribute("newVersion",
".".join([str(i) for i in bindingRedirect[1]]))
docE.aChild(brE)
if self.files:
for file_ in self.files:
fE = doc.cE("file")
for attr in ("name", "hashalg", "hash"):
val = getattr(file_, attr)
if val:
fE.setA(attr, val)
docE.aChild(fE)
return doc | python | train | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winmanifest.py#L748-L851 |
9,412 | RiotGames/cloud-inquisitor | backend/cloud_inquisitor/plugins/types/resources.py | BaseResource.get_owner_emails | def get_owner_emails(self, partial_owner_match=True):
"""Return a list of email addresses associated with the instance, based on tags
Returns:
List of email addresses if any, else None
"""
for tag in self.tags:
if tag.key.lower() == 'owner':
rgx = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I)
if partial_owner_match:
match = rgx.findall(tag.value)
if match:
return [NotificationContact('email', email) for email in match]
else:
match = rgx.match(tag.value)
if match:
return [NotificationContact('email', email) for email in match.groups()]
return None | python | def get_owner_emails(self, partial_owner_match=True):
"""Return a list of email addresses associated with the instance, based on tags
Returns:
List of email addresses if any, else None
"""
for tag in self.tags:
if tag.key.lower() == 'owner':
rgx = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I)
if partial_owner_match:
match = rgx.findall(tag.value)
if match:
return [NotificationContact('email', email) for email in match]
else:
match = rgx.match(tag.value)
if match:
return [NotificationContact('email', email) for email in match.groups()]
return None | ['def', 'get_owner_emails', '(', 'self', ',', 'partial_owner_match', '=', 'True', ')', ':', 'for', 'tag', 'in', 'self', '.', 'tags', ':', 'if', 'tag', '.', 'key', '.', 'lower', '(', ')', '==', "'owner'", ':', 'rgx', '=', 're', '.', 'compile', '(', 'RGX_EMAIL_VALIDATION_PATTERN', ',', 're', '.', 'I', ')', 'if', 'partial_owner_match', ':', 'match', '=', 'rgx', '.', 'findall', '(', 'tag', '.', 'value', ')', 'if', 'match', ':', 'return', '[', 'NotificationContact', '(', "'email'", ',', 'email', ')', 'for', 'email', 'in', 'match', ']', 'else', ':', 'match', '=', 'rgx', '.', 'match', '(', 'tag', '.', 'value', ')', 'if', 'match', ':', 'return', '[', 'NotificationContact', '(', "'email'", ',', 'email', ')', 'for', 'email', 'in', 'match', '.', 'groups', '(', ')', ']', 'return', 'None'] | Return a list of email addresses associated with the instance, based on tags
Returns:
List of email addresses if any, else None | ['Return', 'a', 'list', 'of', 'email', 'addresses', 'associated', 'with', 'the', 'instance', 'based', 'on', 'tags'] | train | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/types/resources.py#L291-L308 |
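A minimal standalone sketch of the owner-tag scan in the record above, assuming a simple email regex and namedtuple stand-ins for Tag and NotificationContact (the real RGX_EMAIL_VALIDATION_PATTERN and model classes are not shown in this record):

import re
from collections import namedtuple

Tag = namedtuple("Tag", ["key", "value"])                                    # stand-in
NotificationContact = namedtuple("NotificationContact", ["type", "value"])  # stand-in
EMAIL_RGX = re.compile(r"[\w.+-]+@[\w-]+\.[\w.-]+", re.I)                   # assumed pattern

def owner_emails(tags, partial_owner_match=True):
    # Scan tags for an 'owner' key and pull email addresses out of its value.
    for tag in tags:
        if tag.key.lower() != "owner":
            continue
        if partial_owner_match:
            found = EMAIL_RGX.findall(tag.value)
        else:
            m = EMAIL_RGX.fullmatch(tag.value)
            found = [m.group(0)] if m else []
        if found:
            return [NotificationContact("email", e) for e in found]
    return None

print(owner_emails([Tag("Owner", "alice@example.com, bob@example.com")]))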
9,413 | fermiPy/fermipy | fermipy/jobs/job_archive.py | JobArchive.remove_jobs | def remove_jobs(self, mask):
"""Mark all jobs that match a mask as 'removed' """
jobnames = self.table[mask]['jobname']
jobkey = self.table[mask]['jobkey']
self.table[mask]['status'] = JobStatus.removed
for jobname, jobkey in zip(jobnames, jobkey):
fullkey = JobDetails.make_fullkey(jobname, jobkey)
self._cache.pop(fullkey).status = JobStatus.removed
self.write_table_file() | python | def remove_jobs(self, mask):
"""Mark all jobs that match a mask as 'removed' """
jobnames = self.table[mask]['jobname']
jobkey = self.table[mask]['jobkey']
self.table[mask]['status'] = JobStatus.removed
for jobname, jobkey in zip(jobnames, jobkey):
fullkey = JobDetails.make_fullkey(jobname, jobkey)
self._cache.pop(fullkey).status = JobStatus.removed
self.write_table_file() | ['def', 'remove_jobs', '(', 'self', ',', 'mask', ')', ':', 'jobnames', '=', 'self', '.', 'table', '[', 'mask', ']', '[', "'jobname'", ']', 'jobkey', '=', 'self', '.', 'table', '[', 'mask', ']', '[', "'jobkey'", ']', 'self', '.', 'table', '[', 'mask', ']', '[', "'status'", ']', '=', 'JobStatus', '.', 'removed', 'for', 'jobname', ',', 'jobkey', 'in', 'zip', '(', 'jobnames', ',', 'jobkey', ')', ':', 'fullkey', '=', 'JobDetails', '.', 'make_fullkey', '(', 'jobname', ',', 'jobkey', ')', 'self', '.', '_cache', '.', 'pop', '(', 'fullkey', ')', '.', 'status', '=', 'JobStatus', '.', 'removed', 'self', '.', 'write_table_file', '(', ')'] | Mark all jobs that match a mask as 'removed' | ['Mark', 'all', 'jobs', 'that', 'match', 'a', 'mask', 'as', 'removed'] | train | https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/job_archive.py#L607-L615 |
9,414 | deepmind/pysc2 | pysc2/lib/actions.py | select_army | def select_army(action, action_space, select_add):
"""Select the entire army."""
del action_space
action.action_ui.select_army.selection_add = select_add | python | def select_army(action, action_space, select_add):
"""Select the entire army."""
del action_space
action.action_ui.select_army.selection_add = select_add | ['def', 'select_army', '(', 'action', ',', 'action_space', ',', 'select_add', ')', ':', 'del', 'action_space', 'action', '.', 'action_ui', '.', 'select_army', '.', 'selection_add', '=', 'select_add'] | Select the entire army. | ['Select', 'the', 'entire', 'army', '.'] | train | https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/actions.py#L78-L81 |
9,415 | gem/oq-engine | openquake/baselib/node.py | pprint | def pprint(self, stream=None, indent=1, width=80, depth=None):
"""
Pretty print the underlying literal Python object
"""
pp.pprint(to_literal(self), stream, indent, width, depth) | python | def pprint(self, stream=None, indent=1, width=80, depth=None):
"""
Pretty print the underlying literal Python object
"""
pp.pprint(to_literal(self), stream, indent, width, depth) | ['def', 'pprint', '(', 'self', ',', 'stream', '=', 'None', ',', 'indent', '=', '1', ',', 'width', '=', '80', ',', 'depth', '=', 'None', ')', ':', 'pp', '.', 'pprint', '(', 'to_literal', '(', 'self', ')', ',', 'stream', ',', 'indent', ',', 'width', ',', 'depth', ')'] | Pretty print the underlying literal Python object | ['Pretty', 'print', 'the', 'underlying', 'literal', 'Python', 'object'] | train | https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/node.py#L585-L589 |
9,416 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/textio.py | Color.default | def default(cls):
"Make the current foreground color the default."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_GREY
wAttributes &= ~win32.FOREGROUND_INTENSITY
cls._set_text_attributes(wAttributes) | python | def default(cls):
"Make the current foreground color the default."
wAttributes = cls._get_text_attributes()
wAttributes &= ~win32.FOREGROUND_MASK
wAttributes |= win32.FOREGROUND_GREY
wAttributes &= ~win32.FOREGROUND_INTENSITY
cls._set_text_attributes(wAttributes) | ['def', 'default', '(', 'cls', ')', ':', 'wAttributes', '=', 'cls', '.', '_get_text_attributes', '(', ')', 'wAttributes', '&=', '~', 'win32', '.', 'FOREGROUND_MASK', 'wAttributes', '|=', 'win32', '.', 'FOREGROUND_GREY', 'wAttributes', '&=', '~', 'win32', '.', 'FOREGROUND_INTENSITY', 'cls', '.', '_set_text_attributes', '(', 'wAttributes', ')'] | Make the current foreground color the default. | ['Make', 'the', 'current', 'foreground', 'color', 'the', 'default', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L920-L926 |
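A bit-mask sketch of the same attribute update with assumed constant values (the real ones come from the win32 console API, so this is illustration only):

FOREGROUND_MASK = 0x000F        # assumed values, mirroring the Windows console flags
FOREGROUND_GREY = 0x0007
FOREGROUND_INTENSITY = 0x0008

def make_default(attributes):
    attributes &= ~FOREGROUND_MASK        # clear current foreground bits
    attributes |= FOREGROUND_GREY         # plain grey foreground
    attributes &= ~FOREGROUND_INTENSITY   # drop intensity
    return attributes

print(hex(make_default(0x004C)))  # background nibble kept, foreground reset -> 0x47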
9,417 | jsfenfen/990-xml-reader | irs_reader/xmlrunner.py | XMLRunner.run_sked | def run_sked(self, object_id, sked, verbose=False):
"""
sked is the proper name of the schedule:
IRS990, IRS990EZ, IRS990PF, IRS990ScheduleA, etc.
"""
self.whole_filing_data = []
self.filing_keyerr_data = []
this_filing = Filing(object_id)
this_filing.process(verbose=verbose)
this_version = this_filing.get_version()
if this_version in ALLOWED_VERSIONSTRINGS or ( self.csv_format and this_version in CSV_ALLOWED_VERSIONSTRINGS ):
this_version = this_filing.get_version()
ein = this_filing.get_ein()
sked_dict = this_filing.get_schedule(sked)
self._run_schedule(sked, object_id, sked_dict, ein)
this_filing.set_result(self.whole_filing_data)
this_filing.set_keyerrors(self.filing_keyerr_data)
return this_filing
else:
print("Filing version %s isn't supported for this operation" % this_version )
return this_filing | python | def run_sked(self, object_id, sked, verbose=False):
"""
sked is the proper name of the schedule:
IRS990, IRS990EZ, IRS990PF, IRS990ScheduleA, etc.
"""
self.whole_filing_data = []
self.filing_keyerr_data = []
this_filing = Filing(object_id)
this_filing.process(verbose=verbose)
this_version = this_filing.get_version()
if this_version in ALLOWED_VERSIONSTRINGS or ( self.csv_format and this_version in CSV_ALLOWED_VERSIONSTRINGS ):
this_version = this_filing.get_version()
ein = this_filing.get_ein()
sked_dict = this_filing.get_schedule(sked)
self._run_schedule(sked, object_id, sked_dict, ein)
this_filing.set_result(self.whole_filing_data)
this_filing.set_keyerrors(self.filing_keyerr_data)
return this_filing
else:
print("Filing version %s isn't supported for this operation" % this_version )
return this_filing | ['def', 'run_sked', '(', 'self', ',', 'object_id', ',', 'sked', ',', 'verbose', '=', 'False', ')', ':', 'self', '.', 'whole_filing_data', '=', '[', ']', 'self', '.', 'filing_keyerr_data', '=', '[', ']', 'this_filing', '=', 'Filing', '(', 'object_id', ')', 'this_filing', '.', 'process', '(', 'verbose', '=', 'verbose', ')', 'this_version', '=', 'this_filing', '.', 'get_version', '(', ')', 'if', 'this_version', 'in', 'ALLOWED_VERSIONSTRINGS', 'or', '(', 'self', '.', 'csv_format', 'and', 'this_version', 'in', 'CSV_ALLOWED_VERSIONSTRINGS', ')', ':', 'this_version', '=', 'this_filing', '.', 'get_version', '(', ')', 'ein', '=', 'this_filing', '.', 'get_ein', '(', ')', 'sked_dict', '=', 'this_filing', '.', 'get_schedule', '(', 'sked', ')', 'self', '.', '_run_schedule', '(', 'sked', ',', 'object_id', ',', 'sked_dict', ',', 'ein', ')', 'this_filing', '.', 'set_result', '(', 'self', '.', 'whole_filing_data', ')', 'this_filing', '.', 'set_keyerrors', '(', 'self', '.', 'filing_keyerr_data', ')', 'return', 'this_filing', 'else', ':', 'print', '(', '"Filing version %s isn\'t supported for this operation"', '%', 'this_version', ')', 'return', 'this_filing'] | sked is the proper name of the schedule:
IRS990, IRS990EZ, IRS990PF, IRS990ScheduleA, etc. | ['sked', 'is', 'the', 'proper', 'name', 'of', 'the', 'schedule', ':', 'IRS990', 'IRS990EZ', 'IRS990PF', 'IRS990ScheduleA', 'etc', '.'] | train | https://github.com/jsfenfen/990-xml-reader/blob/00020529b789081329a31a2e30b5ee729ce7596a/irs_reader/xmlrunner.py#L161-L182 |
9,418 | YeoLab/anchor | anchor/infotheory.py | cross_phenotype_jsd | def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1) | python | def cross_phenotype_jsd(data, groupby, bins, n_iter=100):
"""Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes
"""
grouped = data.groupby(groupby)
jsds = []
seen = set([])
for phenotype1, df1 in grouped:
for phenotype2, df2 in grouped:
pair = tuple(sorted([phenotype1, phenotype2]))
if pair in seen:
continue
seen.add(pair)
if phenotype1 == phenotype2:
seriess = []
bs = cross_validation.Bootstrap(df1.shape[0], n_iter=n_iter,
train_size=0.5)
for i, (ind1, ind2) in enumerate(bs):
df1_subset = df1.iloc[ind1, :]
df2_subset = df2.iloc[ind2, :]
seriess.append(
binify_and_jsd(df1_subset, df2_subset, None, bins))
series = pd.concat(seriess, axis=1, names=None).mean(axis=1)
series.name = pair
jsds.append(series)
else:
series = binify_and_jsd(df1, df2, pair, bins)
jsds.append(series)
return pd.concat(jsds, axis=1) | ['def', 'cross_phenotype_jsd', '(', 'data', ',', 'groupby', ',', 'bins', ',', 'n_iter', '=', '100', ')', ':', 'grouped', '=', 'data', '.', 'groupby', '(', 'groupby', ')', 'jsds', '=', '[', ']', 'seen', '=', 'set', '(', '[', ']', ')', 'for', 'phenotype1', ',', 'df1', 'in', 'grouped', ':', 'for', 'phenotype2', ',', 'df2', 'in', 'grouped', ':', 'pair', '=', 'tuple', '(', 'sorted', '(', '[', 'phenotype1', ',', 'phenotype2', ']', ')', ')', 'if', 'pair', 'in', 'seen', ':', 'continue', 'seen', '.', 'add', '(', 'pair', ')', 'if', 'phenotype1', '==', 'phenotype2', ':', 'seriess', '=', '[', ']', 'bs', '=', 'cross_validation', '.', 'Bootstrap', '(', 'df1', '.', 'shape', '[', '0', ']', ',', 'n_iter', '=', 'n_iter', ',', 'train_size', '=', '0.5', ')', 'for', 'i', ',', '(', 'ind1', ',', 'ind2', ')', 'in', 'enumerate', '(', 'bs', ')', ':', 'df1_subset', '=', 'df1', '.', 'iloc', '[', 'ind1', ',', ':', ']', 'df2_subset', '=', 'df2', '.', 'iloc', '[', 'ind2', ',', ':', ']', 'seriess', '.', 'append', '(', 'binify_and_jsd', '(', 'df1_subset', ',', 'df2_subset', ',', 'None', ',', 'bins', ')', ')', 'series', '=', 'pd', '.', 'concat', '(', 'seriess', ',', 'axis', '=', '1', ',', 'names', '=', 'None', ')', '.', 'mean', '(', 'axis', '=', '1', ')', 'series', '.', 'name', '=', 'pair', 'jsds', '.', 'append', '(', 'series', ')', 'else', ':', 'series', '=', 'binify_and_jsd', '(', 'df1', ',', 'df2', ',', 'pair', ',', 'bins', ')', 'jsds', '.', 'append', '(', 'series', ')', 'return', 'pd', '.', 'concat', '(', 'jsds', ',', 'axis', '=', '1', ')'] | Jensen-Shannon divergence of features across phenotypes
Parameters
----------
data : pandas.DataFrame
A (n_samples, n_features) Dataframe
groupby : mappable
A samples to phenotypes mapping
n_iter : int
Number of bootstrap resampling iterations to perform for the
within-group comparisons
n_bins : int
Number of bins to binify the singles data on
Returns
-------
jsd_df : pandas.DataFrame
A (n_features, n_phenotypes^2) dataframe of the JSD between each
feature between and within phenotypes | ['Jensen', '-', 'Shannon', 'divergence', 'of', 'features', 'across', 'phenotypes'] | train | https://github.com/YeoLab/anchor/blob/1f9c9d6d30235b1e77b945e6ef01db5a0e55d53a/anchor/infotheory.py#L218-L266 |
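A self-contained numpy sketch of the Jensen-Shannon divergence applied to two binned groups, approximating what binify_and_jsd computes per feature; the package's binning and bootstrap machinery is omitted and the sample data is synthetic:

import numpy as np

def jsd(p, q, base=2):
    # Jensen-Shannon divergence between two count/probability vectors.
    p = np.asarray(p, float); p = p / p.sum()
    q = np.asarray(q, float); q = q / q.sum()
    m = 0.5 * (p + q)
    def kl(a, b):
        mask = a > 0
        return np.sum(a[mask] * np.log(a[mask] / b[mask])) / np.log(base)
    return 0.5 * kl(p, m) + 0.5 * kl(q, m)

bins = np.linspace(0, 1, 11)
group1 = np.histogram(np.random.beta(2, 5, 500), bins=bins)[0]
group2 = np.histogram(np.random.beta(5, 2, 500), bins=bins)[0]
print(jsd(group1, group2))   # 0 = identical distributions, 1 = maximally different (base 2)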
9,419 | bsolomon1124/pyfinance | pyfinance/options.py | OpStrat.summary | def summary(self, St=None):
"""Tabular summary of strategy composition, broken out by option.
Returns
-------
pd.DataFrame
Columns: kind, position, strike, price, St, payoff, profit.
"""
St = self.St if St is None else St
if self.options:
payoffs = [op.payoff(St=St) for op in self.options]
profits = [op.profit(St=St) for op in self.options]
strikes = [op.K for op in self.options]
prices = [op.price for op in self.options]
exprs = [St] * len(self.options)
kinds = [op.kind for op in self.options]
poss = [op.pos for op in self.options]
res = OrderedDict(
[
("kind", kinds),
("position", poss),
("strike", strikes),
("price", prices),
("St", exprs),
("payoff", payoffs),
("profit", profits),
]
)
return DataFrame(res)
else:
return None | python | def summary(self, St=None):
"""Tabular summary of strategy composition, broken out by option.
Returns
-------
pd.DataFrame
Columns: kind, position, strike, price, St, payoff, profit.
"""
St = self.St if St is None else St
if self.options:
payoffs = [op.payoff(St=St) for op in self.options]
profits = [op.profit(St=St) for op in self.options]
strikes = [op.K for op in self.options]
prices = [op.price for op in self.options]
exprs = [St] * len(self.options)
kinds = [op.kind for op in self.options]
poss = [op.pos for op in self.options]
res = OrderedDict(
[
("kind", kinds),
("position", poss),
("strike", strikes),
("price", prices),
("St", exprs),
("payoff", payoffs),
("profit", profits),
]
)
return DataFrame(res)
else:
return None | ['def', 'summary', '(', 'self', ',', 'St', '=', 'None', ')', ':', 'St', '=', 'self', '.', 'St', 'if', 'St', 'is', 'None', 'else', 'St', 'if', 'self', '.', 'options', ':', 'payoffs', '=', '[', 'op', '.', 'payoff', '(', 'St', '=', 'St', ')', 'for', 'op', 'in', 'self', '.', 'options', ']', 'profits', '=', '[', 'op', '.', 'profit', '(', 'St', '=', 'St', ')', 'for', 'op', 'in', 'self', '.', 'options', ']', 'strikes', '=', '[', 'op', '.', 'K', 'for', 'op', 'in', 'self', '.', 'options', ']', 'prices', '=', '[', 'op', '.', 'price', 'for', 'op', 'in', 'self', '.', 'options', ']', 'exprs', '=', '[', 'St', ']', '*', 'len', '(', 'self', '.', 'options', ')', 'kinds', '=', '[', 'op', '.', 'kind', 'for', 'op', 'in', 'self', '.', 'options', ']', 'poss', '=', '[', 'op', '.', 'pos', 'for', 'op', 'in', 'self', '.', 'options', ']', 'res', '=', 'OrderedDict', '(', '[', '(', '"kind"', ',', 'kinds', ')', ',', '(', '"position"', ',', 'poss', ')', ',', '(', '"strike"', ',', 'strikes', ')', ',', '(', '"price"', ',', 'prices', ')', ',', '(', '"St"', ',', 'exprs', ')', ',', '(', '"payoff"', ',', 'payoffs', ')', ',', '(', '"profit"', ',', 'profits', ')', ',', ']', ')', 'return', 'DataFrame', '(', 'res', ')', 'else', ':', 'return', 'None'] | Tabular summary of strategy composition, broken out by option.
Returns
-------
pd.DataFrame
Columns: kind, position, strike, price, St, payoff, profit. | ['Tabular', 'summary', 'of', 'strategy', 'composition', 'broken', 'out', 'by', 'option', '.', 'Returns', '-------', 'pd', '.', 'DataFrame', 'Columns', ':', 'kind', 'position', 'strike', 'price', 'St', 'payoff', 'profit', '.'] | train | https://github.com/bsolomon1124/pyfinance/blob/c95925209a809b4e648e79cbeaf7711d8e5ff1a6/pyfinance/options.py#L351-L383 |
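The summary is column-wise assembly into a DataFrame; a small sketch with made-up option values showing the same OrderedDict construction:

from collections import OrderedDict
from pandas import DataFrame

res = OrderedDict([
    ("kind",     ["call", "put"]),
    ("position", ["long", "short"]),
    ("strike",   [100.0, 95.0]),
    ("price",    [3.2, 2.1]),
    ("St",       [104.0, 104.0]),
    ("payoff",   [4.0, 0.0]),
    ("profit",   [0.8, 2.1]),
])
print(DataFrame(res))   # one row per option, columns in insertion order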
9,420 | kervi/kervi-core | kervi/values/value_list.py | ValueList.add | def add(self, value_id, name, value_class):
"""
Factory function that creates a value.
:param value_id: id of the value, used to reference the value within this list.
:param value_class: The class of the value that should be created with this function.
"""
item = value_class(
name,
value_id=self.controller.component_id + "." + value_id,
is_input=self.is_input,
index=self.count,
spine = self.controller.spine
)
#if self._inject and self.controller:
# setattr(self.controller, value_id, item)
#setattr(self, value_id, item)
self.count += 1
self._items[value_id] = item
if self.is_input and self.controller:
item.add_observer(self.controller)
return item | python | def add(self, value_id, name, value_class):
"""
Factory function that creates a value.
:param value_id: id of the value, used to reference the value within this list.
:param value_class: The class of the value that should be created with this function.
"""
item = value_class(
name,
value_id=self.controller.component_id + "." + value_id,
is_input=self.is_input,
index=self.count,
spine = self.controller.spine
)
#if self._inject and self.controller:
# setattr(self.controller, value_id, item)
#setattr(self, value_id, item)
self.count += 1
self._items[value_id] = item
if self.is_input and self.controller:
item.add_observer(self.controller)
return item | ['def', 'add', '(', 'self', ',', 'value_id', ',', 'name', ',', 'value_class', ')', ':', 'item', '=', 'value_class', '(', 'name', ',', 'value_id', '=', 'self', '.', 'controller', '.', 'component_id', '+', '"."', '+', 'value_id', ',', 'is_input', '=', 'self', '.', 'is_input', ',', 'index', '=', 'self', '.', 'count', ',', 'spine', '=', 'self', '.', 'controller', '.', 'spine', ')', '#if self._inject and self.controller:', '# setattr(self.controller, value_id, item)', '#setattr(self, value_id, item)', 'self', '.', 'count', '+=', '1', 'self', '.', '_items', '[', 'value_id', ']', '=', 'item', 'if', 'self', '.', 'is_input', 'and', 'self', '.', 'controller', ':', 'item', '.', 'add_observer', '(', 'self', '.', 'controller', ')', 'return', 'item'] | Factory function that creates a value.
:param value_id: id of the value, used to reference the value within this list.
:param value_class: The class of the value that should be created with this function. | ['Factory', 'function', 'that', 'creates', 'a', 'value', '.'] | train | https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/values/value_list.py#L20-L44 |
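A stripped-down, dependency-free sketch of the same factory pattern; the Item/ItemList names are stand-ins, not kervi classes:

class Item:                                   # stand-in for a kervi value class
    def __init__(self, name, value_id, index):
        self.name, self.value_id, self.index = name, value_id, index

class ItemList:
    def __init__(self, owner_id):
        self.owner_id = owner_id
        self.count = 0
        self._items = {}

    def add(self, value_id, name, item_class=Item):
        # Build the item, register it under its short id, keep a running index.
        item = item_class(name, value_id=self.owner_id + "." + value_id, index=self.count)
        self.count += 1
        self._items[value_id] = item
        return item

inputs = ItemList("controller1")
temp = inputs.add("temp", "Temperature")
print(temp.value_id, temp.index)   # controller1.temp 0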
9,421 | skyfielders/python-skyfield | skyfield/earthlib.py | terra | def terra(latitude, longitude, elevation, gast):
"""Compute the position and velocity of a terrestrial observer.
`latitude` - Latitude in radians.
`longitude` - Longitude in radians.
`elevation` - Elevation above sea level in au.
`gast` - Hours of Greenwich Apparent Sidereal Time (can be an array).
The return value is a tuple of two 3-vectors `(pos, vel)` in the
dynamical reference system (the true equator and equinox of date)
whose components are measured in au with respect to the center of
the Earth.
"""
zero = zeros_like(gast)
sinphi = sin(latitude)
cosphi = cos(latitude)
c = 1.0 / sqrt(cosphi * cosphi +
sinphi * sinphi * one_minus_flattening_squared)
s = one_minus_flattening_squared * c
ach = earth_radius_au * c + elevation
ash = earth_radius_au * s + elevation
# Compute local sidereal time factors at the observer's longitude.
stlocl = 15.0 * DEG2RAD * gast + longitude
sinst = sin(stlocl)
cosst = cos(stlocl)
# Compute position vector components in kilometers.
ac = ach * cosphi
acsst = ac * sinst
accst = ac * cosst
pos = array((accst, acsst, zero + ash * sinphi))
# Compute velocity vector components in kilometers/sec.
vel = ANGVEL * DAY_S * array((-acsst, accst, zero))
return pos, vel | python | def terra(latitude, longitude, elevation, gast):
"""Compute the position and velocity of a terrestrial observer.
`latitude` - Latitude in radians.
`longitude` - Longitude in radians.
`elevation` - Elevation above sea level in au.
`gast` - Hours of Greenwich Apparent Sidereal Time (can be an array).
The return value is a tuple of two 3-vectors `(pos, vel)` in the
dynamical reference system (the true equator and equinox of date)
whose components are measured in au with respect to the center of
the Earth.
"""
zero = zeros_like(gast)
sinphi = sin(latitude)
cosphi = cos(latitude)
c = 1.0 / sqrt(cosphi * cosphi +
sinphi * sinphi * one_minus_flattening_squared)
s = one_minus_flattening_squared * c
ach = earth_radius_au * c + elevation
ash = earth_radius_au * s + elevation
# Compute local sidereal time factors at the observer's longitude.
stlocl = 15.0 * DEG2RAD * gast + longitude
sinst = sin(stlocl)
cosst = cos(stlocl)
# Compute position vector components in kilometers.
ac = ach * cosphi
acsst = ac * sinst
accst = ac * cosst
pos = array((accst, acsst, zero + ash * sinphi))
# Compute velocity vector components in kilometers/sec.
vel = ANGVEL * DAY_S * array((-acsst, accst, zero))
return pos, vel | ['def', 'terra', '(', 'latitude', ',', 'longitude', ',', 'elevation', ',', 'gast', ')', ':', 'zero', '=', 'zeros_like', '(', 'gast', ')', 'sinphi', '=', 'sin', '(', 'latitude', ')', 'cosphi', '=', 'cos', '(', 'latitude', ')', 'c', '=', '1.0', '/', 'sqrt', '(', 'cosphi', '*', 'cosphi', '+', 'sinphi', '*', 'sinphi', '*', 'one_minus_flattening_squared', ')', 's', '=', 'one_minus_flattening_squared', '*', 'c', 'ach', '=', 'earth_radius_au', '*', 'c', '+', 'elevation', 'ash', '=', 'earth_radius_au', '*', 's', '+', 'elevation', "# Compute local sidereal time factors at the observer's longitude.", 'stlocl', '=', '15.0', '*', 'DEG2RAD', '*', 'gast', '+', 'longitude', 'sinst', '=', 'sin', '(', 'stlocl', ')', 'cosst', '=', 'cos', '(', 'stlocl', ')', '# Compute position vector components in kilometers.', 'ac', '=', 'ach', '*', 'cosphi', 'acsst', '=', 'ac', '*', 'sinst', 'accst', '=', 'ac', '*', 'cosst', 'pos', '=', 'array', '(', '(', 'accst', ',', 'acsst', ',', 'zero', '+', 'ash', '*', 'sinphi', ')', ')', '# Compute velocity vector components in kilometers/sec.', 'vel', '=', 'ANGVEL', '*', 'DAY_S', '*', 'array', '(', '(', '-', 'acsst', ',', 'accst', ',', 'zero', ')', ')', 'return', 'pos', ',', 'vel'] | Compute the position and velocity of a terrestrial observer.
`latitude` - Latitude in radians.
`longitude` - Longitude in radians.
`elevation` - Elevation above sea level in au.
`gast` - Hours of Greenwich Apparent Sidereal Time (can be an array).
The return value is a tuple of two 3-vectors `(pos, vel)` in the
dynamical reference system (the true equator and equinox of date)
whose components are measured in au with respect to the center of
the Earth. | ['Compute', 'the', 'position', 'and', 'velocity', 'of', 'a', 'terrestrial', 'observer', '.'] | train | https://github.com/skyfielders/python-skyfield/blob/51d9e042e06457f6b1f2415296d50a38cb3a300f/skyfield/earthlib.py#L15-L55 |
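A numpy-only sketch of the position part of the computation above; the Earth radius and flattening constants are assumptions inserted for illustration, not the module's exact values:

import numpy as np

AU_KM = 149597870.7
earth_radius_au = 6378.1366 / AU_KM                            # assumed equatorial radius (km -> au)
one_minus_flattening_squared = (1.0 - 1.0 / 298.25642) ** 2    # assumed flattening
DEG2RAD = np.pi / 180.0

def observer_position(latitude, longitude, elevation_au, gast_hours):
    sinphi, cosphi = np.sin(latitude), np.cos(latitude)
    c = 1.0 / np.sqrt(cosphi ** 2 + sinphi ** 2 * one_minus_flattening_squared)
    s = one_minus_flattening_squared * c
    ach = earth_radius_au * c + elevation_au
    ash = earth_radius_au * s + elevation_au
    stlocl = 15.0 * DEG2RAD * gast_hours + longitude           # local sidereal angle
    return np.array([ach * cosphi * np.cos(stlocl),
                     ach * cosphi * np.sin(stlocl),
                     ash * sinphi])

print(observer_position(52.0 * DEG2RAD, 13.4 * DEG2RAD, 0.0, 6.5))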
9,422 | kapadia/usgs | usgs/payloads.py | download | def download(dataset, node, entityids, products, api_key=None):
"""
The use of this request will be to obtain valid data download URLs.
:param dataset:
:param entityIds:
list
:param products:
list
:param node:
:param api_key:
API key is required.
"""
payload = {
"datasetName": dataset,
"node": node,
"apiKey": api_key,
"entityIds": entityids,
"products": products
}
return json.dumps(payload) | python | def download(dataset, node, entityids, products, api_key=None):
"""
The use of this request will be to obtain valid data download URLs.
:param dataset:
:param entityIds:
list
:param products:
list
:param node:
:param api_key:
API key is required.
"""
payload = {
"datasetName": dataset,
"node": node,
"apiKey": api_key,
"entityIds": entityids,
"products": products
}
return json.dumps(payload) | ['def', 'download', '(', 'dataset', ',', 'node', ',', 'entityids', ',', 'products', ',', 'api_key', '=', 'None', ')', ':', 'payload', '=', '{', '"datasetName"', ':', 'dataset', ',', '"node"', ':', 'node', ',', '"apiKey"', ':', 'api_key', ',', '"entityIds"', ':', 'entityids', ',', '"products"', ':', 'products', '}', 'return', 'json', '.', 'dumps', '(', 'payload', ')'] | The use of this request will be to obtain valid data download URLs.
:param dataset:
:param entityIds:
list
:param products:
list
:param node:
:param api_key:
API key is required. | ['The', 'use', 'of', 'this', 'request', 'will', 'be', 'to', 'obtain', 'valid', 'data', 'download', 'URLs', '.', ':', 'param', 'dataset', ':', ':', 'param', 'entityIds', ':', 'list', ':', 'param', 'products', ':', 'list', ':', 'param', 'node', ':', ':', 'param', 'api_key', ':', 'API', 'key', 'is', 'required', '.'] | train | https://github.com/kapadia/usgs/blob/0608346f0bc3c34e20f4ecc77ad71d0b514db7ee/usgs/payloads.py#L128-L154 |
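The payload is plain JSON, so the request body can be checked standalone; the dataset, node, and entity ids below are example values, not guaranteed USGS identifiers:

import json

payload = json.dumps({
    "datasetName": "LANDSAT_8_C1",      # example values only
    "node": "EE",
    "apiKey": "YOUR_API_KEY",
    "entityIds": ["LC80130292014100LGN00"],
    "products": ["STANDARD"],
})
print(payload)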
9,423 | google/prettytensor | prettytensor/tutorial/data_utils.py | maybe_download | def maybe_download(url, filename):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = request.urlretrieve(url + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath | python | def maybe_download(url, filename):
"""Download the data from Yann's website, unless it's already here."""
if not os.path.exists(WORK_DIRECTORY):
os.mkdir(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not os.path.exists(filepath):
filepath, _ = request.urlretrieve(url + filename, filepath)
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return filepath | ['def', 'maybe_download', '(', 'url', ',', 'filename', ')', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'WORK_DIRECTORY', ')', ':', 'os', '.', 'mkdir', '(', 'WORK_DIRECTORY', ')', 'filepath', '=', 'os', '.', 'path', '.', 'join', '(', 'WORK_DIRECTORY', ',', 'filename', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'filepath', ')', ':', 'filepath', ',', '_', '=', 'request', '.', 'urlretrieve', '(', 'url', '+', 'filename', ',', 'filepath', ')', 'statinfo', '=', 'os', '.', 'stat', '(', 'filepath', ')', 'print', '(', "'Successfully downloaded'", ',', 'filename', ',', 'statinfo', '.', 'st_size', ',', "'bytes.'", ')', 'return', 'filepath'] | Download the data from Yann's website, unless it's already here. | ['Download', 'the', 'data', 'from', 'Yann', 's', 'website', 'unless', 'it', 's', 'already', 'here', '.'] | train | https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/tutorial/data_utils.py#L37-L46 |
9,424 | PMEAL/OpenPNM | openpnm/algorithms/FickianDiffusion.py | FickianDiffusion.calc_effective_diffusivity | def calc_effective_diffusivity(self, inlets=None, outlets=None,
domain_area=None, domain_length=None):
r"""
This calculates the effective diffusivity in this linear transport
algorithm.
Parameters
----------
inlets : array_like
The pores where the inlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
outlets : array_like
The pores where the outlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
domain_area : scalar, optional
The area of the inlet (and outlet) boundary faces. If not given
then an attempt is made to estimate it, but it is usually
underestimated.
domain_length : scalar, optional
The length of the domain between the inlet and outlet boundary
faces. If not given then an attempt is made to estimate it, but it
is usually underestimated.
Notes
-----
The area and length of the domain are found using the bounding box
around the inlet and outlet pores which do not necessarily lie on the
edge of the domain, resulting in underestimation of sizes.
"""
return self._calc_eff_prop(inlets=inlets, outlets=outlets,
domain_area=domain_area,
domain_length=domain_length) | python | def calc_effective_diffusivity(self, inlets=None, outlets=None,
domain_area=None, domain_length=None):
r"""
This calculates the effective diffusivity in this linear transport
algorithm.
Parameters
----------
inlets : array_like
The pores where the inlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
outlets : array_like
The pores where the outlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
domain_area : scalar, optional
The area of the inlet (and outlet) boundary faces. If not given
then an attempt is made to estimate it, but it is usually
underestimated.
domain_length : scalar, optional
The length of the domain between the inlet and outlet boundary
faces. If not given then an attempt is made to estimate it, but it
is usually underestimated.
Notes
-----
The area and length of the domain are found using the bounding box
around the inlet and outlet pores which do not necessarily lie on the
edge of the domain, resulting in underestimation of sizes.
"""
return self._calc_eff_prop(inlets=inlets, outlets=outlets,
domain_area=domain_area,
domain_length=domain_length) | ['def', 'calc_effective_diffusivity', '(', 'self', ',', 'inlets', '=', 'None', ',', 'outlets', '=', 'None', ',', 'domain_area', '=', 'None', ',', 'domain_length', '=', 'None', ')', ':', 'return', 'self', '.', '_calc_eff_prop', '(', 'inlets', '=', 'inlets', ',', 'outlets', '=', 'outlets', ',', 'domain_area', '=', 'domain_area', ',', 'domain_length', '=', 'domain_length', ')'] | r"""
This calculates the effective diffusivity in this linear transport
algorithm.
Parameters
----------
inlets : array_like
The pores where the inlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
outlets : array_like
The pores where the outlet composition boundary conditions were
applied. If not given an attempt is made to infer them from the
algorithm.
domain_area : scalar, optional
The area of the inlet (and outlet) boundary faces. If not given
then an attempt is made to estimate it, but it is usually
underestimated.
domain_length : scalar, optional
The length of the domain between the inlet and outlet boundary
faces. If not given then an attempt is made to estimate it, but it
is usually underestimated.
Notes
-----
The area and length of the domain are found using the bounding box
around the inlet and outlet pores which do not necessarily lie on the
edge of the domain, resulting in underestimation of sizes. | ['r', 'This', 'calculates', 'the', 'effective', 'diffusivity', 'in', 'this', 'linear', 'transport', 'algorithm', '.'] | train | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/FickianDiffusion.py#L99-L135 |
9,425 | deepmind/pysc2 | pysc2/lib/features.py | Features.available_actions | def available_actions(self, obs):
"""Return the list of available action ids."""
available_actions = set()
hide_specific_actions = self._agent_interface_format.hide_specific_actions
for i, func in six.iteritems(actions.FUNCTIONS_AVAILABLE):
if func.avail_fn(obs):
available_actions.add(i)
for a in obs.abilities:
if a.ability_id not in actions.ABILITY_IDS:
logging.warning("Unknown ability %s seen as available.", a.ability_id)
continue
for func in actions.ABILITY_IDS[a.ability_id]:
if func.function_type in actions.POINT_REQUIRED_FUNCS[a.requires_point]:
if func.general_id == 0 or not hide_specific_actions:
available_actions.add(func.id)
if func.general_id != 0: # Always offer generic actions.
for general_func in actions.ABILITY_IDS[func.general_id]:
if general_func.function_type is func.function_type:
# Only the right type. Don't want to expose the general action
# to minimap if only the screen version is available.
available_actions.add(general_func.id)
break
return list(available_actions) | python | def available_actions(self, obs):
"""Return the list of available action ids."""
available_actions = set()
hide_specific_actions = self._agent_interface_format.hide_specific_actions
for i, func in six.iteritems(actions.FUNCTIONS_AVAILABLE):
if func.avail_fn(obs):
available_actions.add(i)
for a in obs.abilities:
if a.ability_id not in actions.ABILITY_IDS:
logging.warning("Unknown ability %s seen as available.", a.ability_id)
continue
for func in actions.ABILITY_IDS[a.ability_id]:
if func.function_type in actions.POINT_REQUIRED_FUNCS[a.requires_point]:
if func.general_id == 0 or not hide_specific_actions:
available_actions.add(func.id)
if func.general_id != 0: # Always offer generic actions.
for general_func in actions.ABILITY_IDS[func.general_id]:
if general_func.function_type is func.function_type:
# Only the right type. Don't want to expose the general action
# to minimap if only the screen version is available.
available_actions.add(general_func.id)
break
return list(available_actions) | ['def', 'available_actions', '(', 'self', ',', 'obs', ')', ':', 'available_actions', '=', 'set', '(', ')', 'hide_specific_actions', '=', 'self', '.', '_agent_interface_format', '.', 'hide_specific_actions', 'for', 'i', ',', 'func', 'in', 'six', '.', 'iteritems', '(', 'actions', '.', 'FUNCTIONS_AVAILABLE', ')', ':', 'if', 'func', '.', 'avail_fn', '(', 'obs', ')', ':', 'available_actions', '.', 'add', '(', 'i', ')', 'for', 'a', 'in', 'obs', '.', 'abilities', ':', 'if', 'a', '.', 'ability_id', 'not', 'in', 'actions', '.', 'ABILITY_IDS', ':', 'logging', '.', 'warning', '(', '"Unknown ability %s seen as available."', ',', 'a', '.', 'ability_id', ')', 'continue', 'for', 'func', 'in', 'actions', '.', 'ABILITY_IDS', '[', 'a', '.', 'ability_id', ']', ':', 'if', 'func', '.', 'function_type', 'in', 'actions', '.', 'POINT_REQUIRED_FUNCS', '[', 'a', '.', 'requires_point', ']', ':', 'if', 'func', '.', 'general_id', '==', '0', 'or', 'not', 'hide_specific_actions', ':', 'available_actions', '.', 'add', '(', 'func', '.', 'id', ')', 'if', 'func', '.', 'general_id', '!=', '0', ':', '# Always offer generic actions.', 'for', 'general_func', 'in', 'actions', '.', 'ABILITY_IDS', '[', 'func', '.', 'general_id', ']', ':', 'if', 'general_func', '.', 'function_type', 'is', 'func', '.', 'function_type', ':', "# Only the right type. Don't want to expose the general action", '# to minimap if only the screen version is available.', 'available_actions', '.', 'add', '(', 'general_func', '.', 'id', ')', 'break', 'return', 'list', '(', 'available_actions', ')'] | Return the list of available action ids. | ['Return', 'the', 'list', 'of', 'available', 'action', 'ids', '.'] | train | https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/features.py#L1057-L1079 |
9,426 | ionelmc/python-cogen | cogen/core/pubsub.py | PublishSubscribeQueue.compact | def compact(self):
"""Compacts the queue: removes all the messages from the queue that
have been fetched by all the subscribed coroutines.
Returns the number of messages that have been removed."""
if self.subscribers:
level = min(self.subscribers.itervalues())
if level:
del self.messages[:level]
return level
else:
level = len(self.messages)
del self.messages[:]
return level | python | def compact(self):
"""Compacts the queue: removes all the messages from the queue that
have been fetched by all the subscribed coroutines.
Returns the number of messages that have been removed."""
if self.subscribers:
level = min(self.subscribers.itervalues())
if level:
del self.messages[:level]
return level
else:
level = len(self.messages)
del self.messages[:]
return level | ['def', 'compact', '(', 'self', ')', ':', 'if', 'self', '.', 'subscribers', ':', 'level', '=', 'min', '(', 'self', '.', 'subscribers', '.', 'itervalues', '(', ')', ')', 'if', 'level', ':', 'del', 'self', '.', 'messages', '[', ':', 'level', ']', 'return', 'level', 'else', ':', 'level', '=', 'len', '(', 'self', '.', 'messages', ')', 'del', 'self', '.', 'messages', '[', ':', ']', 'return', 'level'] | Compacts the queue: removes all the messages from the queue that
have been fetched by all the subscribed coroutines.
Returns the number of messages that have been removed. | ['Compacts', 'the', 'queue', ':', 'removes', 'all', 'the', 'messages', 'from', 'the', 'queue', 'that', 'have', 'been', 'fetched', 'by', 'all', 'the', 'subscribed', 'coroutines', '.', 'Returns', 'the', 'number', 'of', 'messages', 'that', 'have', 'been', 'removed', '.'] | train | https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/core/pubsub.py#L129-L141 |
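A plain-Python sketch of the same compaction rule, dropping every message already fetched by the slowest subscriber; the subscriber bookkeeping here is simplified relative to cogen's:

class PubSubQueue:
    def __init__(self):
        self.messages = []
        self.subscribers = {}                # subscriber -> index of next unread message

    def publish(self, message):
        self.messages.append(message)

    def fetch(self, subscriber):
        level = self.subscribers.get(subscriber, 0)
        unread = self.messages[level:]
        self.subscribers[subscriber] = len(self.messages)
        return unread

    def compact(self):
        # Remove messages every subscriber has already seen; return how many.
        level = min(self.subscribers.values()) if self.subscribers else len(self.messages)
        del self.messages[:level]
        for subscriber in self.subscribers:
            self.subscribers[subscriber] -= level
        return level

queue = PubSubQueue()
for i in range(3):
    queue.publish(i)
queue.fetch("worker-a")
print(queue.compact())   # worker-a has read everything and is the only subscriber -> 3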
9,427 | watchforstock/evohome-client | evohomeclient2/__init__.py | EvohomeClient._headers | def _headers(self):
"""Ensure the Authorization Header has a valid Access Token."""
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token} | python | def _headers(self):
"""Ensure the Authorization Header has a valid Access Token."""
if not self.access_token or not self.access_token_expires:
self._basic_login()
elif datetime.now() > self.access_token_expires - timedelta(seconds=30):
self._basic_login()
return {'Accept': HEADER_ACCEPT,
'Authorization': 'bearer ' + self.access_token} | ['def', '_headers', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'access_token', 'or', 'not', 'self', '.', 'access_token_expires', ':', 'self', '.', '_basic_login', '(', ')', 'elif', 'datetime', '.', 'now', '(', ')', '>', 'self', '.', 'access_token_expires', '-', 'timedelta', '(', 'seconds', '=', '30', ')', ':', 'self', '.', '_basic_login', '(', ')', 'return', '{', "'Accept'", ':', 'HEADER_ACCEPT', ',', "'Authorization'", ':', "'bearer '", '+', 'self', '.', 'access_token', '}'] | Ensure the Authorization Header has a valid Access Token. | ['Ensure', 'the', 'Authorization', 'Header', 'has', 'a', 'valid', 'Access', 'Token', '.'] | train | https://github.com/watchforstock/evohome-client/blob/f1cb9273e97946d79c0651f00a218abbf7ada53a/evohomeclient2/__init__.py#L84-L93 |
9,428 | spotify/luigi | luigi/contrib/hadoop.py | JobTask.run_mapper | def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout) | python | def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout) | ['def', 'run_mapper', '(', 'self', ',', 'stdin', '=', 'sys', '.', 'stdin', ',', 'stdout', '=', 'sys', '.', 'stdout', ')', ':', 'self', '.', 'init_hadoop', '(', ')', 'self', '.', 'init_mapper', '(', ')', 'outputs', '=', 'self', '.', '_map_input', '(', '(', 'line', '[', ':', '-', '1', ']', 'for', 'line', 'in', 'stdin', ')', ')', 'if', 'self', '.', 'reducer', '==', 'NotImplemented', ':', 'self', '.', 'writer', '(', 'outputs', ',', 'stdout', ')', 'else', ':', 'self', '.', 'internal_writer', '(', 'outputs', ',', 'stdout', ')'] | Run the mapper on the hadoop node. | ['Run', 'the', 'mapper', 'on', 'the', 'hadoop', 'node', '.'] | train | https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/hadoop.py#L1018-L1028 |
9,429 | saltstack/salt | salt/utils/openstack/nova.py | SaltNova._get_version_from_url | def _get_version_from_url(self, url):
'''
Extract API version from provided URL
'''
regex = re.compile(r"^https?:\/\/.*\/(v[0-9])(\.[0-9])?(\/)?$")
try:
ver = regex.match(url)
if ver.group(1):
retver = ver.group(1)
if ver.group(2):
retver = retver + ver.group(2)
return retver
except AttributeError:
return '' | python | def _get_version_from_url(self, url):
'''
Extract API version from provided URL
'''
regex = re.compile(r"^https?:\/\/.*\/(v[0-9])(\.[0-9])?(\/)?$")
try:
ver = regex.match(url)
if ver.group(1):
retver = ver.group(1)
if ver.group(2):
retver = retver + ver.group(2)
return retver
except AttributeError:
return '' | ['def', '_get_version_from_url', '(', 'self', ',', 'url', ')', ':', 'regex', '=', 're', '.', 'compile', '(', 'r"^https?:\\/\\/.*\\/(v[0-9])(\\.[0-9])?(\\/)?$"', ')', 'try', ':', 'ver', '=', 'regex', '.', 'match', '(', 'url', ')', 'if', 'ver', '.', 'group', '(', '1', ')', ':', 'retver', '=', 'ver', '.', 'group', '(', '1', ')', 'if', 'ver', '.', 'group', '(', '2', ')', ':', 'retver', '=', 'retver', '+', 'ver', '.', 'group', '(', '2', ')', 'return', 'retver', 'except', 'AttributeError', ':', 'return', "''"] | Extract API version from provided URL | ['Extract', 'API', 'version', 'from', 'provided', 'URL'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/openstack/nova.py#L268-L281 |
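The regex can be exercised standalone; a quick check of the extraction on example keystone-style endpoint URLs:

import re

regex = re.compile(r"^https?:\/\/.*\/(v[0-9])(\.[0-9])?(\/)?$")

def version_from_url(url):
    try:
        match = regex.match(url)
        version = match.group(1)                 # AttributeError when there is no match
        if match.group(2):
            version += match.group(2)
        return version
    except AttributeError:
        return ''

print(version_from_url("https://keystone.example:5000/v2.0/"))   # v2.0
print(version_from_url("https://keystone.example:5000/"))        # '' (no version segment)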
9,430 | lra/mackup | mackup/config.py | Config._parse_apps_to_ignore | def _parse_apps_to_ignore(self):
"""
Parse the applications to ignore in the config.
Returns:
set
"""
# We ignore nothing by default
apps_to_ignore = set()
# Is the "[applications_to_ignore]" in the cfg file ?
section_title = 'applications_to_ignore'
if self._parser.has_section(section_title):
apps_to_ignore = set(self._parser.options(section_title))
return apps_to_ignore | python | def _parse_apps_to_ignore(self):
"""
Parse the applications to ignore in the config.
Returns:
set
"""
# We ignore nothing by default
apps_to_ignore = set()
# Is the "[applications_to_ignore]" in the cfg file ?
section_title = 'applications_to_ignore'
if self._parser.has_section(section_title):
apps_to_ignore = set(self._parser.options(section_title))
return apps_to_ignore | ['def', '_parse_apps_to_ignore', '(', 'self', ')', ':', '# We ignore nothing by default', 'apps_to_ignore', '=', 'set', '(', ')', '# Is the "[applications_to_ignore]" in the cfg file ?', 'section_title', '=', "'applications_to_ignore'", 'if', 'self', '.', '_parser', '.', 'has_section', '(', 'section_title', ')', ':', 'apps_to_ignore', '=', 'set', '(', 'self', '.', '_parser', '.', 'options', '(', 'section_title', ')', ')', 'return', 'apps_to_ignore'] | Parse the applications to ignore in the config.
Returns:
set | ['Parse', 'the', 'applications', 'to', 'ignore', 'in', 'the', 'config', '.'] | train | https://github.com/lra/mackup/blob/ed0b5626b033f232868900bfd5108df448873725/mackup/config.py#L241-L256 |
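A standard-library sketch of reading that optional section; the section name follows the mackup convention, the file content is made up:

import configparser

cfg_text = """
[applications_to_ignore]
ssh
git
"""

parser = configparser.ConfigParser(allow_no_value=True)   # options here carry no values
parser.read_string(cfg_text)

apps_to_ignore = set()
if parser.has_section("applications_to_ignore"):
    apps_to_ignore = set(parser.options("applications_to_ignore"))
print(apps_to_ignore)   # {'ssh', 'git'}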
9,431 | pdkit/pdkit | pdkit/gait_processor.py | GaitProcessor.walk_direction_preheel | def walk_direction_preheel(self, data_frame):
"""
Estimate local walk (not cardinal) direction with pre-heel strike phase.
Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors"
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
:return: Unit vector of local walk (not cardinal) direction.
:rtype: numpy.ndarray
"""
# Sum of absolute values across accelerometer axes:
data = data_frame.x.abs() + data_frame.y.abs() + data_frame.z.abs()
# Find maximum peaks of smoothed data:
dummy, ipeaks_smooth = self.heel_strikes(data)
data = data.values
# Compute number of samples between peaks using the real part of the FFT:
interpeak = compute_interpeak(data, self.sampling_frequency)
decel = np.int(np.round(self.stride_fraction * interpeak))
# Find maximum peaks close to maximum peaks of smoothed data:
ipeaks = []
for ipeak_smooth in ipeaks_smooth:
ipeak = np.argmax(data[ipeak_smooth - decel:ipeak_smooth + decel])
ipeak += ipeak_smooth - decel
ipeaks.append(ipeak)
# Compute the average vector for each deceleration phase:
vectors = []
for ipeak in ipeaks:
decel_vectors = np.asarray([[data_frame.x[i], data_frame.y[i], data_frame.z[i]]
for i in range(ipeak - decel, ipeak)])
vectors.append(np.mean(decel_vectors, axis=0))
# Compute the average deceleration vector and take the opposite direction:
direction = -1 * np.mean(vectors, axis=0)
# Return the unit vector in this direction:
direction /= np.sqrt(direction.dot(direction))
return direction | python | def walk_direction_preheel(self, data_frame):
"""
Estimate local walk (not cardinal) direction with pre-heel strike phase.
Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors"
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
:return: Unit vector of local walk (not cardinal) direction.
:rtype: numpy.ndarray
"""
# Sum of absolute values across accelerometer axes:
data = data_frame.x.abs() + data_frame.y.abs() + data_frame.z.abs()
# Find maximum peaks of smoothed data:
dummy, ipeaks_smooth = self.heel_strikes(data)
data = data.values
# Compute number of samples between peaks using the real part of the FFT:
interpeak = compute_interpeak(data, self.sampling_frequency)
decel = np.int(np.round(self.stride_fraction * interpeak))
# Find maximum peaks close to maximum peaks of smoothed data:
ipeaks = []
for ipeak_smooth in ipeaks_smooth:
ipeak = np.argmax(data[ipeak_smooth - decel:ipeak_smooth + decel])
ipeak += ipeak_smooth - decel
ipeaks.append(ipeak)
# Compute the average vector for each deceleration phase:
vectors = []
for ipeak in ipeaks:
decel_vectors = np.asarray([[data_frame.x[i], data_frame.y[i], data_frame.z[i]]
for i in range(ipeak - decel, ipeak)])
vectors.append(np.mean(decel_vectors, axis=0))
# Compute the average deceleration vector and take the opposite direction:
direction = -1 * np.mean(vectors, axis=0)
# Return the unit vector in this direction:
direction /= np.sqrt(direction.dot(direction))
return direction | ['def', 'walk_direction_preheel', '(', 'self', ',', 'data_frame', ')', ':', '# Sum of absolute values across accelerometer axes:', 'data', '=', 'data_frame', '.', 'x', '.', 'abs', '(', ')', '+', 'data_frame', '.', 'y', '.', 'abs', '(', ')', '+', 'data_frame', '.', 'z', '.', 'abs', '(', ')', '# Find maximum peaks of smoothed data:', 'dummy', ',', 'ipeaks_smooth', '=', 'self', '.', 'heel_strikes', '(', 'data', ')', 'data', '=', 'data', '.', 'values', '# Compute number of samples between peaks using the real part of the FFT:', 'interpeak', '=', 'compute_interpeak', '(', 'data', ',', 'self', '.', 'sampling_frequency', ')', 'decel', '=', 'np', '.', 'int', '(', 'np', '.', 'round', '(', 'self', '.', 'stride_fraction', '*', 'interpeak', ')', ')', '# Find maximum peaks close to maximum peaks of smoothed data:', 'ipeaks', '=', '[', ']', 'for', 'ipeak_smooth', 'in', 'ipeaks_smooth', ':', 'ipeak', '=', 'np', '.', 'argmax', '(', 'data', '[', 'ipeak_smooth', '-', 'decel', ':', 'ipeak_smooth', '+', 'decel', ']', ')', 'ipeak', '+=', 'ipeak_smooth', '-', 'decel', 'ipeaks', '.', 'append', '(', 'ipeak', ')', '# Compute the average vector for each deceleration phase:', 'vectors', '=', '[', ']', 'for', 'ipeak', 'in', 'ipeaks', ':', 'decel_vectors', '=', 'np', '.', 'asarray', '(', '[', '[', 'data_frame', '.', 'x', '[', 'i', ']', ',', 'data_frame', '.', 'y', '[', 'i', ']', ',', 'data_frame', '.', 'z', '[', 'i', ']', ']', 'for', 'i', 'in', 'range', '(', 'ipeak', '-', 'decel', ',', 'ipeak', ')', ']', ')', 'vectors', '.', 'append', '(', 'np', '.', 'mean', '(', 'decel_vectors', ',', 'axis', '=', '0', ')', ')', '# Compute the average deceleration vector and take the opposite direction:', 'direction', '=', '-', '1', '*', 'np', '.', 'mean', '(', 'vectors', ',', 'axis', '=', '0', ')', '# Return the unit vector in this direction:', 'direction', '/=', 'np', '.', 'sqrt', '(', 'direction', '.', 'dot', '(', 'direction', ')', ')', 'return', 'direction'] | Estimate local walk (not cardinal) direction with pre-heel strike phase.
Inspired by Nirupam Roy's B.E. thesis: "WalkCompass: Finding Walking Direction Leveraging Smartphone's Inertial Sensors"
:param data_frame: The data frame. It should have x, y, and z columns.
:type data_frame: pandas.DataFrame
:return: Unit vector of local walk (not cardinal) direction.
:rtype: numpy.ndarray | ['Estimate', 'local', 'walk', '(', 'not', 'cardinal', ')', 'direction', 'with', 'pre', '-', 'heel', 'strike', 'phase', '.'] | train | https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/gait_processor.py#L268-L311 |
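The final direction estimate reduces to averaging per-step deceleration vectors, flipping the sign, and normalising; a numpy sketch with synthetic accelerometer samples standing in for real data:

import numpy as np

# Two deceleration windows, each a few (x, y, z) accelerometer samples (synthetic).
decel_windows = [
    np.array([[0.9, 0.1, 9.7], [1.1, 0.0, 9.9]]),
    np.array([[1.0, 0.2, 9.8], [0.8, 0.1, 9.6]]),
]
vectors = [window.mean(axis=0) for window in decel_windows]

direction = -1 * np.mean(vectors, axis=0)          # opposite of the mean deceleration
direction /= np.sqrt(direction.dot(direction))     # unit vector
print(direction)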
9,432 | tensorflow/tensor2tensor | tensor2tensor/data_generators/imagenet.py | _normalize | def _normalize(image):
"""Normalize the image to zero mean and unit variance."""
offset = tf.constant(MEAN_RGB, shape=[1, 1, 3])
image -= offset
scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3])
image /= scale
return image | python | def _normalize(image):
"""Normalize the image to zero mean and unit variance."""
offset = tf.constant(MEAN_RGB, shape=[1, 1, 3])
image -= offset
scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3])
image /= scale
return image | ['def', '_normalize', '(', 'image', ')', ':', 'offset', '=', 'tf', '.', 'constant', '(', 'MEAN_RGB', ',', 'shape', '=', '[', '1', ',', '1', ',', '3', ']', ')', 'image', '-=', 'offset', 'scale', '=', 'tf', '.', 'constant', '(', 'STDDEV_RGB', ',', 'shape', '=', '[', '1', ',', '1', ',', '3', ']', ')', 'image', '/=', 'scale', 'return', 'image'] | Normalize the image to zero mean and unit variance. | ['Normalize', 'the', 'image', 'to', 'zero', 'mean', 'and', 'unit', 'variance', '.'] | train | https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/imagenet.py#L581-L588 |
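The same per-channel normalisation in numpy, with placeholder mean/stddev values rather than the module's actual MEAN_RGB / STDDEV_RGB constants:

import numpy as np

MEAN_RGB = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)     # placeholder values
STDDEV_RGB = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)   # placeholder values

image = np.random.rand(224, 224, 3).astype(np.float32)
normalized = (image - MEAN_RGB) / STDDEV_RGB
print(normalized.mean(axis=(0, 1)))   # roughly centred per channel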
9,433 | oisinmulvihill/stomper | lib/stomper/stomp_10.py | Frame.setCmd | def setCmd(self, cmd):
"""Check the cmd is valid, FrameError will be raised if its not."""
cmd = cmd.upper()
if cmd not in VALID_COMMANDS:
raise FrameError("The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % (
cmd, VALID_COMMANDS, STOMP_VERSION)
)
else:
self._cmd = cmd | python | def setCmd(self, cmd):
"""Check the cmd is valid, FrameError will be raised if its not."""
cmd = cmd.upper()
if cmd not in VALID_COMMANDS:
raise FrameError("The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % (
cmd, VALID_COMMANDS, STOMP_VERSION)
)
else:
self._cmd = cmd | ['def', 'setCmd', '(', 'self', ',', 'cmd', ')', ':', 'cmd', '=', 'cmd', '.', 'upper', '(', ')', 'if', 'cmd', 'not', 'in', 'VALID_COMMANDS', ':', 'raise', 'FrameError', '(', '"The cmd \'%s\' is not valid! It must be one of \'%s\' (STOMP v%s)."', '%', '(', 'cmd', ',', 'VALID_COMMANDS', ',', 'STOMP_VERSION', ')', ')', 'else', ':', 'self', '.', '_cmd', '=', 'cmd'] | Check the cmd is valid, FrameError will be raised if its not. | ['Check', 'the', 'cmd', 'is', 'valid', 'FrameError', 'will', 'be', 'raised', 'if', 'its', 'not', '.'] | train | https://github.com/oisinmulvihill/stomper/blob/842ed2353a4ddd638d35929ae5b7b70eb298305c/lib/stomper/stomp_10.py#L119-L127 |
9,434 | palankai/pyrs-schema | pyrs/schema/types.py | Object.to_raw | def to_raw(self, value, context=None):
"""Convert the value to a JSON compatible value"""
if value is None:
return None
res = {}
value = value.copy()
errors = []
for field in list(set(value) & set(self._fields)):
schema = self._fields.get(field)
name = schema.get_attr('name', field)
try:
res[name] = \
schema.to_raw(value.pop(field), context=context)
except exceptions.ValidationErrors as ex:
self._update_errors_by_exception(errors, ex, name)
self._raise_exception_when_errors(errors, value)
res.update(value)
return res | python | def to_raw(self, value, context=None):
"""Convert the value to a JSON compatible value"""
if value is None:
return None
res = {}
value = value.copy()
errors = []
for field in list(set(value) & set(self._fields)):
schema = self._fields.get(field)
name = schema.get_attr('name', field)
try:
res[name] = \
schema.to_raw(value.pop(field), context=context)
except exceptions.ValidationErrors as ex:
self._update_errors_by_exception(errors, ex, name)
self._raise_exception_when_errors(errors, value)
res.update(value)
return res | ['def', 'to_raw', '(', 'self', ',', 'value', ',', 'context', '=', 'None', ')', ':', 'if', 'value', 'is', 'None', ':', 'return', 'None', 'res', '=', '{', '}', 'value', '=', 'value', '.', 'copy', '(', ')', 'errors', '=', '[', ']', 'for', 'field', 'in', 'list', '(', 'set', '(', 'value', ')', '&', 'set', '(', 'self', '.', '_fields', ')', ')', ':', 'schema', '=', 'self', '.', '_fields', '.', 'get', '(', 'field', ')', 'name', '=', 'schema', '.', 'get_attr', '(', "'name'", ',', 'field', ')', 'try', ':', 'res', '[', 'name', ']', '=', 'schema', '.', 'to_raw', '(', 'value', '.', 'pop', '(', 'field', ')', ',', 'context', '=', 'context', ')', 'except', 'exceptions', '.', 'ValidationErrors', 'as', 'ex', ':', 'self', '.', '_update_errors_by_exception', '(', 'errors', ',', 'ex', ',', 'name', ')', 'self', '.', '_raise_exception_when_errors', '(', 'errors', ',', 'value', ')', 'res', '.', 'update', '(', 'value', ')', 'return', 'res'] | Convert the value to a JSON compatible value | ['Convert', 'the', 'value', 'to', 'a', 'JSON', 'compatible', 'value'] | train | https://github.com/palankai/pyrs-schema/blob/6bcde02e74d8fc3fa889f00f8e661e6d6af24a4f/pyrs/schema/types.py#L316-L334 |
9,435 | johntruckenbrodt/spatialist | spatialist/explorer.py | RasterViewer.__reset_crosshair | def __reset_crosshair(self):
"""
redraw the cross-hair on the horizontal slice plot
Parameters
----------
x: int
the x image coordinate
y: int
the y image coordinate
Returns
-------
"""
self.lhor.set_ydata(self.y_coord)
self.lver.set_xdata(self.x_coord) | python | def __reset_crosshair(self):
"""
redraw the cross-hair on the horizontal slice plot
Parameters
----------
x: int
the x image coordinate
y: int
the y image coordinate
Returns
-------
"""
self.lhor.set_ydata(self.y_coord)
self.lver.set_xdata(self.x_coord) | ['def', '__reset_crosshair', '(', 'self', ')', ':', 'self', '.', 'lhor', '.', 'set_ydata', '(', 'self', '.', 'y_coord', ')', 'self', '.', 'lver', '.', 'set_xdata', '(', 'self', '.', 'x_coord', ')'] | redraw the cross-hair on the horizontal slice plot
Parameters
----------
x: int
the x image coordinate
y: int
the y image coordinate
Returns
------- | ['redraw', 'the', 'cross', '-', 'hair', 'on', 'the', 'horizontal', 'slice', 'plot'] | train | https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/explorer.py#L243-L258 |
9,436 | mathiasertl/django-ca | ca/django_ca/management/base.py | BaseCommand.add_format | def add_format(self, parser, default=Encoding.PEM, help_text=None, opts=None):
"""Add the --format option."""
if opts is None:
opts = ['-f', '--format']
if help_text is None:
help_text = 'The format to use ("ASN1" is an alias for "DER", default: %(default)s).'
help_text = help_text % {'default': default.name}
parser.add_argument(*opts, metavar='{PEM,ASN1,DER}', default=default,
action=FormatAction, help=help_text) | python | def add_format(self, parser, default=Encoding.PEM, help_text=None, opts=None):
"""Add the --format option."""
if opts is None:
opts = ['-f', '--format']
if help_text is None:
help_text = 'The format to use ("ASN1" is an alias for "DER", default: %(default)s).'
help_text = help_text % {'default': default.name}
parser.add_argument(*opts, metavar='{PEM,ASN1,DER}', default=default,
action=FormatAction, help=help_text) | ['def', 'add_format', '(', 'self', ',', 'parser', ',', 'default', '=', 'Encoding', '.', 'PEM', ',', 'help_text', '=', 'None', ',', 'opts', '=', 'None', ')', ':', 'if', 'opts', 'is', 'None', ':', 'opts', '=', '[', "'-f'", ',', "'--format'", ']', 'if', 'help_text', 'is', 'None', ':', 'help_text', '=', '\'The format to use ("ASN1" is an alias for "DER", default: %(default)s).\'', 'help_text', '=', 'help_text', '%', '{', "'default'", ':', 'default', '.', 'name', '}', 'parser', '.', 'add_argument', '(', '*', 'opts', ',', 'metavar', '=', "'{PEM,ASN1,DER}'", ',', 'default', '=', 'default', ',', 'action', '=', 'FormatAction', ',', 'help', '=', 'help_text', ')'] | Add the --format option. | ['Add', 'the', '--', 'format', 'option', '.'] | train | https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/management/base.py#L281-L290 |
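As a rough sketch, the option is normally wired up inside a management command's add_arguments; the command class, its name, and the import path below are assumptions added only to show where add_format would be called.
# Hypothetical wiring of add_format inside a django-ca style management command.
from django_ca.management.base import BaseCommand   # import path assumed from ca/django_ca/management/base.py

class DumpCertificateCommand(BaseCommand):          # command name is made up
    def add_arguments(self, parser):
        # adds -f/--format accepting PEM, DER or the ASN1 alias (default PEM)
        self.add_format(parser)

    def handle(self, *args, **options):
        encoding = options['format']                # an Encoding member resolved by FormatAction
        self.stdout.write('dumping with encoding %s' % encoding)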
9,437 | hydpy-dev/hydpy | hydpy/models/lland/lland_model.py | calc_schm_wats_v1 | def calc_schm_wats_v1(self):
"""Calculate the actual amount of water melting within the snow cover.
Required control parameters:
|NHRU|
|Lnk|
Required flux sequences:
|SBes|
|WGTF|
Calculated flux sequence:
|Schm|
Updated state sequence:
|WATS|
Basic equations:
:math:`\\frac{dWATS}{dt} = SBes - Schm`
:math:`Schm = \\Bigl \\lbrace
{
{WGTF \\ | \\ WATS > 0}
\\atop
{0 \\ | \\ WATS = 0}
}`
Examples:
Initialize two water (|FLUSS| and |SEE|) and four arable land
(|ACKER|) HRUs. Assume the same values for the initial amount
of frozen water (|WATS|) and the frozen part of stand precipitation
(|SBes|), but different values for potential snowmelt (|WGTF|):
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(6)
>>> lnk(FLUSS, SEE, ACKER, ACKER, ACKER, ACKER)
>>> states.wats = 2.0
>>> fluxes.sbes = 1.0
>>> fluxes.wgtf = 1.0, 1.0, 0.0, 1.0, 3.0, 5.0
>>> model.calc_schm_wats_v1()
>>> states.wats
wats(0.0, 0.0, 3.0, 2.0, 0.0, 0.0)
>>> fluxes.schm
schm(0.0, 0.0, 0.0, 1.0, 3.0, 3.0)
For the water areas, both the frozen amount of water and actual melt
are set to zero. For all other land use classes, actual melt
is either limited by potential melt or the available frozen water,
which is the sum of initial frozen water and the frozen part
of stand precipitation.
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nhru):
if con.lnk[k] in (WASSER, FLUSS, SEE):
sta.wats[k] = 0.
flu.schm[k] = 0.
else:
sta.wats[k] += flu.sbes[k]
flu.schm[k] = min(flu.wgtf[k], sta.wats[k])
sta.wats[k] -= flu.schm[k] | python | def calc_schm_wats_v1(self):
"""Calculate the actual amount of water melting within the snow cover.
Required control parameters:
|NHRU|
|Lnk|
Required flux sequences:
|SBes|
|WGTF|
Calculated flux sequence:
|Schm|
Updated state sequence:
|WATS|
Basic equations:
:math:`\\frac{dWATS}{dt} = SBes - Schm`
:math:`Schm = \\Bigl \\lbrace
{
{WGTF \\ | \\ WATS > 0}
\\atop
{0 \\ | \\ WATS = 0}
}`
Examples:
Initialize two water (|FLUSS| and |SEE|) and four arable land
(|ACKER|) HRUs. Assume the same values for the initial amount
of frozen water (|WATS|) and the frozen part of stand precipitation
(|SBes|), but different values for potential snowmelt (|WGTF|):
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(6)
>>> lnk(FLUSS, SEE, ACKER, ACKER, ACKER, ACKER)
>>> states.wats = 2.0
>>> fluxes.sbes = 1.0
>>> fluxes.wgtf = 1.0, 1.0, 0.0, 1.0, 3.0, 5.0
>>> model.calc_schm_wats_v1()
>>> states.wats
wats(0.0, 0.0, 3.0, 2.0, 0.0, 0.0)
>>> fluxes.schm
schm(0.0, 0.0, 0.0, 1.0, 3.0, 3.0)
For the water areas, both the frozen amount of water and actual melt
are set to zero. For all other land use classes, actual melt
is either limited by potential melt or the available frozen water,
which is the sum of initial frozen water and the frozen part
of stand precipitation.
"""
con = self.parameters.control.fastaccess
flu = self.sequences.fluxes.fastaccess
sta = self.sequences.states.fastaccess
for k in range(con.nhru):
if con.lnk[k] in (WASSER, FLUSS, SEE):
sta.wats[k] = 0.
flu.schm[k] = 0.
else:
sta.wats[k] += flu.sbes[k]
flu.schm[k] = min(flu.wgtf[k], sta.wats[k])
sta.wats[k] -= flu.schm[k] | ['def', 'calc_schm_wats_v1', '(', 'self', ')', ':', 'con', '=', 'self', '.', 'parameters', '.', 'control', '.', 'fastaccess', 'flu', '=', 'self', '.', 'sequences', '.', 'fluxes', '.', 'fastaccess', 'sta', '=', 'self', '.', 'sequences', '.', 'states', '.', 'fastaccess', 'for', 'k', 'in', 'range', '(', 'con', '.', 'nhru', ')', ':', 'if', 'con', '.', 'lnk', '[', 'k', ']', 'in', '(', 'WASSER', ',', 'FLUSS', ',', 'SEE', ')', ':', 'sta', '.', 'wats', '[', 'k', ']', '=', '0.', 'flu', '.', 'schm', '[', 'k', ']', '=', '0.', 'else', ':', 'sta', '.', 'wats', '[', 'k', ']', '+=', 'flu', '.', 'sbes', '[', 'k', ']', 'flu', '.', 'schm', '[', 'k', ']', '=', 'min', '(', 'flu', '.', 'wgtf', '[', 'k', ']', ',', 'sta', '.', 'wats', '[', 'k', ']', ')', 'sta', '.', 'wats', '[', 'k', ']', '-=', 'flu', '.', 'schm', '[', 'k', ']'] | Calculate the actual amount of water melting within the snow cover.
Required control parameters:
|NHRU|
|Lnk|
Required flux sequences:
|SBes|
|WGTF|
Calculated flux sequence:
|Schm|
Updated state sequence:
|WATS|
Basic equations:
:math:`\\frac{dWATS}{dt} = SBes - Schm`
:math:`Schm = \\Bigl \\lbrace
{
{WGTF \\ | \\ WATS > 0}
\\atop
{0 \\ | \\ WATS = 0}
}`
Examples:
Initialize two water (|FLUSS| and |SEE|) and four arable land
(|ACKER|) HRUs. Assume the same values for the initial amount
of frozen water (|WATS|) and the frozen part of stand precipitation
(|SBes|), but different values for potential snowmelt (|WGTF|):
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(6)
>>> lnk(FLUSS, SEE, ACKER, ACKER, ACKER, ACKER)
>>> states.wats = 2.0
>>> fluxes.sbes = 1.0
>>> fluxes.wgtf = 1.0, 1.0, 0.0, 1.0, 3.0, 5.0
>>> model.calc_schm_wats_v1()
>>> states.wats
wats(0.0, 0.0, 3.0, 2.0, 0.0, 0.0)
>>> fluxes.schm
schm(0.0, 0.0, 0.0, 1.0, 3.0, 3.0)
For the water areas, both the frozen amount of water and actual melt
are set to zero. For all other land use classes, actual melt
is either limited by potential melt or the available frozen water,
which is the sum of initial frozen water and the frozen part
of stand precipitation. | ['Calculate', 'the', 'actual', 'amount', 'of', 'water', 'melting', 'within', 'the', 'snow', 'cover', '.'] | train | https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_model.py#L604-L666 |
9,438 | mikekatz04/BOWIE | snr_calculator_folder/gwsnrcalc/genconutils/genprocess.py | GenProcess.set_parameters | def set_parameters(self):
"""Setup all the parameters for the binaries to be evaluated.
Grid values and store necessary parameters for input into the SNR function.
"""
# declare 1D arrays of both parameters
if self.xscale != 'lin':
self.xvals = np.logspace(np.log10(float(self.x_low)),
np.log10(float(self.x_high)),
self.num_x)
else:
self.xvals = np.linspace(float(self.x_low),
float(self.x_high),
self.num_x)
if self.yscale != 'lin':
self.yvals = np.logspace(np.log10(float(self.y_low)),
np.log10(float(self.y_high)),
self.num_y)
else:
self.yvals = np.linspace(float(self.y_low),
float(self.y_high),
self.num_y)
self.xvals, self.yvals = np.meshgrid(self.xvals, self.yvals)
self.xvals, self.yvals = self.xvals.ravel(), self.yvals.ravel()
for which in ['x', 'y']:
setattr(self, getattr(self, which + 'val_name'), getattr(self, which + 'vals'))
self.ecc = 'eccentricity' in self.__dict__
if self.ecc:
if 'observation_time' not in self.__dict__:
if 'start_time' not in self.__dict__:
raise ValueError('If no observation time is provided, the time before '
+ 'merger must be the initial starting condition.')
self.observation_time = self.start_time # small number so it is not zero
else:
if 'spin' in self.__dict__:
self.spin_1 = self.spin
self.spin_2 = self.spin
for key in ['redshift', 'luminosity_distance', 'comoving_distance']:
if key in self.__dict__:
self.dist_type = key
self.z_or_dist = getattr(self, key)
if self.ecc:
for key in ['start_frequency', 'start_time', 'start_separation']:
if key in self.__dict__:
self.initial_cond_type = key.split('_')[-1]
self.initial_point = getattr(self, key)
# add m1 and m2
self.m1 = (self.total_mass / (1. + self.mass_ratio))
self.m2 = (self.total_mass * self.mass_ratio / (1. + self.mass_ratio))
return | python | def set_parameters(self):
"""Setup all the parameters for the binaries to be evaluated.
Grid values and store necessary parameters for input into the SNR function.
"""
# declare 1D arrays of both parameters
if self.xscale != 'lin':
self.xvals = np.logspace(np.log10(float(self.x_low)),
np.log10(float(self.x_high)),
self.num_x)
else:
self.xvals = np.linspace(float(self.x_low),
float(self.x_high),
self.num_x)
if self.yscale != 'lin':
self.yvals = np.logspace(np.log10(float(self.y_low)),
np.log10(float(self.y_high)),
self.num_y)
else:
self.yvals = np.linspace(float(self.y_low),
float(self.y_high),
self.num_y)
self.xvals, self.yvals = np.meshgrid(self.xvals, self.yvals)
self.xvals, self.yvals = self.xvals.ravel(), self.yvals.ravel()
for which in ['x', 'y']:
setattr(self, getattr(self, which + 'val_name'), getattr(self, which + 'vals'))
self.ecc = 'eccentricity' in self.__dict__
if self.ecc:
if 'observation_time' not in self.__dict__:
if 'start_time' not in self.__dict__:
raise ValueError('If no observation time is provided, the time before '
+ 'merger must be the initial starting condition.')
self.observation_time = self.start_time # small number so it is not zero
else:
if 'spin' in self.__dict__:
self.spin_1 = self.spin
self.spin_2 = self.spin
for key in ['redshift', 'luminosity_distance', 'comoving_distance']:
if key in self.__dict__:
self.dist_type = key
self.z_or_dist = getattr(self, key)
if self.ecc:
for key in ['start_frequency', 'start_time', 'start_separation']:
if key in self.__dict__:
self.initial_cond_type = key.split('_')[-1]
self.initial_point = getattr(self, key)
# add m1 and m2
self.m1 = (self.total_mass / (1. + self.mass_ratio))
self.m2 = (self.total_mass * self.mass_ratio / (1. + self.mass_ratio))
return | ['def', 'set_parameters', '(', 'self', ')', ':', '# declare 1D arrays of both paramters', 'if', 'self', '.', 'xscale', '!=', "'lin'", ':', 'self', '.', 'xvals', '=', 'np', '.', 'logspace', '(', 'np', '.', 'log10', '(', 'float', '(', 'self', '.', 'x_low', ')', ')', ',', 'np', '.', 'log10', '(', 'float', '(', 'self', '.', 'x_high', ')', ')', ',', 'self', '.', 'num_x', ')', 'else', ':', 'self', '.', 'xvals', '=', 'np', '.', 'linspace', '(', 'float', '(', 'self', '.', 'x_low', ')', ',', 'float', '(', 'self', '.', 'x_high', ')', ',', 'self', '.', 'num_x', ')', 'if', 'self', '.', 'yscale', '!=', "'lin'", ':', 'self', '.', 'yvals', '=', 'np', '.', 'logspace', '(', 'np', '.', 'log10', '(', 'float', '(', 'self', '.', 'y_low', ')', ')', ',', 'np', '.', 'log10', '(', 'float', '(', 'self', '.', 'y_high', ')', ')', ',', 'self', '.', 'num_y', ')', 'else', ':', 'self', '.', 'yvals', '=', 'np', '.', 'linspace', '(', 'float', '(', 'self', '.', 'y_low', ')', ',', 'float', '(', 'self', '.', 'y_high', ')', ',', 'self', '.', 'num_y', ')', 'self', '.', 'xvals', ',', 'self', '.', 'yvals', '=', 'np', '.', 'meshgrid', '(', 'self', '.', 'xvals', ',', 'self', '.', 'yvals', ')', 'self', '.', 'xvals', ',', 'self', '.', 'yvals', '=', 'self', '.', 'xvals', '.', 'ravel', '(', ')', ',', 'self', '.', 'yvals', '.', 'ravel', '(', ')', 'for', 'which', 'in', '[', "'x'", ',', "'y'", ']', ':', 'setattr', '(', 'self', ',', 'getattr', '(', 'self', ',', 'which', '+', "'val_name'", ')', ',', 'getattr', '(', 'self', ',', 'which', '+', "'vals'", ')', ')', 'self', '.', 'ecc', '=', "'eccentricity'", 'in', 'self', '.', '__dict__', 'if', 'self', '.', 'ecc', ':', 'if', "'observation_time'", 'not', 'in', 'self', '.', '__dict__', ':', 'if', "'start_time'", 'not', 'in', 'self', '.', '__dict__', ':', 'raise', 'ValueError', '(', "'If no observation time is provided, the time before'", '+', "'merger must be the inital starting condition.'", ')', 'self', '.', 'observation_time', '=', 'self', '.', 'start_time', '# small number so it is not zero', 'else', ':', 'if', "'spin'", 'in', 'self', '.', '__dict__', ':', 'self', '.', 'spin_1', '=', 'self', '.', 'spin', 'self', '.', 'spin_2', '=', 'self', '.', 'spin', 'for', 'key', 'in', '[', "'redshift'", ',', "'luminosity_distance'", ',', "'comoving_distance'", ']', ':', 'if', 'key', 'in', 'self', '.', '__dict__', ':', 'self', '.', 'dist_type', '=', 'key', 'self', '.', 'z_or_dist', '=', 'getattr', '(', 'self', ',', 'key', ')', 'if', 'self', '.', 'ecc', ':', 'for', 'key', 'in', '[', "'start_frequency'", ',', "'start_time'", ',', "'start_separation'", ']', ':', 'if', 'key', 'in', 'self', '.', '__dict__', ':', 'self', '.', 'initial_cond_type', '=', 'key', '.', 'split', '(', "'_'", ')', '[', '-', '1', ']', 'self', '.', 'initial_point', '=', 'getattr', '(', 'self', ',', 'key', ')', '# add m1 and m2', 'self', '.', 'm1', '=', '(', 'self', '.', 'total_mass', '/', '(', '1.', '+', 'self', '.', 'mass_ratio', ')', ')', 'self', '.', 'm2', '=', '(', 'self', '.', 'total_mass', '*', 'self', '.', 'mass_ratio', '/', '(', '1.', '+', 'self', '.', 'mass_ratio', ')', ')', 'return'] | Setup all the parameters for the binaries to be evaluated.
Grid values and store necessary parameters for input into the SNR function. | ['Setup', 'all', 'the', 'parameters', 'for', 'the', 'binaries', 'to', 'be', 'evaluated', '.'] | train | https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/genconutils/genprocess.py#L68-L128 |
9,439 | bwhite/hadoopy | hadoopy/_hdfs.py | cp | def cp(hdfs_src, hdfs_dst):
"""Copy a file
:param hdfs_src: Source (str)
:param hdfs_dst: Destination (str)
:raises: IOError: If unsuccessful
"""
cmd = "hadoop fs -cp %s %s" % (hdfs_src, hdfs_dst)
rcode, stdout, stderr = _checked_hadoop_fs_command(cmd) | python | def cp(hdfs_src, hdfs_dst):
"""Copy a file
:param hdfs_src: Source (str)
:param hdfs_dst: Destination (str)
:raises: IOError: If unsuccessful
"""
cmd = "hadoop fs -cp %s %s" % (hdfs_src, hdfs_dst)
rcode, stdout, stderr = _checked_hadoop_fs_command(cmd) | ['def', 'cp', '(', 'hdfs_src', ',', 'hdfs_dst', ')', ':', 'cmd', '=', '"hadoop fs -cp %s %s"', '%', '(', 'hdfs_src', ',', 'hdfs_dst', ')', 'rcode', ',', 'stdout', ',', 'stderr', '=', '_checked_hadoop_fs_command', '(', 'cmd', ')'] | Copy a file
:param hdfs_src: Source (str)
:param hdfs_dst: Destination (str)
:raises: IOError: If unsuccessful | ['Copy', 'a', 'file'] | train | https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_hdfs.py#L148-L156 |
9,440 | osrg/ryu | ryu/lib/stplib.py | Port.transmit_ack_bpdu | def transmit_ack_bpdu(self):
""" Send Topology Change Ack BPDU. """
ack_flags = 0b10000001
bpdu_data = self._generate_config_bpdu(ack_flags)
self.ofctl.send_packet_out(self.ofport.port_no, bpdu_data) | python | def transmit_ack_bpdu(self):
""" Send Topology Change Ack BPDU. """
ack_flags = 0b10000001
bpdu_data = self._generate_config_bpdu(ack_flags)
self.ofctl.send_packet_out(self.ofport.port_no, bpdu_data) | ['def', 'transmit_ack_bpdu', '(', 'self', ')', ':', 'ack_flags', '=', '0b10000001', 'bpdu_data', '=', 'self', '.', '_generate_config_bpdu', '(', 'ack_flags', ')', 'self', '.', 'ofctl', '.', 'send_packet_out', '(', 'self', '.', 'ofport', '.', 'port_no', ',', 'bpdu_data', ')'] | Send Topology Change Ack BPDU. | ['Send', 'Topology', 'Change', 'Ack', 'BPDU', '.'] | train | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/stplib.py#L1029-L1033 |
9,441 | markchil/gptools | gptools/kernel/warping.py | WarpedKernel.free_params | def free_params(self, value):
"""Set the free parameters. Note that this bypasses enforce_bounds.
"""
value = scipy.asarray(value, dtype=float)
self.K_up_to_date = False
self.k.free_params = value[:self.k.num_free_params]
self.w.free_params = value[self.k.num_free_params:self.k.num_free_params + self.w.num_free_params] | python | def free_params(self, value):
"""Set the free parameters. Note that this bypasses enforce_bounds.
"""
value = scipy.asarray(value, dtype=float)
self.K_up_to_date = False
self.k.free_params = value[:self.k.num_free_params]
self.w.free_params = value[self.k.num_free_params:self.k.num_free_params + self.w.num_free_params] | ['def', 'free_params', '(', 'self', ',', 'value', ')', ':', 'value', '=', 'scipy', '.', 'asarray', '(', 'value', ',', 'dtype', '=', 'float', ')', 'self', '.', 'K_up_to_date', '=', 'False', 'self', '.', 'k', '.', 'free_params', '=', 'value', '[', ':', 'self', '.', 'k', '.', 'num_free_params', ']', 'self', '.', 'w', '.', 'free_params', '=', 'value', '[', 'self', '.', 'k', '.', 'num_free_params', ':', 'self', '.', 'k', '.', 'num_free_params', '+', 'self', '.', 'w', '.', 'num_free_params', ']'] | Set the free parameters. Note that this bypasses enforce_bounds. | ['Set', 'the', 'free', 'parameters', '.', 'Note', 'that', 'this', 'bypasses', 'enforce_bounds', '.'] | train | https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/warping.py#L582-L588 |
9,442 | lra/mackup | mackup/application.py | ApplicationProfile.backup | def backup(self):
"""
Backup the application config files.
Algorithm:
if exists home/file
if home/file is a real file
if exists mackup/file
are you sure ?
if sure
rm mackup/file
mv home/file mackup/file
link mackup/file home/file
else
mv home/file mackup/file
link mackup/file home/file
"""
# For each file used by the application
for filename in self.files:
(home_filepath, mackup_filepath) = self.getFilepaths(filename)
# If the file exists and is not already a link pointing to Mackup
if ((os.path.isfile(home_filepath) or
os.path.isdir(home_filepath)) and
not (os.path.islink(home_filepath) and
(os.path.isfile(mackup_filepath) or
os.path.isdir(mackup_filepath)) and
os.path.samefile(home_filepath,
mackup_filepath))):
if self.verbose:
print("Backing up\n {}\n to\n {} ..."
.format(home_filepath, mackup_filepath))
else:
print("Backing up {} ...".format(filename))
if self.dry_run:
continue
# Check if we already have a backup
if os.path.exists(mackup_filepath):
# Name it right
if os.path.isfile(mackup_filepath):
file_type = 'file'
elif os.path.isdir(mackup_filepath):
file_type = 'folder'
elif os.path.islink(mackup_filepath):
file_type = 'link'
else:
raise ValueError("Unsupported file: {}"
.format(mackup_filepath))
# Ask the user if they really want to replace it
if utils.confirm("A {} named {} already exists in the"
" backup.\nAre you sure that you want to"
" replace it ?"
.format(file_type, mackup_filepath)):
# Delete the file in Mackup
utils.delete(mackup_filepath)
# Copy the file
utils.copy(home_filepath, mackup_filepath)
# Delete the file in the home
utils.delete(home_filepath)
# Link the backed-up file to its original place
utils.link(mackup_filepath, home_filepath)
else:
# Copy the file
utils.copy(home_filepath, mackup_filepath)
# Delete the file in the home
utils.delete(home_filepath)
# Link the backed-up file to its original place
utils.link(mackup_filepath, home_filepath)
elif self.verbose:
if os.path.exists(home_filepath):
print("Doing nothing\n {}\n "
"is already backed up to\n {}"
.format(home_filepath, mackup_filepath))
elif os.path.islink(home_filepath):
print("Doing nothing\n {}\n "
"is a broken link, you might want to fix it."
.format(home_filepath))
else:
print("Doing nothing\n {}\n does not exist"
.format(home_filepath)) | python | def backup(self):
"""
Backup the application config files.
Algorithm:
if exists home/file
if home/file is a real file
if exists mackup/file
are you sure ?
if sure
rm mackup/file
mv home/file mackup/file
link mackup/file home/file
else
mv home/file mackup/file
link mackup/file home/file
"""
# For each file used by the application
for filename in self.files:
(home_filepath, mackup_filepath) = self.getFilepaths(filename)
# If the file exists and is not already a link pointing to Mackup
if ((os.path.isfile(home_filepath) or
os.path.isdir(home_filepath)) and
not (os.path.islink(home_filepath) and
(os.path.isfile(mackup_filepath) or
os.path.isdir(mackup_filepath)) and
os.path.samefile(home_filepath,
mackup_filepath))):
if self.verbose:
print("Backing up\n {}\n to\n {} ..."
.format(home_filepath, mackup_filepath))
else:
print("Backing up {} ...".format(filename))
if self.dry_run:
continue
# Check if we already have a backup
if os.path.exists(mackup_filepath):
# Name it right
if os.path.isfile(mackup_filepath):
file_type = 'file'
elif os.path.isdir(mackup_filepath):
file_type = 'folder'
elif os.path.islink(mackup_filepath):
file_type = 'link'
else:
raise ValueError("Unsupported file: {}"
.format(mackup_filepath))
# Ask the user if they really want to replace it
if utils.confirm("A {} named {} already exists in the"
" backup.\nAre you sure that you want to"
" replace it ?"
.format(file_type, mackup_filepath)):
# Delete the file in Mackup
utils.delete(mackup_filepath)
# Copy the file
utils.copy(home_filepath, mackup_filepath)
# Delete the file in the home
utils.delete(home_filepath)
# Link the backed-up file to its original place
utils.link(mackup_filepath, home_filepath)
else:
# Copy the file
utils.copy(home_filepath, mackup_filepath)
# Delete the file in the home
utils.delete(home_filepath)
# Link the backed-up file to its original place
utils.link(mackup_filepath, home_filepath)
elif self.verbose:
if os.path.exists(home_filepath):
print("Doing nothing\n {}\n "
"is already backed up to\n {}"
.format(home_filepath, mackup_filepath))
elif os.path.islink(home_filepath):
print("Doing nothing\n {}\n "
"is a broken link, you might want to fix it."
.format(home_filepath))
else:
print("Doing nothing\n {}\n does not exist"
.format(home_filepath)) | ['def', 'backup', '(', 'self', ')', ':', '# For each file used by the application', 'for', 'filename', 'in', 'self', '.', 'files', ':', '(', 'home_filepath', ',', 'mackup_filepath', ')', '=', 'self', '.', 'getFilepaths', '(', 'filename', ')', '# If the file exists and is not already a link pointing to Mackup', 'if', '(', '(', 'os', '.', 'path', '.', 'isfile', '(', 'home_filepath', ')', 'or', 'os', '.', 'path', '.', 'isdir', '(', 'home_filepath', ')', ')', 'and', 'not', '(', 'os', '.', 'path', '.', 'islink', '(', 'home_filepath', ')', 'and', '(', 'os', '.', 'path', '.', 'isfile', '(', 'mackup_filepath', ')', 'or', 'os', '.', 'path', '.', 'isdir', '(', 'mackup_filepath', ')', ')', 'and', 'os', '.', 'path', '.', 'samefile', '(', 'home_filepath', ',', 'mackup_filepath', ')', ')', ')', ':', 'if', 'self', '.', 'verbose', ':', 'print', '(', '"Backing up\\n {}\\n to\\n {} ..."', '.', 'format', '(', 'home_filepath', ',', 'mackup_filepath', ')', ')', 'else', ':', 'print', '(', '"Backing up {} ..."', '.', 'format', '(', 'filename', ')', ')', 'if', 'self', '.', 'dry_run', ':', 'continue', '# Check if we already have a backup', 'if', 'os', '.', 'path', '.', 'exists', '(', 'mackup_filepath', ')', ':', '# Name it right', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'mackup_filepath', ')', ':', 'file_type', '=', "'file'", 'elif', 'os', '.', 'path', '.', 'isdir', '(', 'mackup_filepath', ')', ':', 'file_type', '=', "'folder'", 'elif', 'os', '.', 'path', '.', 'islink', '(', 'mackup_filepath', ')', ':', 'file_type', '=', "'link'", 'else', ':', 'raise', 'ValueError', '(', '"Unsupported file: {}"', '.', 'format', '(', 'mackup_filepath', ')', ')', '# Ask the user if he really want to replace it', 'if', 'utils', '.', 'confirm', '(', '"A {} named {} already exists in the"', '" backup.\\nAre you sure that you want to"', '" replace it ?"', '.', 'format', '(', 'file_type', ',', 'mackup_filepath', ')', ')', ':', '# Delete the file in Mackup', 'utils', '.', 'delete', '(', 'mackup_filepath', ')', '# Copy the file', 'utils', '.', 'copy', '(', 'home_filepath', ',', 'mackup_filepath', ')', '# Delete the file in the home', 'utils', '.', 'delete', '(', 'home_filepath', ')', '# Link the backuped file to its original place', 'utils', '.', 'link', '(', 'mackup_filepath', ',', 'home_filepath', ')', 'else', ':', '# Copy the file', 'utils', '.', 'copy', '(', 'home_filepath', ',', 'mackup_filepath', ')', '# Delete the file in the home', 'utils', '.', 'delete', '(', 'home_filepath', ')', '# Link the backuped file to its original place', 'utils', '.', 'link', '(', 'mackup_filepath', ',', 'home_filepath', ')', 'elif', 'self', '.', 'verbose', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'home_filepath', ')', ':', 'print', '(', '"Doing nothing\\n {}\\n "', '"is already backed up to\\n {}"', '.', 'format', '(', 'home_filepath', ',', 'mackup_filepath', ')', ')', 'elif', 'os', '.', 'path', '.', 'islink', '(', 'home_filepath', ')', ':', 'print', '(', '"Doing nothing\\n {}\\n "', '"is a broken link, you might want to fix it."', '.', 'format', '(', 'home_filepath', ')', ')', 'else', ':', 'print', '(', '"Doing nothing\\n {}\\n does not exist"', '.', 'format', '(', 'home_filepath', ')', ')'] | Backup the application config files.
Algorithm:
if exists home/file
if home/file is a real file
if exists mackup/file
are you sure ?
if sure
rm mackup/file
mv home/file mackup/file
link mackup/file home/file
else
mv home/file mackup/file
link mackup/file home/file | ['Backup', 'the', 'application', 'config', 'files', '.'] | train | https://github.com/lra/mackup/blob/ed0b5626b033f232868900bfd5108df448873725/mackup/application.py#L46-L130 |
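The decisive test in backup is whether the home path is already a symlink that resolves to the Mackup copy. A standalone restatement of that check, using only the standard library (the example paths are placeholders):
# Self-contained sketch of the "already backed up?" test used in backup().
import os

def already_backed_up(home_path, mackup_path):
    return (os.path.islink(home_path)
            and (os.path.isfile(mackup_path) or os.path.isdir(mackup_path))
            and os.path.samefile(home_path, mackup_path))

# Example (placeholder paths):
# already_backed_up('/Users/me/.gitconfig', '/Users/me/Dropbox/Mackup/.gitconfig')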
9,443 | mozilla/DeepSpeech | bin/benchmark_nc.py | exec_command | def exec_command(command, cwd=None):
r'''
Helper to exec locally (subprocess) or remotely (paramiko)
'''
rc = None
stdout = stderr = None
if ssh_conn is None:
ld_library_path = {'LD_LIBRARY_PATH': '.:%s' % os.environ.get('LD_LIBRARY_PATH', '')}
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=ld_library_path, cwd=cwd)
stdout, stderr = p.communicate()
rc = p.returncode
else:
# environment= requires paramiko >= 2.1 (fails with 2.0.2)
final_command = command if cwd is None else 'cd %s && %s %s' % (cwd, 'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH', command)
ssh_stdin, ssh_stdout, ssh_stderr = ssh_conn.exec_command(final_command)
stdout = ''.join(ssh_stdout.readlines())
stderr = ''.join(ssh_stderr.readlines())
rc = ssh_stdout.channel.recv_exit_status()
return rc, stdout, stderr | python | def exec_command(command, cwd=None):
r'''
Helper to exec locally (subprocess) or remotely (paramiko)
'''
rc = None
stdout = stderr = None
if ssh_conn is None:
ld_library_path = {'LD_LIBRARY_PATH': '.:%s' % os.environ.get('LD_LIBRARY_PATH', '')}
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=ld_library_path, cwd=cwd)
stdout, stderr = p.communicate()
rc = p.returncode
else:
# environment= requires paramiko >= 2.1 (fails with 2.0.2)
final_command = command if cwd is None else 'cd %s && %s %s' % (cwd, 'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH', command)
ssh_stdin, ssh_stdout, ssh_stderr = ssh_conn.exec_command(final_command)
stdout = ''.join(ssh_stdout.readlines())
stderr = ''.join(ssh_stderr.readlines())
rc = ssh_stdout.channel.recv_exit_status()
return rc, stdout, stderr | ['def', 'exec_command', '(', 'command', ',', 'cwd', '=', 'None', ')', ':', 'rc', '=', 'None', 'stdout', '=', 'stderr', '=', 'None', 'if', 'ssh_conn', 'is', 'None', ':', 'ld_library_path', '=', '{', "'LD_LIBRARY_PATH'", ':', "'.:%s'", '%', 'os', '.', 'environ', '.', 'get', '(', "'LD_LIBRARY_PATH'", ',', "''", ')', '}', 'p', '=', 'subprocess', '.', 'Popen', '(', 'command', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ',', 'stderr', '=', 'subprocess', '.', 'PIPE', ',', 'shell', '=', 'True', ',', 'env', '=', 'ld_library_path', ',', 'cwd', '=', 'cwd', ')', 'stdout', ',', 'stderr', '=', 'p', '.', 'communicate', '(', ')', 'rc', '=', 'p', '.', 'returncode', 'else', ':', '# environment= requires paramiko >= 2.1 (fails with 2.0.2)', 'final_command', '=', 'command', 'if', 'cwd', 'is', 'None', 'else', "'cd %s && %s %s'", '%', '(', 'cwd', ',', "'LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH'", ',', 'command', ')', 'ssh_stdin', ',', 'ssh_stdout', ',', 'ssh_stderr', '=', 'ssh_conn', '.', 'exec_command', '(', 'final_command', ')', 'stdout', '=', "''", '.', 'join', '(', 'ssh_stdout', '.', 'readlines', '(', ')', ')', 'stderr', '=', "''", '.', 'join', '(', 'ssh_stderr', '.', 'readlines', '(', ')', ')', 'rc', '=', 'ssh_stdout', '.', 'channel', '.', 'recv_exit_status', '(', ')', 'return', 'rc', ',', 'stdout', ',', 'stderr'] | r'''
Helper to exec locally (subprocess) or remotely (paramiko) | ['r', 'Helper', 'to', 'exec', 'locally', '(', 'subprocess', ')', 'or', 'remotely', '(', 'paramiko', ')'] | train | https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/bin/benchmark_nc.py#L41-L61 |
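Run locally (with the module-level ssh_conn left as None) the helper behaves like a thin subprocess wrapper; the command and working directory below are only examples and assume the call is made from the same module.
# Local invocation sketch for exec_command; assumes ssh_conn is None so the subprocess branch runs.
rc, stdout, stderr = exec_command('ls -l', cwd='/tmp')
if rc != 0:
    print('command failed:', stderr)
else:
    print(stdout)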
9,444 | doakey3/DashTable | dashtable/simple2data/simple2data.py | simple2data | def simple2data(text):
"""
Convert a simple table to data (the kind used by DashTable)
Parameters
----------
text : str
A valid simple rst table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a [row, column] pair that defines a group of merged
cells in the table. In a simple rst table, spans can only be
colspans.
use_headers : bool
Whether or not this table uses headers
headers_row : int
The row where headers are located
Notes
-----
This function requires docutils_.
.. _docutils: http://docutils.sourceforge.net/
Example
-------
>>> html_text = '''
... ====== ===== ======
... Inputs Output
... ------------- ------
... A B A or B
... ====== ===== ======
... False False False
... True False True
... False True True
... True True True
... ====== ===== ======
... '''
>>> from dashtable import simple2data
>>> table, spans, use_headers, headers_row = simple2data(html_text)
>>> from pprint import pprint
>>> pprint(table)
[['Inputs', 'Output', ''],
['A', 'B', 'A or B'],
['False', 'False', 'False'],
['True, 'False', 'True'],
['False', 'True', 'True'],
['True', 'True', 'True']]
>>> print(spans)
[[[0, 0], [0, 1]]]
>>> print(use_headers)
True
>>> print(headers_row)
1
"""
try:
import docutils.statemachine
import docutils.parsers.rst.tableparser
except ImportError:
print("ERROR: You must install the docutils library to use simple2data")
return
lines = text.split('\n')
lines = truncate_empty_lines(lines)
leading_space = lines[0].replace(lines[0].lstrip(), '')
for i in range(len(lines)):
lines[i] = lines[i][len(leading_space)::]
parser = docutils.parsers.rst.tableparser.SimpleTableParser()
block = docutils.statemachine.StringList(list(lines))
simple_data = list(parser.parse(block))
column_widths = simple_data.pop(0)
column_count = len(column_widths)
headers_row = 0
if len(simple_data[0]) > 0:
use_headers = True
headers_row = len(simple_data[0]) - 1
headers = simple_data[0][0]
row_count = len(simple_data[1]) + len(simple_data[0])
while len(simple_data[0]) > 0:
simple_data[1].insert(0, simple_data[0][-1])
simple_data[0].pop(-1)
simple_data.pop(0)
else:
use_headers = False
simple_data.pop(0)
row_count = len(simple_data[0])
simple_data = simple_data[0]
table = make_empty_table(row_count, column_count)
spans = []
for row in range(len(simple_data)):
for column in range(len(simple_data[row])):
try:
text = '\n'.join(simple_data[row][column][3]).rstrip()
table[row][column] = text
extra_rows = simple_data[row][column][0]
extra_columns = simple_data[row][column][1]
span = make_span(row, column, extra_rows, extra_columns)
span = sorted(span)
span = list(span for span,_ in itertools.groupby(span))
if not len(span) == 1:
spans.append(span)
except TypeError:
pass
spans = sorted(spans)
return table, spans, use_headers, headers_row | python | def simple2data(text):
"""
Convert a simple table to data (the kind used by DashTable)
Parameters
----------
text : str
A valid simple rst table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a [row, column] pair that defines a group of merged
cells in the table. In a simple rst table, spans can only be
colspans.
use_headers : bool
Whether or not this table uses headers
headers_row : int
The row where headers are located
Notes
-----
This function requires docutils_.
.. _docutils: http://docutils.sourceforge.net/
Example
-------
>>> html_text = '''
... ====== ===== ======
... Inputs Output
... ------------- ------
... A B A or B
... ====== ===== ======
... False False False
... True False True
... False True True
... True True True
... ====== ===== ======
... '''
>>> from dashtable import simple2data
>>> table, spans, use_headers, headers_row = simple2data(html_text)
>>> from pprint import pprint
>>> pprint(table)
[['Inputs', 'Output', ''],
['A', 'B', 'A or B'],
['False', 'False', 'False'],
['True', 'False', 'True'],
['False', 'True', 'True'],
['True', 'True', 'True']]
>>> print(spans)
[[[0, 0], [0, 1]]]
>>> print(use_headers)
True
>>> print(headers_row)
1
"""
try:
import docutils.statemachine
import docutils.parsers.rst.tableparser
except ImportError:
print("ERROR: You must install the docutils library to use simple2data")
return
lines = text.split('\n')
lines = truncate_empty_lines(lines)
leading_space = lines[0].replace(lines[0].lstrip(), '')
for i in range(len(lines)):
lines[i] = lines[i][len(leading_space)::]
parser = docutils.parsers.rst.tableparser.SimpleTableParser()
block = docutils.statemachine.StringList(list(lines))
simple_data = list(parser.parse(block))
column_widths = simple_data.pop(0)
column_count = len(column_widths)
headers_row = 0
if len(simple_data[0]) > 0:
use_headers = True
headers_row = len(simple_data[0]) - 1
headers = simple_data[0][0]
row_count = len(simple_data[1]) + len(simple_data[0])
while len(simple_data[0]) > 0:
simple_data[1].insert(0, simple_data[0][-1])
simple_data[0].pop(-1)
simple_data.pop(0)
else:
use_headers = False
simple_data.pop(0)
row_count = len(simple_data[0])
simple_data = simple_data[0]
table = make_empty_table(row_count, column_count)
spans = []
for row in range(len(simple_data)):
for column in range(len(simple_data[row])):
try:
text = '\n'.join(simple_data[row][column][3]).rstrip()
table[row][column] = text
extra_rows = simple_data[row][column][0]
extra_columns = simple_data[row][column][1]
span = make_span(row, column, extra_rows, extra_columns)
span = sorted(span)
span = list(span for span,_ in itertools.groupby(span))
if not len(span) == 1:
spans.append(span)
except TypeError:
pass
spans = sorted(spans)
return table, spans, use_headers, headers_row | ['def', 'simple2data', '(', 'text', ')', ':', 'try', ':', 'import', 'docutils', '.', 'statemachine', 'import', 'docutils', '.', 'parsers', '.', 'rst', '.', 'tableparser', 'except', 'ImportError', ':', 'print', '(', '"ERROR: You must install the docutils library to use simple2data"', ')', 'return', 'lines', '=', 'text', '.', 'split', '(', "'\\n'", ')', 'lines', '=', 'truncate_empty_lines', '(', 'lines', ')', 'leading_space', '=', 'lines', '[', '0', ']', '.', 'replace', '(', 'lines', '[', '0', ']', '.', 'lstrip', '(', ')', ',', "''", ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'lines', ')', ')', ':', 'lines', '[', 'i', ']', '=', 'lines', '[', 'i', ']', '[', 'len', '(', 'leading_space', ')', ':', ':', ']', 'parser', '=', 'docutils', '.', 'parsers', '.', 'rst', '.', 'tableparser', '.', 'SimpleTableParser', '(', ')', 'block', '=', 'docutils', '.', 'statemachine', '.', 'StringList', '(', 'list', '(', 'lines', ')', ')', 'simple_data', '=', 'list', '(', 'parser', '.', 'parse', '(', 'block', ')', ')', 'column_widths', '=', 'simple_data', '.', 'pop', '(', '0', ')', 'column_count', '=', 'len', '(', 'column_widths', ')', 'headers_row', '=', '0', 'if', 'len', '(', 'simple_data', '[', '0', ']', ')', '>', '0', ':', 'use_headers', '=', 'True', 'headers_row', '=', 'len', '(', 'simple_data', '[', '0', ']', ')', '-', '1', 'headers', '=', 'simple_data', '[', '0', ']', '[', '0', ']', 'row_count', '=', 'len', '(', 'simple_data', '[', '1', ']', ')', '+', 'len', '(', 'simple_data', '[', '0', ']', ')', 'while', 'len', '(', 'simple_data', '[', '0', ']', ')', '>', '0', ':', 'simple_data', '[', '1', ']', '.', 'insert', '(', '0', ',', 'simple_data', '[', '0', ']', '[', '-', '1', ']', ')', 'simple_data', '[', '0', ']', '.', 'pop', '(', '-', '1', ')', 'simple_data', '.', 'pop', '(', '0', ')', 'else', ':', 'use_headers', '=', 'False', 'simple_data', '.', 'pop', '(', '0', ')', 'row_count', '=', 'len', '(', 'simple_data', '[', '0', ']', ')', 'simple_data', '=', 'simple_data', '[', '0', ']', 'table', '=', 'make_empty_table', '(', 'row_count', ',', 'column_count', ')', 'spans', '=', '[', ']', 'for', 'row', 'in', 'range', '(', 'len', '(', 'simple_data', ')', ')', ':', 'for', 'column', 'in', 'range', '(', 'len', '(', 'simple_data', '[', 'row', ']', ')', ')', ':', 'try', ':', 'text', '=', "'\\n'", '.', 'join', '(', 'simple_data', '[', 'row', ']', '[', 'column', ']', '[', '3', ']', ')', '.', 'rstrip', '(', ')', 'table', '[', 'row', ']', '[', 'column', ']', '=', 'text', 'extra_rows', '=', 'simple_data', '[', 'row', ']', '[', 'column', ']', '[', '0', ']', 'extra_columns', '=', 'simple_data', '[', 'row', ']', '[', 'column', ']', '[', '1', ']', 'span', '=', 'make_span', '(', 'row', ',', 'column', ',', 'extra_rows', ',', 'extra_columns', ')', 'span', '=', 'sorted', '(', 'span', ')', 'span', '=', 'list', '(', 'span', 'for', 'span', ',', '_', 'in', 'itertools', '.', 'groupby', '(', 'span', ')', ')', 'if', 'not', 'len', '(', 'span', ')', '==', '1', ':', 'spans', '.', 'append', '(', 'span', ')', 'except', 'TypeError', ':', 'pass', 'spans', '=', 'sorted', '(', 'spans', ')', 'return', 'table', ',', 'spans', ',', 'use_headers', ',', 'headers_row'] | Convert a simple table to data (the kind used by DashTable)
Parameters
----------
text : str
A valid simple rst table
Returns
-------
table : list of lists of str
spans : list of lists of lists of int
A span is a [row, column] pair that defines a group of merged
cells in the table. In a simple rst table, spans can only be
colspans.
use_headers : bool
Whether or not this table uses headers
headers_row : int
The row where headers are located
Notes
-----
This function requires docutils_.
.. _docutils: http://docutils.sourceforge.net/
Example
-------
>>> html_text = '''
... ====== ===== ======
... Inputs Output
... ------------- ------
... A B A or B
... ====== ===== ======
... False False False
... True False True
... False True True
... True True True
... ====== ===== ======
... '''
>>> from dashtable import simple2data
>>> table, spans, use_headers, headers_row = simple2data(html_text)
>>> from pprint import pprint
>>> pprint(table)
[['Inputs', 'Output', ''],
['A', 'B', 'A or B'],
['False', 'False', 'False'],
['True, 'False', 'True'],
['False', 'True', 'True'],
['True', 'True', 'True']]
>>> print(spans)
[[[0, 0], [0, 1]]]
>>> print(use_headers)
True
>>> print(headers_row)
1 | ['Convert', 'a', 'simple', 'table', 'to', 'data', '(', 'the', 'kind', 'used', 'by', 'DashTable', ')'] | train | https://github.com/doakey3/DashTable/blob/744cfb6a717fa75a8092c83ebcd49b2668023681/dashtable/simple2data/simple2data.py#L9-L121 |
9,445 | HPAC/matchpy | matchpy/expressions/substitution.py | Substitution.rename | def rename(self, renaming: Dict[str, str]) -> 'Substitution':
"""Return a copy of the substitution with renamed variables.
Example:
Rename the variable *x* to *y*:
>>> subst = Substitution({'x': a})
>>> subst.rename({'x': 'y'})
{'y': Symbol('a')}
Args:
renaming:
A dictionary mapping old variable names to new ones.
Returns:
A copy of the substitution where variable names have been replaced according to the given renaming
dictionary. Names that are not contained in the dictionary are left unchanged.
"""
return Substitution((renaming.get(name, name), value) for name, value in self.items()) | python | def rename(self, renaming: Dict[str, str]) -> 'Substitution':
"""Return a copy of the substitution with renamed variables.
Example:
Rename the variable *x* to *y*:
>>> subst = Substitution({'x': a})
>>> subst.rename({'x': 'y'})
{'y': Symbol('a')}
Args:
renaming:
A dictionary mapping old variable names to new ones.
Returns:
A copy of the substitution where variable names have been replaced according to the given renaming
dictionary. Names that are not contained in the dictionary are left unchanged.
"""
return Substitution((renaming.get(name, name), value) for name, value in self.items()) | ['def', 'rename', '(', 'self', ',', 'renaming', ':', 'Dict', '[', 'str', ',', 'str', ']', ')', '->', "'Substitution'", ':', 'return', 'Substitution', '(', '(', 'renaming', '.', 'get', '(', 'name', ',', 'name', ')', ',', 'value', ')', 'for', 'name', ',', 'value', 'in', 'self', '.', 'items', '(', ')', ')'] | Return a copy of the substitution with renamed variables.
Example:
Rename the variable *x* to *y*:
>>> subst = Substitution({'x': a})
>>> subst.rename({'x': 'y'})
{'y': Symbol('a')}
Args:
renaming:
A dictionary mapping old variable names to new ones.
Returns:
A copy of the substitution where variable names have been replaced according to the given renaming
dictionary. Names that are not contained in the dictionary are left unchanged. | ['Return', 'a', 'copy', 'of', 'the', 'substitution', 'with', 'renamed', 'variables', '.'] | train | https://github.com/HPAC/matchpy/blob/06b2ec50ee0efdf3dd183768c0ffdb51b7efc393/matchpy/expressions/substitution.py#L199-L218 |
9,446 | jxtech/wechatpy | wechatpy/client/api/message.py | WeChatMessage.send_mass_video | def send_mass_video(self, group_or_users, media_id, title=None,
description=None, is_to_all=False, preview=False,
send_ignore_reprint=0, client_msg_id=None):
"""
Send a video message to users in bulk (mass broadcast).
For details, please refer to
https://mp.weixin.qq.com/wiki?id=mp1481187827_i0l21
:param group_or_users: an integer selects broadcast by group/tag id; a list or tuple broadcasts to the given OpenID list.
When is_to_all is True, pass None to send to all users.
:param media_id: media ID of the video. It can be uploaded via :func:`upload_video`.
:param title: video title
:param description: video description
:param is_to_all: whether to send to every user; True broadcasts the message to all users,
False sends it to the users of the group identified by group_id
:type is_to_all: bool
:param preview: whether to send a preview; in that case group_or_users should be a single openid string
:type preview: bool
:param send_ignore_reprint: whether to continue the broadcast when an article is judged to be a reprint.
When send_ignore_reprint is set to 1 and the article is judged to be a reprint whose original allows reprinting, the broadcast continues.
When send_ignore_reprint is set to 0 and the article is judged to be a reprint, the broadcast is stopped.
send_ignore_reprint defaults to 0.
:type send_ignore_reprint: int
:param client_msg_id: developer-side broadcast msgid, limited to 64 bytes
:type client_msg_id: str
:return: the returned JSON data packet
"""
video_data = {
'media_id': media_id
}
if title:
video_data['title'] = title
if description:
video_data['description'] = description
return self._send_mass_message(
group_or_users,
'mpvideo',
{
'mpvideo': video_data
},
is_to_all,
preview,
send_ignore_reprint,
client_msg_id,
) | python | def send_mass_video(self, group_or_users, media_id, title=None,
description=None, is_to_all=False, preview=False,
send_ignore_reprint=0, client_msg_id=None):
"""
Send a video message to users in bulk (mass broadcast).
For details, please refer to
https://mp.weixin.qq.com/wiki?id=mp1481187827_i0l21
:param group_or_users: an integer selects broadcast by group/tag id; a list or tuple broadcasts to the given OpenID list.
When is_to_all is True, pass None to send to all users.
:param media_id: media ID of the video. It can be uploaded via :func:`upload_video`.
:param title: video title
:param description: video description
:param is_to_all: whether to send to every user; True broadcasts the message to all users,
False sends it to the users of the group identified by group_id
:type is_to_all: bool
:param preview: whether to send a preview; in that case group_or_users should be a single openid string
:type preview: bool
:param send_ignore_reprint: whether to continue the broadcast when an article is judged to be a reprint.
When send_ignore_reprint is set to 1 and the article is judged to be a reprint whose original allows reprinting, the broadcast continues.
When send_ignore_reprint is set to 0 and the article is judged to be a reprint, the broadcast is stopped.
send_ignore_reprint defaults to 0.
:type send_ignore_reprint: int
:param client_msg_id: developer-side broadcast msgid, limited to 64 bytes
:type client_msg_id: str
:return: the returned JSON data packet
"""
video_data = {
'media_id': media_id
}
if title:
video_data['title'] = title
if description:
video_data['description'] = description
return self._send_mass_message(
group_or_users,
'mpvideo',
{
'mpvideo': video_data
},
is_to_all,
preview,
send_ignore_reprint,
client_msg_id,
) | ['def', 'send_mass_video', '(', 'self', ',', 'group_or_users', ',', 'media_id', ',', 'title', '=', 'None', ',', 'description', '=', 'None', ',', 'is_to_all', '=', 'False', ',', 'preview', '=', 'False', ',', 'send_ignore_reprint', '=', '0', ',', 'client_msg_id', '=', 'None', ')', ':', 'video_data', '=', '{', "'media_id'", ':', 'media_id', '}', 'if', 'title', ':', 'video_data', '[', "'title'", ']', '=', 'title', 'if', 'description', ':', 'video_data', '[', "'description'", ']', '=', 'description', 'return', 'self', '.', '_send_mass_message', '(', 'group_or_users', ',', "'mpvideo'", ',', '{', "'mpvideo'", ':', 'video_data', '}', ',', 'is_to_all', ',', 'preview', ',', 'send_ignore_reprint', ',', 'client_msg_id', ',', ')'] | 群发视频消息
For details, please refer to
https://mp.weixin.qq.com/wiki?id=mp1481187827_i0l21
:param group_or_users: an integer selects broadcast by group/tag id; a list or tuple broadcasts to the given OpenID list.
When is_to_all is True, pass None to send to all users.
:param media_id: media ID of the video. It can be uploaded via :func:`upload_video`.
:param title: video title
:param description: video description
:param is_to_all: whether to send to every user; True broadcasts the message to all users,
False sends it to the users of the group identified by group_id
:type is_to_all: bool
:param preview: whether to send a preview; in that case group_or_users should be a single openid string
:type preview: bool
:param send_ignore_reprint: whether to continue the broadcast when an article is judged to be a reprint.
When send_ignore_reprint is set to 1 and the article is judged to be a reprint whose original allows reprinting, the broadcast continues.
When send_ignore_reprint is set to 0 and the article is judged to be a reprint, the broadcast is stopped.
send_ignore_reprint defaults to 0.
:type send_ignore_reprint: int
:param client_msg_id: developer-side broadcast msgid, limited to 64 bytes
:type client_msg_id: str
:return: the returned JSON data packet | ['群发视频消息'] | train | https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/message.py#L450-L496 |
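A hedged usage sketch; the WeChatClient construction and the .message accessor follow wechatpy's usual pattern, and every identifier below is a placeholder rather than a value taken from the code above.
# Hypothetical mass video broadcast; appid, secret, tag id and media_id are placeholders.
from wechatpy import WeChatClient

client = WeChatClient('your-appid', 'your-secret')
result = client.message.send_mass_video(
    group_or_users=101,                 # tag/group id, or a list of OpenIDs
    media_id='MEDIA_ID_FROM_UPLOAD',
    title='Weekly update',
    description='Short demo clip',
)
print(result)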
9,447 | BerkeleyAutomation/autolab_core | autolab_core/points.py | NormalCloud._check_valid_data | def _check_valid_data(self, data):
"""Checks that the incoming data is a 3 x #elements ndarray of normal
vectors.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to verify.
Raises
------
ValueError
If the data is not of the correct shape or type, or if the vectors
therein are not normalized.
"""
if data.dtype.type != np.float32 and data.dtype.type != np.float64:
raise ValueError('Must initialize normals clouds with a numpy float ndarray')
if data.shape[0] != 3:
raise ValueError('Illegal data array passed to normal cloud. Must have 3 coordinates')
if len(data.shape) > 2:
raise ValueError('Illegal data array passed to normal cloud. Must have 1 or 2 dimensions')
if np.any((np.abs(np.linalg.norm(data, axis=0) - 1) > 1e-4) & (np.linalg.norm(data, axis=0) != 0)):
raise ValueError('Illegal data array passed to normal cloud. Must have norm=1.0 or norm=0.0') | python | def _check_valid_data(self, data):
"""Checks that the incoming data is a 3 x #elements ndarray of normal
vectors.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to verify.
Raises
------
ValueError
If the data is not of the correct shape or type, or if the vectors
therein are not normalized.
"""
if data.dtype.type != np.float32 and data.dtype.type != np.float64:
raise ValueError('Must initialize normals clouds with a numpy float ndarray')
if data.shape[0] != 3:
raise ValueError('Illegal data array passed to normal cloud. Must have 3 coordinates')
if len(data.shape) > 2:
raise ValueError('Illegal data array passed to normal cloud. Must have 1 or 2 dimensions')
if np.any((np.abs(np.linalg.norm(data, axis=0) - 1) > 1e-4) & (np.linalg.norm(data, axis=0) != 0)):
raise ValueError('Illegal data array passed to normal cloud. Must have norm=1.0 or norm=0.0') | ['def', '_check_valid_data', '(', 'self', ',', 'data', ')', ':', 'if', 'data', '.', 'dtype', '.', 'type', '!=', 'np', '.', 'float32', 'and', 'data', '.', 'dtype', '.', 'type', '!=', 'np', '.', 'float64', ':', 'raise', 'ValueError', '(', "'Must initialize normals clouds with a numpy float ndarray'", ')', 'if', 'data', '.', 'shape', '[', '0', ']', '!=', '3', ':', 'raise', 'ValueError', '(', "'Illegal data array passed to normal cloud. Must have 3 coordinates'", ')', 'if', 'len', '(', 'data', '.', 'shape', ')', '>', '2', ':', 'raise', 'ValueError', '(', "'Illegal data array passed to normal cloud. Must have 1 or 2 dimensions'", ')', 'if', 'np', '.', 'any', '(', '(', 'np', '.', 'abs', '(', 'np', '.', 'linalg', '.', 'norm', '(', 'data', ',', 'axis', '=', '0', ')', '-', '1', ')', '>', '1e-4', ')', '&', '(', 'np', '.', 'linalg', '.', 'norm', '(', 'data', ',', 'axis', '=', '0', ')', '!=', '0', ')', ')', ':', 'raise', 'ValueError', '(', "'Illegal data array passed to normal cloud. Must have norm=1.0 or norm=0.0'", ')'] | Checks that the incoming data is a 3 x #elements ndarray of normal
vectors.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to verify.
Raises
------
ValueError
If the data is not of the correct shape or type, or if the vectors
therein are not normalized. | ['Checks', 'that', 'the', 'incoming', 'data', 'is', 'a', '3', 'x', '#elements', 'ndarray', 'of', 'normal', 'vectors', '.'] | train | https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/points.py#L852-L874 |
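The check above only accepts float arrays of shape (3, N) whose columns have unit (or zero) norm. Below is a small construction sketch that satisfies it; the NormalCloud constructor signature (a data array plus a frame name) is an assumption, not confirmed from the file.
# Sketch of data passing _check_valid_data; the NormalCloud(data, frame=...) signature is assumed.
import numpy as np
from autolab_core import NormalCloud

data = np.random.rand(3, 10).astype(np.float32)
data = data / np.linalg.norm(data, axis=0)   # normalize every column to unit length
normals = NormalCloud(data, frame='world')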
9,448 | alvations/pywsd | pywsd/similarity.py | sim | def sim(sense1: "wn.Synset", sense2: "wn.Synset", option: str = "path") -> float:
"""
Calculates similarity based on user's choice.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('path', 'wup', 'lch', 'res', 'jcn', 'lin').
:return: A float, similarity measurement.
"""
option = option.lower()
if option.lower() in ["path", "path_similarity",
"wup", "wupa", "wu-palmer", "wu-palmer",
'lch', "leacock-chordorow"]:
return similarity_by_path(sense1, sense2, option)
elif option.lower() in ["res", "resnik",
"jcn","jiang-conrath",
"lin"]:
return similarity_by_infocontent(sense1, sense2, option) | python | def sim(sense1: "wn.Synset", sense2: "wn.Synset", option: str = "path") -> float:
"""
Calculates similarity based on user's choice.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('path', 'wup', 'lch', 'res', 'jcn', 'lin').
:return: A float, similarity measurement.
"""
option = option.lower()
if option.lower() in ["path", "path_similarity",
"wup", "wupa", "wu-palmer", "wu-palmer",
'lch', "leacock-chordorow"]:
return similarity_by_path(sense1, sense2, option)
elif option.lower() in ["res", "resnik",
"jcn","jiang-conrath",
"lin"]:
return similarity_by_infocontent(sense1, sense2, option) | ['def', 'sim', '(', 'sense1', ':', '"wn.Synset"', ',', 'sense2', ':', '"wn.Synset"', ',', 'option', ':', 'str', '=', '"path"', ')', '->', 'float', ':', 'option', '=', 'option', '.', 'lower', '(', ')', 'if', 'option', '.', 'lower', '(', ')', 'in', '[', '"path"', ',', '"path_similarity"', ',', '"wup"', ',', '"wupa"', ',', '"wu-palmer"', ',', '"wu-palmer"', ',', "'lch'", ',', '"leacock-chordorow"', ']', ':', 'return', 'similarity_by_path', '(', 'sense1', ',', 'sense2', ',', 'option', ')', 'elif', 'option', '.', 'lower', '(', ')', 'in', '[', '"res"', ',', '"resnik"', ',', '"jcn"', ',', '"jiang-conrath"', ',', '"lin"', ']', ':', 'return', 'similarity_by_infocontent', '(', 'sense1', ',', 'sense2', ',', 'option', ')'] | Calculates similarity based on user's choice.
:param sense1: A synset.
:param sense2: A synset.
:param option: String, one of ('path', 'wup', 'lch', 'res', 'jcn', 'lin').
:return: A float, similarity measurement. | ['Calculates', 'similarity', 'based', 'on', 'user', 's', 'choice', '.'] | train | https://github.com/alvations/pywsd/blob/4c12394c8adbcfed71dd912bdbef2e36370821bf/pywsd/similarity.py#L73-L90 |
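For example, comparing two WordNet synsets with the Wu-Palmer option; the import path for sim is assumed from the file location above, and the NLTK wordnet corpus must be installed.
# Hypothetical usage of sim() with NLTK WordNet synsets.
from nltk.corpus import wordnet as wn
from pywsd.similarity import sim   # import path assumed from pywsd/similarity.py

dog = wn.synset('dog.n.01')
cat = wn.synset('cat.n.01')
print(sim(dog, cat, option='wup'))   # Wu-Palmer similarity, a float in (0, 1]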
9,449 | senaite/senaite.jsonapi | src/senaite/jsonapi/fieldmanagers.py | DateTimeFieldManager.set | def set(self, instance, value, **kw):
"""Converts the value into a DateTime object before setting.
"""
if value:
try:
value = DateTime(value)
except SyntaxError:
logger.warn("Value '{}' is not a valid DateTime string"
.format(value))
return False
self._set(instance, value, **kw) | python | def set(self, instance, value, **kw):
"""Converts the value into a DateTime object before setting.
"""
if value:
try:
value = DateTime(value)
except SyntaxError:
logger.warn("Value '{}' is not a valid DateTime string"
.format(value))
return False
self._set(instance, value, **kw) | ['def', 'set', '(', 'self', ',', 'instance', ',', 'value', ',', '*', '*', 'kw', ')', ':', 'if', 'value', ':', 'try', ':', 'value', '=', 'DateTime', '(', 'value', ')', 'except', 'SyntaxError', ':', 'logger', '.', 'warn', '(', '"Value \'{}\' is not a valid DateTime string"', '.', 'format', '(', 'value', ')', ')', 'return', 'False', 'self', '.', '_set', '(', 'instance', ',', 'value', ',', '*', '*', 'kw', ')'] | Converts the value into a DateTime object before setting. | ['Converts', 'the', 'value', 'into', 'a', 'DateTime', 'object', 'before', 'setting', '.'] | train | https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/fieldmanagers.py#L326-L337 |
9,450 | wummel/linkchecker | scripts/update_iana_uri_schemes.py | parse_csv_file | def parse_csv_file(url, res):
"""Parse given URL and write res with {scheme -> description}"""
response = requests.get(url, stream=True)
reader = csv.reader(response.iter_lines())
first_row = True
for row in reader:
if first_row:
# skip first row
first_row = False
else:
scheme, template, description, reference = row
res[scheme] = description | python | def parse_csv_file(url, res):
"""Parse given URL and write res with {scheme -> description}"""
response = requests.get(url, stream=True)
reader = csv.reader(response.iter_lines())
first_row = True
for row in reader:
if first_row:
# skip first row
first_row = False
else:
scheme, template, description, reference = row
res[scheme] = description | ['def', 'parse_csv_file', '(', 'url', ',', 'res', ')', ':', 'response', '=', 'requests', '.', 'get', '(', 'url', ',', 'stream', '=', 'True', ')', 'reader', '=', 'csv', '.', 'reader', '(', 'response', '.', 'iter_lines', '(', ')', ')', 'first_row', '=', 'True', 'for', 'row', 'in', 'reader', ':', 'if', 'first_row', ':', '# skip first row', 'first_row', '=', 'False', 'else', ':', 'scheme', ',', 'template', ',', 'description', ',', 'reference', '=', 'row', 'res', '[', 'scheme', ']', '=', 'description'] | Parse given URL and write res with {scheme -> description} | ['Parse', 'given', 'URL', 'and', 'write', 'res', 'with', '{', 'scheme', '-', '>', 'description', '}'] | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/scripts/update_iana_uri_schemes.py#L91-L102 |
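A call sketch; the IANA CSV URL below is an assumption about what the surrounding update script passes in, not a value taken from the file itself.
# Hypothetical call; the URL is an assumption for illustration.
schemes = {}
parse_csv_file('https://www.iana.org/assignments/uri-schemes/uri-schemes-1.csv', schemes)
print(schemes.get('https'))   # description text for the https scheme, if present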
9,451 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | NDArray._at | def _at(self, idx):
"""Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32)
"""
handle = NDArrayHandle()
if idx < 0:
length = self.shape[0]
idx += length
if idx < 0:
raise IndexError('index %d is out of bounds for axis 0 with size %d'
% (idx-length, length))
check_call(_LIB.MXNDArrayAt(
self.handle, mx_uint(idx), ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable) | python | def _at(self, idx):
"""Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32)
"""
handle = NDArrayHandle()
if idx < 0:
length = self.shape[0]
idx += length
if idx < 0:
raise IndexError('index %d is out of bounds for axis 0 with size %d'
% (idx-length, length))
check_call(_LIB.MXNDArrayAt(
self.handle, mx_uint(idx), ctypes.byref(handle)))
return NDArray(handle=handle, writable=self.writable) | ['def', '_at', '(', 'self', ',', 'idx', ')', ':', 'handle', '=', 'NDArrayHandle', '(', ')', 'if', 'idx', '<', '0', ':', 'length', '=', 'self', '.', 'shape', '[', '0', ']', 'idx', '+=', 'length', 'if', 'idx', '<', '0', ':', 'raise', 'IndexError', '(', "'index %d is out of bounds for axis 0 with size %d'", '%', '(', 'idx', '-', 'length', ',', 'length', ')', ')', 'check_call', '(', '_LIB', '.', 'MXNDArrayAt', '(', 'self', '.', 'handle', ',', 'mx_uint', '(', 'idx', ')', ',', 'ctypes', '.', 'byref', '(', 'handle', ')', ')', ')', 'return', 'NDArray', '(', 'handle', '=', 'handle', ',', 'writable', '=', 'self', '.', 'writable', ')'] | Returns a view of the array sliced at `idx` in the first dim.
This is called through ``x[idx]``.
Parameters
----------
idx : int
index for slicing the `NDArray` in the first dim.
Returns
-------
NDArray
`NDArray` sharing the memory with the current one sliced at `idx` in the first dim.
Examples
--------
>>> a = mx.nd.array([[1,2], [3, 4]])
>>> a[1].asnumpy()
array([ 3., 4.], dtype=float32)
>>> b = mx.nd.array([1, 2, 3, 4])
>>> b[0].asnumpy()
array([ 1.], dtype=float32) | ['Returns', 'a', 'view', 'of', 'the', 'array', 'sliced', 'at', 'idx', 'in', 'the', 'first', 'dim', '.', 'This', 'is', 'called', 'through', 'x', '[', 'idx', ']', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L912-L944 |
9,452 | reiinakano/xcessiv | xcessiv/functions.py | import_object_from_path | def import_object_from_path(path, object):
"""Used to import an object from an absolute path.
This function takes an absolute path and imports it as a Python module.
It then returns the object with name `object` from the imported module.
Args:
path (string): Absolute file path of .py file to import
object (string): Name of object to extract from imported module
"""
with open(path) as f:
return import_object_from_string_code(f.read(), object) | python | def import_object_from_path(path, object):
"""Used to import an object from an absolute path.
This function takes an absolute path and imports it as a Python module.
It then returns the object with name `object` from the imported module.
Args:
path (string): Absolute file path of .py file to import
object (string): Name of object to extract from imported module
"""
with open(path) as f:
return import_object_from_string_code(f.read(), object) | ['def', 'import_object_from_path', '(', 'path', ',', 'object', ')', ':', 'with', 'open', '(', 'path', ')', 'as', 'f', ':', 'return', 'import_object_from_string_code', '(', 'f', '.', 'read', '(', ')', ',', 'object', ')'] | Used to import an object from an absolute path.
This function takes an absolute path and imports it as a Python module.
It then returns the object with name `object` from the imported module.
Args:
path (string): Absolute file path of .py file to import
object (string): Name of object to extract from imported module | ['Used', 'to', 'import', 'an', 'object', 'from', 'an', 'absolute', 'path', '.'] | train | https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L36-L48 |
9,453 | pbrisk/unicum | unicum/linkedobject.py | LinkedObject.register_link | def register_link(self, obj, attr=None):
"""
creates link from obj.attr to self
:param obj: object to register link to
:param attr: attribute name to register link to
"""
name = repr(self)
if not name:
return self
l = self.__class__._get_links()
if name not in l:
l[name] = set()
v = WeakAttrLink(obj, attr)
if v not in l[name]:
l[name].add(v)
return self | python | def register_link(self, obj, attr=None):
"""
creates link from obj.attr to self
:param obj: object to register link to
:param attr: attribute name to register link to
"""
name = repr(self)
if not name:
return self
l = self.__class__._get_links()
if name not in l:
l[name] = set()
v = WeakAttrLink(obj, attr)
if v not in l[name]:
l[name].add(v)
return self | ['def', 'register_link', '(', 'self', ',', 'obj', ',', 'attr', '=', 'None', ')', ':', 'name', '=', 'repr', '(', 'self', ')', 'if', 'not', 'name', ':', 'return', 'self', 'l', '=', 'self', '.', '__class__', '.', '_get_links', '(', ')', 'if', 'name', 'not', 'in', 'l', ':', 'l', '[', 'name', ']', '=', 'set', '(', ')', 'v', '=', 'WeakAttrLink', '(', 'obj', ',', 'attr', ')', 'if', 'v', 'not', 'in', 'l', '[', 'name', ']', ':', 'l', '[', 'name', ']', '.', 'add', '(', 'v', ')', 'return', 'self'] | creates link from obj.attr to self
:param obj: object to register link to
:param attr: attribute name to register link to | ['creates', 'link', 'from', 'obj', '.', 'attr', 'to', 'self', ':', 'param', 'obj', ':', 'object', 'to', 'register', 'link', 'to', ':', 'param', 'attr', ':', 'attribute', 'name', 'to', 'register', 'link', 'to'] | train | https://github.com/pbrisk/unicum/blob/24bfa7355f36847a06646c58e9fd75bd3b689bfe/unicum/linkedobject.py#L80-L95 |
9,454 | miku/gluish | gluish/format.py | iter_tsv | def iter_tsv(input_stream, cols=None, encoding='utf-8'):
"""
If a tuple is given in cols, use the elements as names to construct
a namedtuple.
Columns can be marked as ignored by using ``X`` or ``0`` as column name.
Example (ignore the first four columns of a five column TSV):
::
def run(self):
with self.input().open() as handle:
for row in handle.iter_tsv(cols=('X', 'X', 'X', 'X', 'iln')):
print(row.iln)
"""
if cols:
cols = [c if not c in ('x', 'X', 0, None) else random_string(length=5)
for c in cols]
Record = collections.namedtuple('Record', cols)
for line in input_stream:
yield Record._make(line.decode(encoding).rstrip('\n').split('\t'))
else:
for line in input_stream:
yield tuple(line.decode(encoding).rstrip('\n').split('\t')) | python | def iter_tsv(input_stream, cols=None, encoding='utf-8'):
"""
If a tuple is given in cols, use the elements as names to construct
a namedtuple.
Columns can be marked as ignored by using ``X`` or ``0`` as column name.
Example (ignore the first four columns of a five column TSV):
::
def run(self):
with self.input().open() as handle:
for row in handle.iter_tsv(cols=('X', 'X', 'X', 'X', 'iln')):
print(row.iln)
"""
if cols:
cols = [c if not c in ('x', 'X', 0, None) else random_string(length=5)
for c in cols]
Record = collections.namedtuple('Record', cols)
for line in input_stream:
yield Record._make(line.decode(encoding).rstrip('\n').split('\t'))
else:
for line in input_stream:
yield tuple(line.decode(encoding).rstrip('\n').split('\t')) | ['def', 'iter_tsv', '(', 'input_stream', ',', 'cols', '=', 'None', ',', 'encoding', '=', "'utf-8'", ')', ':', 'if', 'cols', ':', 'cols', '=', '[', 'c', 'if', 'not', 'c', 'in', '(', "'x'", ',', "'X'", ',', '0', ',', 'None', ')', 'else', 'random_string', '(', 'length', '=', '5', ')', 'for', 'c', 'in', 'cols', ']', 'Record', '=', 'collections', '.', 'namedtuple', '(', "'Record'", ',', 'cols', ')', 'for', 'line', 'in', 'input_stream', ':', 'yield', 'Record', '.', '_make', '(', 'line', '.', 'decode', '(', 'encoding', ')', '.', 'rstrip', '(', "'\\n'", ')', '.', 'split', '(', "'\\t'", ')', ')', 'else', ':', 'for', 'line', 'in', 'input_stream', ':', 'yield', 'tuple', '(', 'line', '.', 'decode', '(', 'encoding', ')', '.', 'rstrip', '(', "'\\n'", ')', '.', 'split', '(', "'\\t'", ')', ')'] | If a tuple is given in cols, use the elements as names to construct
a namedtuple.
Columns can be marked as ignored by using ``X`` or ``0`` as column name.
Example (ignore the first four columns of a five column TSV):
::
def run(self):
with self.input().open() as handle:
for row in handle.iter_tsv(cols=('X', 'X', 'X', 'X', 'iln')):
print(row.iln) | ['If', 'a', 'tuple', 'is', 'given', 'in', 'cols', 'use', 'the', 'elements', 'as', 'names', 'to', 'construct', 'a', 'namedtuple', '.'] | train | https://github.com/miku/gluish/blob/56d3ac4f41a944e31ecac0aa3b6d1dc2ce705e29/gluish/format.py#L65-L89 |
9,455 | ngmarchant/oasis | oasis/passive.py | PassiveSampler.reset | def reset(self):
"""Resets the sampler to its initial state
Note
----
This will destroy the label cache and history of estimates.
"""
self._TP = np.zeros(self._n_class)
self._FP = np.zeros(self._n_class)
self._FN = np.zeros(self._n_class)
self._TN = np.zeros(self._n_class)
self.t_ = 0
self._queried_oracle = np.repeat(False, self._max_iter)
self.cached_labels_ = np.repeat(np.nan, self._n_items)
self._estimate = np.tile(np.nan, [self._max_iter, self._n_class]) | python | def reset(self):
"""Resets the sampler to its initial state
Note
----
This will destroy the label cache and history of estimates.
"""
self._TP = np.zeros(self._n_class)
self._FP = np.zeros(self._n_class)
self._FN = np.zeros(self._n_class)
self._TN = np.zeros(self._n_class)
self.t_ = 0
self._queried_oracle = np.repeat(False, self._max_iter)
self.cached_labels_ = np.repeat(np.nan, self._n_items)
self._estimate = np.tile(np.nan, [self._max_iter, self._n_class]) | ['def', 'reset', '(', 'self', ')', ':', 'self', '.', '_TP', '=', 'np', '.', 'zeros', '(', 'self', '.', '_n_class', ')', 'self', '.', '_FP', '=', 'np', '.', 'zeros', '(', 'self', '.', '_n_class', ')', 'self', '.', '_FN', '=', 'np', '.', 'zeros', '(', 'self', '.', '_n_class', ')', 'self', '.', '_TN', '=', 'np', '.', 'zeros', '(', 'self', '.', '_n_class', ')', 'self', '.', 't_', '=', '0', 'self', '.', '_queried_oracle', '=', 'np', '.', 'repeat', '(', 'False', ',', 'self', '.', '_max_iter', ')', 'self', '.', 'cached_labels_', '=', 'np', '.', 'repeat', '(', 'np', '.', 'nan', ',', 'self', '.', '_n_items', ')', 'self', '.', '_estimate', '=', 'np', '.', 'tile', '(', 'np', '.', 'nan', ',', '[', 'self', '.', '_max_iter', ',', 'self', '.', '_n_class', ']', ')'] | Resets the sampler to its initial state
Note
----
This will destroy the label cache and history of estimates. | ['Resets', 'the', 'sampler', 'to', 'its', 'initial', 'state'] | train | https://github.com/ngmarchant/oasis/blob/28a037a8924b85ae97db8a93960a910a219d6a4a/oasis/passive.py#L141-L155 |
9,456 | petl-developers/petl | petl/transform/setops.py | recorddiff | def recorddiff(a, b, buffersize=None, tempdir=None, cache=True, strict=False):
"""
Find the difference between records in two tables. E.g.::
>>> import petl as etl
>>> a = [['foo', 'bar', 'baz'],
... ['A', 1, True],
... ['C', 7, False],
... ['B', 2, False],
... ['C', 9, True]]
>>> b = [['bar', 'foo', 'baz'],
... [2, 'B', False],
... [9, 'A', False],
... [3, 'B', True],
... [9, 'C', True]]
>>> added, subtracted = etl.recorddiff(a, b)
>>> added
+-----+-----+-------+
| bar | foo | baz |
+=====+=====+=======+
| 3 | 'B' | True |
+-----+-----+-------+
| 9 | 'A' | False |
+-----+-----+-------+
>>> subtracted
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'A' | 1 | True |
+-----+-----+-------+
| 'C' | 7 | False |
+-----+-----+-------+
Convenient shorthand for
``(recordcomplement(b, a), recordcomplement(a, b))``. See also
:func:`petl.transform.setops.recordcomplement`.
See also the discussion of the `buffersize`, `tempdir` and `cache`
arguments under the :func:`petl.transform.sorts.sort` function.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used.
"""
added = recordcomplement(b, a, buffersize=buffersize, tempdir=tempdir,
cache=cache, strict=strict)
subtracted = recordcomplement(a, b, buffersize=buffersize, tempdir=tempdir,
cache=cache, strict=strict)
return added, subtracted | python | def recorddiff(a, b, buffersize=None, tempdir=None, cache=True, strict=False):
"""
Find the difference between records in two tables. E.g.::
>>> import petl as etl
>>> a = [['foo', 'bar', 'baz'],
... ['A', 1, True],
... ['C', 7, False],
... ['B', 2, False],
... ['C', 9, True]]
>>> b = [['bar', 'foo', 'baz'],
... [2, 'B', False],
... [9, 'A', False],
... [3, 'B', True],
... [9, 'C', True]]
>>> added, subtracted = etl.recorddiff(a, b)
>>> added
+-----+-----+-------+
| bar | foo | baz |
+=====+=====+=======+
| 3 | 'B' | True |
+-----+-----+-------+
| 9 | 'A' | False |
+-----+-----+-------+
>>> subtracted
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'A' | 1 | True |
+-----+-----+-------+
| 'C' | 7 | False |
+-----+-----+-------+
Convenient shorthand for
``(recordcomplement(b, a), recordcomplement(a, b))``. See also
:func:`petl.transform.setops.recordcomplement`.
See also the discussion of the `buffersize`, `tempdir` and `cache`
arguments under the :func:`petl.transform.sorts.sort` function.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used.
"""
added = recordcomplement(b, a, buffersize=buffersize, tempdir=tempdir,
cache=cache, strict=strict)
subtracted = recordcomplement(a, b, buffersize=buffersize, tempdir=tempdir,
cache=cache, strict=strict)
return added, subtracted | ['def', 'recorddiff', '(', 'a', ',', 'b', ',', 'buffersize', '=', 'None', ',', 'tempdir', '=', 'None', ',', 'cache', '=', 'True', ',', 'strict', '=', 'False', ')', ':', 'added', '=', 'recordcomplement', '(', 'b', ',', 'a', ',', 'buffersize', '=', 'buffersize', ',', 'tempdir', '=', 'tempdir', ',', 'cache', '=', 'cache', ',', 'strict', '=', 'strict', ')', 'subtracted', '=', 'recordcomplement', '(', 'a', ',', 'b', ',', 'buffersize', '=', 'buffersize', ',', 'tempdir', '=', 'tempdir', ',', 'cache', '=', 'cache', ',', 'strict', '=', 'strict', ')', 'return', 'added', ',', 'subtracted'] | Find the difference between records in two tables. E.g.::
>>> import petl as etl
>>> a = [['foo', 'bar', 'baz'],
... ['A', 1, True],
... ['C', 7, False],
... ['B', 2, False],
... ['C', 9, True]]
>>> b = [['bar', 'foo', 'baz'],
... [2, 'B', False],
... [9, 'A', False],
... [3, 'B', True],
... [9, 'C', True]]
>>> added, subtracted = etl.recorddiff(a, b)
>>> added
+-----+-----+-------+
| bar | foo | baz |
+=====+=====+=======+
| 3 | 'B' | True |
+-----+-----+-------+
| 9 | 'A' | False |
+-----+-----+-------+
>>> subtracted
+-----+-----+-------+
| foo | bar | baz |
+=====+=====+=======+
| 'A' | 1 | True |
+-----+-----+-------+
| 'C' | 7 | False |
+-----+-----+-------+
Convenient shorthand for
``(recordcomplement(b, a), recordcomplement(a, b))``. See also
:func:`petl.transform.setops.recordcomplement`.
See also the discussion of the `buffersize`, `tempdir` and `cache`
arguments under the :func:`petl.transform.sorts.sort` function.
.. versionchanged:: 1.1.0
If `strict` is `True` then strict set-like behaviour is used. | ['Find', 'the', 'difference', 'between', 'records', 'in', 'two', 'tables', '.', 'E', '.', 'g', '.', '::'] | train | https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/setops.py#L293-L344 |
9,457 | ZELLMECHANIK-DRESDEN/dclab | dclab/kde_methods.py | kde_none | def kde_none(events_x, events_y, xout=None, yout=None):
""" No Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
Notes
-----
This method is a convenience method that always returns ones in the shape
that the other methods in this module produce.
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
return np.ones(xout.shape) | python | def kde_none(events_x, events_y, xout=None, yout=None):
""" No Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
Notes
-----
This method is a convenience method that always returns ones in the shape
that the other methods in this module produce.
"""
valid_combi = ((xout is None and yout is None) or
(xout is not None and yout is not None)
)
if not valid_combi:
raise ValueError("Both `xout` and `yout` must be (un)set.")
if yout is None and yout is None:
xout = events_x
yout = events_y
return np.ones(xout.shape) | ['def', 'kde_none', '(', 'events_x', ',', 'events_y', ',', 'xout', '=', 'None', ',', 'yout', '=', 'None', ')', ':', 'valid_combi', '=', '(', '(', 'xout', 'is', 'None', 'and', 'yout', 'is', 'None', ')', 'or', '(', 'xout', 'is', 'not', 'None', 'and', 'yout', 'is', 'not', 'None', ')', ')', 'if', 'not', 'valid_combi', ':', 'raise', 'ValueError', '(', '"Both `xout` and `yout` must be (un)set."', ')', 'if', 'yout', 'is', 'None', 'and', 'yout', 'is', 'None', ':', 'xout', '=', 'events_x', 'yout', '=', 'events_y', 'return', 'np', '.', 'ones', '(', 'xout', '.', 'shape', ')'] | No Kernel Density Estimation
Parameters
----------
events_x, events_y: 1D ndarray
The input points for kernel density estimation. Input
is flattened automatically.
xout, yout: ndarray
The coordinates at which the KDE should be computed.
If set to none, input coordinates are used.
Returns
-------
density: ndarray, same shape as `xout`
The KDE for the points in (xout, yout)
Notes
-----
This method is a convenience method that always returns ones in the shape
that the other methods in this module produce. | ['No', 'Kernel', 'Density', 'Estimation'] | train | https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/kde_methods.py#L177-L209 |
9,458 | saltstack/salt | salt/fileserver/s3fs.py | _get_cached_file_name | def _get_cached_file_name(bucket_name, saltenv, path):
'''
Return the cached file name for a bucket path file
'''
file_path = os.path.join(_get_cache_dir(), saltenv, bucket_name, path)
# make sure bucket and saltenv directories exist
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
return file_path | python | def _get_cached_file_name(bucket_name, saltenv, path):
'''
Return the cached file name for a bucket path file
'''
file_path = os.path.join(_get_cache_dir(), saltenv, bucket_name, path)
# make sure bucket and saltenv directories exist
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
return file_path | ['def', '_get_cached_file_name', '(', 'bucket_name', ',', 'saltenv', ',', 'path', ')', ':', 'file_path', '=', 'os', '.', 'path', '.', 'join', '(', '_get_cache_dir', '(', ')', ',', 'saltenv', ',', 'bucket_name', ',', 'path', ')', '# make sure bucket and saltenv directories exist', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'os', '.', 'path', '.', 'dirname', '(', 'file_path', ')', ')', ':', 'os', '.', 'makedirs', '(', 'os', '.', 'path', '.', 'dirname', '(', 'file_path', ')', ')', 'return', 'file_path'] | Return the cached file name for a bucket path file | ['Return', 'the', 'cached', 'file', 'name', 'for', 'a', 'bucket', 'path', 'file'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileserver/s3fs.py#L380-L391 |
9,459 | tanghaibao/goatools | goatools/godag_plot.py | GODagSmallPlot._get_item_str | def _get_item_str(self, res):
"""Return genes in any of these formats:
1. 19264, 17319, 12520, 12043, 74131, 22163, 12575
2. Ptprc, Mif, Cd81, Bcl2, Sash3, Tnfrsf4, Cdkn1a
3. 7: Ptprc, Mif, Cd81, Bcl2, Sash3...
"""
npl = self.pltvars.items_p_line # Number of items Per Line
prt_items = sorted([self.__get_genestr(itemid) for itemid in res.study_items])
prt_multiline = [prt_items[i:i+npl] for i in range(0, len(prt_items), npl)]
num_items = len(prt_items)
if self.study_items_max is None:
genestr = "\n".join([", ".join(str(e) for e in sublist) for sublist in prt_multiline])
return "{N}) {GENES}".format(N=num_items, GENES=genestr)
else:
if num_items <= self.study_items_max:
strs = [", ".join(str(e) for e in sublist) for sublist in prt_multiline]
genestr = "\n".join([", ".join(str(e) for e in sublist) for sublist in prt_multiline])
return genestr
else:
short_list = prt_items[:self.study_items_max]
short_mult = [short_list[i:i+npl] for i in range(0, len(short_list), npl)]
short_str = "\n".join([", ".join(str(e) for e in sublist) for sublist in short_mult])
return "".join(["{N} genes; ".format(N=num_items), short_str, "..."]) | python | def _get_item_str(self, res):
"""Return genes in any of these formats:
1. 19264, 17319, 12520, 12043, 74131, 22163, 12575
2. Ptprc, Mif, Cd81, Bcl2, Sash3, Tnfrsf4, Cdkn1a
3. 7: Ptprc, Mif, Cd81, Bcl2, Sash3...
"""
npl = self.pltvars.items_p_line # Number of items Per Line
prt_items = sorted([self.__get_genestr(itemid) for itemid in res.study_items])
prt_multiline = [prt_items[i:i+npl] for i in range(0, len(prt_items), npl)]
num_items = len(prt_items)
if self.study_items_max is None:
genestr = "\n".join([", ".join(str(e) for e in sublist) for sublist in prt_multiline])
return "{N}) {GENES}".format(N=num_items, GENES=genestr)
else:
if num_items <= self.study_items_max:
strs = [", ".join(str(e) for e in sublist) for sublist in prt_multiline]
genestr = "\n".join([", ".join(str(e) for e in sublist) for sublist in prt_multiline])
return genestr
else:
short_list = prt_items[:self.study_items_max]
short_mult = [short_list[i:i+npl] for i in range(0, len(short_list), npl)]
short_str = "\n".join([", ".join(str(e) for e in sublist) for sublist in short_mult])
return "".join(["{N} genes; ".format(N=num_items), short_str, "..."]) | ['def', '_get_item_str', '(', 'self', ',', 'res', ')', ':', 'npl', '=', 'self', '.', 'pltvars', '.', 'items_p_line', '# Number of items Per Line', 'prt_items', '=', 'sorted', '(', '[', 'self', '.', '__get_genestr', '(', 'itemid', ')', 'for', 'itemid', 'in', 'res', '.', 'study_items', ']', ')', 'prt_multiline', '=', '[', 'prt_items', '[', 'i', ':', 'i', '+', 'npl', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'prt_items', ')', ',', 'npl', ')', ']', 'num_items', '=', 'len', '(', 'prt_items', ')', 'if', 'self', '.', 'study_items_max', 'is', 'None', ':', 'genestr', '=', '"\\n"', '.', 'join', '(', '[', '", "', '.', 'join', '(', 'str', '(', 'e', ')', 'for', 'e', 'in', 'sublist', ')', 'for', 'sublist', 'in', 'prt_multiline', ']', ')', 'return', '"{N}) {GENES}"', '.', 'format', '(', 'N', '=', 'num_items', ',', 'GENES', '=', 'genestr', ')', 'else', ':', 'if', 'num_items', '<=', 'self', '.', 'study_items_max', ':', 'strs', '=', '[', '", "', '.', 'join', '(', 'str', '(', 'e', ')', 'for', 'e', 'in', 'sublist', ')', 'for', 'sublist', 'in', 'prt_multiline', ']', 'genestr', '=', '"\\n"', '.', 'join', '(', '[', '", "', '.', 'join', '(', 'str', '(', 'e', ')', 'for', 'e', 'in', 'sublist', ')', 'for', 'sublist', 'in', 'prt_multiline', ']', ')', 'return', 'genestr', 'else', ':', 'short_list', '=', 'prt_items', '[', ':', 'self', '.', 'study_items_max', ']', 'short_mult', '=', '[', 'short_list', '[', 'i', ':', 'i', '+', 'npl', ']', 'for', 'i', 'in', 'range', '(', '0', ',', 'len', '(', 'short_list', ')', ',', 'npl', ')', ']', 'short_str', '=', '"\\n"', '.', 'join', '(', '[', '", "', '.', 'join', '(', 'str', '(', 'e', ')', 'for', 'e', 'in', 'sublist', ')', 'for', 'sublist', 'in', 'short_mult', ']', ')', 'return', '""', '.', 'join', '(', '[', '"{N} genes; "', '.', 'format', '(', 'N', '=', 'num_items', ')', ',', 'short_str', ',', '"..."', ']', ')'] | Return genes in any of these formats:
1. 19264, 17319, 12520, 12043, 74131, 22163, 12575
2. Ptprc, Mif, Cd81, Bcl2, Sash3, Tnfrsf4, Cdkn1a
3. 7: Ptprc, Mif, Cd81, Bcl2, Sash3... | ['Return', 'genes', 'in', 'any', 'of', 'these', 'formats', ':', '1', '.', '19264', '17319', '12520', '12043', '74131', '22163', '12575', '2', '.', 'Ptprc', 'Mif', 'Cd81', 'Bcl2', 'Sash3', 'Tnfrsf4', 'Cdkn1a', '3', '.', '7', ':', 'Ptprc', 'Mif', 'Cd81', 'Bcl2', 'Sash3', '...'] | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/godag_plot.py#L255-L277 |
9,460 | atztogo/phonopy | phonopy/file_IO.py | write_force_constants_to_hdf5 | def write_force_constants_to_hdf5(force_constants,
filename='force_constants.hdf5',
p2s_map=None,
physical_unit=None,
compression=None):
"""Write force constants in hdf5 format.
Parameters
----------
force_constants: ndarray
Force constants
shape=(n_satom,n_satom,3,3) or (n_patom,n_satom,3,3)
dtype=double
filename: str
Filename to be saved
p2s_map: ndarray
Primitive atom indices in supercell index system
shape=(n_patom,)
dtype=intc
physical_unit : str, optional
Physical unit used for force contants. Default is None.
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None.
"""
try:
import h5py
except ImportError:
raise ModuleNotFoundError("You need to install python-h5py.")
with h5py.File(filename, 'w') as w:
w.create_dataset('force_constants', data=force_constants,
compression=compression)
if p2s_map is not None:
w.create_dataset('p2s_map', data=p2s_map)
if physical_unit is not None:
dset = w.create_dataset('physical_unit', (1,),
dtype='S%d' % len(physical_unit))
dset[0] = np.string_(physical_unit) | python | def write_force_constants_to_hdf5(force_constants,
filename='force_constants.hdf5',
p2s_map=None,
physical_unit=None,
compression=None):
"""Write force constants in hdf5 format.
Parameters
----------
force_constants: ndarray
Force constants
shape=(n_satom,n_satom,3,3) or (n_patom,n_satom,3,3)
dtype=double
filename: str
Filename to be saved
p2s_map: ndarray
Primitive atom indices in supercell index system
shape=(n_patom,)
dtype=intc
physical_unit : str, optional
Physical unit used for force contants. Default is None.
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None.
"""
try:
import h5py
except ImportError:
raise ModuleNotFoundError("You need to install python-h5py.")
with h5py.File(filename, 'w') as w:
w.create_dataset('force_constants', data=force_constants,
compression=compression)
if p2s_map is not None:
w.create_dataset('p2s_map', data=p2s_map)
if physical_unit is not None:
dset = w.create_dataset('physical_unit', (1,),
dtype='S%d' % len(physical_unit))
dset[0] = np.string_(physical_unit) | ['def', 'write_force_constants_to_hdf5', '(', 'force_constants', ',', 'filename', '=', "'force_constants.hdf5'", ',', 'p2s_map', '=', 'None', ',', 'physical_unit', '=', 'None', ',', 'compression', '=', 'None', ')', ':', 'try', ':', 'import', 'h5py', 'except', 'ImportError', ':', 'raise', 'ModuleNotFoundError', '(', '"You need to install python-h5py."', ')', 'with', 'h5py', '.', 'File', '(', 'filename', ',', "'w'", ')', 'as', 'w', ':', 'w', '.', 'create_dataset', '(', "'force_constants'", ',', 'data', '=', 'force_constants', ',', 'compression', '=', 'compression', ')', 'if', 'p2s_map', 'is', 'not', 'None', ':', 'w', '.', 'create_dataset', '(', "'p2s_map'", ',', 'data', '=', 'p2s_map', ')', 'if', 'physical_unit', 'is', 'not', 'None', ':', 'dset', '=', 'w', '.', 'create_dataset', '(', "'physical_unit'", ',', '(', '1', ',', ')', ',', 'dtype', '=', "'S%d'", '%', 'len', '(', 'physical_unit', ')', ')', 'dset', '[', '0', ']', '=', 'np', '.', 'string_', '(', 'physical_unit', ')'] | Write force constants in hdf5 format.
Parameters
----------
force_constants: ndarray
Force constants
shape=(n_satom,n_satom,3,3) or (n_patom,n_satom,3,3)
dtype=double
filename: str
Filename to be saved
p2s_map: ndarray
Primitive atom indices in supercell index system
shape=(n_patom,)
dtype=intc
physical_unit : str, optional
Physical unit used for force contants. Default is None.
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None. | ['Write', 'force', 'constants', 'in', 'hdf5', 'format', '.'] | train | https://github.com/atztogo/phonopy/blob/869cc2ba9e7d495d5f4cf6942415ab3fc9e2a10f/phonopy/file_IO.py#L285-L326 |
9,461 | DLR-RM/RAFCON | source/rafcon/core/execution/execution_engine.py | ExecutionEngine.step_into | def step_into(self):
"""Take a forward step (into) for all active states in the state machine
"""
logger.debug("Execution step into ...")
self.run_to_states = []
if self.finished_or_stopped():
self.set_execution_mode(StateMachineExecutionStatus.FORWARD_INTO)
self._run_active_state_machine()
else:
self.set_execution_mode(StateMachineExecutionStatus.FORWARD_INTO) | python | def step_into(self):
"""Take a forward step (into) for all active states in the state machine
"""
logger.debug("Execution step into ...")
self.run_to_states = []
if self.finished_or_stopped():
self.set_execution_mode(StateMachineExecutionStatus.FORWARD_INTO)
self._run_active_state_machine()
else:
self.set_execution_mode(StateMachineExecutionStatus.FORWARD_INTO) | ['def', 'step_into', '(', 'self', ')', ':', 'logger', '.', 'debug', '(', '"Execution step into ..."', ')', 'self', '.', 'run_to_states', '=', '[', ']', 'if', 'self', '.', 'finished_or_stopped', '(', ')', ':', 'self', '.', 'set_execution_mode', '(', 'StateMachineExecutionStatus', '.', 'FORWARD_INTO', ')', 'self', '.', '_run_active_state_machine', '(', ')', 'else', ':', 'self', '.', 'set_execution_mode', '(', 'StateMachineExecutionStatus', '.', 'FORWARD_INTO', ')'] | Take a forward step (into) for all active states in the state machine | ['Take', 'a', 'forward', 'step', '(', 'into', ')', 'for', 'all', 'active', 'states', 'in', 'the', 'state', 'machine'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/execution/execution_engine.py#L247-L256 |
9,462 | coded-by-hand/mass | mass/parse.py | get_output | def get_output(src):
"""
parse lines looking for commands
"""
output = ''
lines = open(src.path, 'rU').readlines()
for line in lines:
m = re.match(config.import_regex,line)
if m:
include_path = os.path.abspath(src.dir + '/' + m.group('script'));
if include_path not in config.sources:
script = Script(include_path)
script.parents.append(src)
config.sources[script.path] = script
include_file = config.sources[include_path]
#require statements dont include if the file has already been included
if include_file not in config.stack or m.group('command') == 'import':
config.stack.append(include_file)
output += get_output(include_file)
else:
output += line
return output | python | def get_output(src):
"""
parse lines looking for commands
"""
output = ''
lines = open(src.path, 'rU').readlines()
for line in lines:
m = re.match(config.import_regex,line)
if m:
include_path = os.path.abspath(src.dir + '/' + m.group('script'));
if include_path not in config.sources:
script = Script(include_path)
script.parents.append(src)
config.sources[script.path] = script
include_file = config.sources[include_path]
#require statements dont include if the file has already been included
if include_file not in config.stack or m.group('command') == 'import':
config.stack.append(include_file)
output += get_output(include_file)
else:
output += line
return output | ['def', 'get_output', '(', 'src', ')', ':', 'output', '=', "''", 'lines', '=', 'open', '(', 'src', '.', 'path', ',', "'rU'", ')', '.', 'readlines', '(', ')', 'for', 'line', 'in', 'lines', ':', 'm', '=', 're', '.', 'match', '(', 'config', '.', 'import_regex', ',', 'line', ')', 'if', 'm', ':', 'include_path', '=', 'os', '.', 'path', '.', 'abspath', '(', 'src', '.', 'dir', '+', "'/'", '+', 'm', '.', 'group', '(', "'script'", ')', ')', 'if', 'include_path', 'not', 'in', 'config', '.', 'sources', ':', 'script', '=', 'Script', '(', 'include_path', ')', 'script', '.', 'parents', '.', 'append', '(', 'src', ')', 'config', '.', 'sources', '[', 'script', '.', 'path', ']', '=', 'script', 'include_file', '=', 'config', '.', 'sources', '[', 'include_path', ']', '#require statements dont include if the file has already been included', 'if', 'include_file', 'not', 'in', 'config', '.', 'stack', 'or', 'm', '.', 'group', '(', "'command'", ')', '==', "'import'", ':', 'config', '.', 'stack', '.', 'append', '(', 'include_file', ')', 'output', '+=', 'get_output', '(', 'include_file', ')', 'else', ':', 'output', '+=', 'line', 'return', 'output'] | parse lines looking for commands | ['parse', 'lines', 'looking', 'for', 'commands'] | train | https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/mass/parse.py#L28-L49 |
9,463 | robertmartin8/PyPortfolioOpt | pypfopt/objective_functions.py | negative_sharpe | def negative_sharpe(
weights, expected_returns, cov_matrix, gamma=0, risk_free_rate=0.02
):
"""
Calculate the negative Sharpe ratio of a portfolio
:param weights: asset weights of the portfolio
:type weights: np.ndarray
:param expected_returns: expected return of each asset
:type expected_returns: pd.Series
:param cov_matrix: the covariance matrix of asset returns
:type cov_matrix: pd.DataFrame
:param gamma: L2 regularisation parameter, defaults to 0. Increase if you want more
non-negligible weights
:type gamma: float, optional
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:return: negative Sharpe ratio
:rtype: float
"""
mu = weights.dot(expected_returns)
sigma = np.sqrt(np.dot(weights, np.dot(cov_matrix, weights.T)))
L2_reg = gamma * (weights ** 2).sum()
return -(mu - risk_free_rate) / sigma + L2_reg | python | def negative_sharpe(
weights, expected_returns, cov_matrix, gamma=0, risk_free_rate=0.02
):
"""
Calculate the negative Sharpe ratio of a portfolio
:param weights: asset weights of the portfolio
:type weights: np.ndarray
:param expected_returns: expected return of each asset
:type expected_returns: pd.Series
:param cov_matrix: the covariance matrix of asset returns
:type cov_matrix: pd.DataFrame
:param gamma: L2 regularisation parameter, defaults to 0. Increase if you want more
non-negligible weights
:type gamma: float, optional
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:return: negative Sharpe ratio
:rtype: float
"""
mu = weights.dot(expected_returns)
sigma = np.sqrt(np.dot(weights, np.dot(cov_matrix, weights.T)))
L2_reg = gamma * (weights ** 2).sum()
return -(mu - risk_free_rate) / sigma + L2_reg | ['def', 'negative_sharpe', '(', 'weights', ',', 'expected_returns', ',', 'cov_matrix', ',', 'gamma', '=', '0', ',', 'risk_free_rate', '=', '0.02', ')', ':', 'mu', '=', 'weights', '.', 'dot', '(', 'expected_returns', ')', 'sigma', '=', 'np', '.', 'sqrt', '(', 'np', '.', 'dot', '(', 'weights', ',', 'np', '.', 'dot', '(', 'cov_matrix', ',', 'weights', '.', 'T', ')', ')', ')', 'L2_reg', '=', 'gamma', '*', '(', 'weights', '**', '2', ')', '.', 'sum', '(', ')', 'return', '-', '(', 'mu', '-', 'risk_free_rate', ')', '/', 'sigma', '+', 'L2_reg'] | Calculate the negative Sharpe ratio of a portfolio
:param weights: asset weights of the portfolio
:type weights: np.ndarray
:param expected_returns: expected return of each asset
:type expected_returns: pd.Series
:param cov_matrix: the covariance matrix of asset returns
:type cov_matrix: pd.DataFrame
:param gamma: L2 regularisation parameter, defaults to 0. Increase if you want more
non-negligible weights
:type gamma: float, optional
:param risk_free_rate: risk-free rate of borrowing/lending, defaults to 0.02
:type risk_free_rate: float, optional
:return: negative Sharpe ratio
:rtype: float | ['Calculate', 'the', 'negative', 'Sharpe', 'ratio', 'of', 'a', 'portfolio'] | train | https://github.com/robertmartin8/PyPortfolioOpt/blob/dfad1256cb6995c7fbd7a025eedb54b1ca04b2fc/pypfopt/objective_functions.py#L38-L61 |
9,464 | census-instrumentation/opencensus-python | opencensus/trace/span.py | Span.add_time_event | def add_time_event(self, time_event):
"""Add a TimeEvent.
:type time_event: :class: `~opencensus.trace.time_event.TimeEvent`
:param time_event: A TimeEvent object.
"""
if isinstance(time_event, time_event_module.TimeEvent):
self.time_events.append(time_event)
else:
raise TypeError("Type Error: received {}, but requires TimeEvent.".
format(type(time_event).__name__)) | python | def add_time_event(self, time_event):
"""Add a TimeEvent.
:type time_event: :class: `~opencensus.trace.time_event.TimeEvent`
:param time_event: A TimeEvent object.
"""
if isinstance(time_event, time_event_module.TimeEvent):
self.time_events.append(time_event)
else:
raise TypeError("Type Error: received {}, but requires TimeEvent.".
format(type(time_event).__name__)) | ['def', 'add_time_event', '(', 'self', ',', 'time_event', ')', ':', 'if', 'isinstance', '(', 'time_event', ',', 'time_event_module', '.', 'TimeEvent', ')', ':', 'self', '.', 'time_events', '.', 'append', '(', 'time_event', ')', 'else', ':', 'raise', 'TypeError', '(', '"Type Error: received {}, but requires TimeEvent."', '.', 'format', '(', 'type', '(', 'time_event', ')', '.', '__name__', ')', ')'] | Add a TimeEvent.
:type time_event: :class: `~opencensus.trace.time_event.TimeEvent`
:param time_event: A TimeEvent object. | ['Add', 'a', 'TimeEvent', '.'] | train | https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/span.py#L202-L212 |
9,465 | richardcornish/django-applepodcast | podcast/templatetags/podcast_tags.py | show_url | def show_url(context, **kwargs):
"""Return the show feed URL with different protocol."""
if len(kwargs) != 2:
raise TemplateSyntaxError(_('"show_url" tag takes exactly two keyword arguments.'))
request = context['request']
current_site = get_current_site(request)
url = add_domain(current_site.domain, kwargs['url'])
return re.sub(r'https?:\/\/', '%s://' % kwargs['protocol'], url) | python | def show_url(context, **kwargs):
"""Return the show feed URL with different protocol."""
if len(kwargs) != 2:
raise TemplateSyntaxError(_('"show_url" tag takes exactly two keyword arguments.'))
request = context['request']
current_site = get_current_site(request)
url = add_domain(current_site.domain, kwargs['url'])
return re.sub(r'https?:\/\/', '%s://' % kwargs['protocol'], url) | ['def', 'show_url', '(', 'context', ',', '*', '*', 'kwargs', ')', ':', 'if', 'len', '(', 'kwargs', ')', '!=', '2', ':', 'raise', 'TemplateSyntaxError', '(', '_', '(', '\'"show_url" tag takes exactly two keyword arguments.\'', ')', ')', 'request', '=', 'context', '[', "'request'", ']', 'current_site', '=', 'get_current_site', '(', 'request', ')', 'url', '=', 'add_domain', '(', 'current_site', '.', 'domain', ',', 'kwargs', '[', "'url'", ']', ')', 'return', 're', '.', 'sub', '(', "r'https?:\\/\\/'", ',', "'%s://'", '%', 'kwargs', '[', "'protocol'", ']', ',', 'url', ')'] | Return the show feed URL with different protocol. | ['Return', 'the', 'show', 'feed', 'URL', 'with', 'different', 'protocol', '.'] | train | https://github.com/richardcornish/django-applepodcast/blob/50732acfbe1ca258e5afb44c117a6ac5fa0c1219/podcast/templatetags/podcast_tags.py#L17-L24 |
9,466 | mcieslik-mctp/papy | src/papy/core.py | Plumber.stop | def stop(self):
"""
Stops a paused pipeline. This will a trigger a ``StopIteration`` in the
inputs of the pipeline. And retrieve the buffered results. This will
stop all ``Pipers`` and ``NuMaps``. Python will not terminate cleanly
if a pipeline is running or paused.
"""
if self._started.isSet() and \
not self._running.isSet() and \
not self._pausing.isSet():
# stops the dagger
super(Plumber, self).stop()
# disconnects all pipers
self.disconnect()
self.stats['run_time'] = time() - self.stats['start_time']
self._started.clear()
else:
raise PlumberError | python | def stop(self):
"""
Stops a paused pipeline. This will a trigger a ``StopIteration`` in the
inputs of the pipeline. And retrieve the buffered results. This will
stop all ``Pipers`` and ``NuMaps``. Python will not terminate cleanly
if a pipeline is running or paused.
"""
if self._started.isSet() and \
not self._running.isSet() and \
not self._pausing.isSet():
# stops the dagger
super(Plumber, self).stop()
# disconnects all pipers
self.disconnect()
self.stats['run_time'] = time() - self.stats['start_time']
self._started.clear()
else:
raise PlumberError | ['def', 'stop', '(', 'self', ')', ':', 'if', 'self', '.', '_started', '.', 'isSet', '(', ')', 'and', 'not', 'self', '.', '_running', '.', 'isSet', '(', ')', 'and', 'not', 'self', '.', '_pausing', '.', 'isSet', '(', ')', ':', '# stops the dagger', 'super', '(', 'Plumber', ',', 'self', ')', '.', 'stop', '(', ')', '# disconnects all pipers', 'self', '.', 'disconnect', '(', ')', 'self', '.', 'stats', '[', "'run_time'", ']', '=', 'time', '(', ')', '-', 'self', '.', 'stats', '[', "'start_time'", ']', 'self', '.', '_started', '.', 'clear', '(', ')', 'else', ':', 'raise', 'PlumberError'] | Stops a paused pipeline. This will a trigger a ``StopIteration`` in the
inputs of the pipeline. And retrieve the buffered results. This will
stop all ``Pipers`` and ``NuMaps``. Python will not terminate cleanly
if a pipeline is running or paused. | ['Stops', 'a', 'paused', 'pipeline', '.', 'This', 'will', 'a', 'trigger', 'a', 'StopIteration', 'in', 'the', 'inputs', 'of', 'the', 'pipeline', '.', 'And', 'retrieve', 'the', 'buffered', 'results', '.', 'This', 'will', 'stop', 'all', 'Pipers', 'and', 'NuMaps', '.', 'Python', 'will', 'not', 'terminate', 'cleanly', 'if', 'a', 'pipeline', 'is', 'running', 'or', 'paused', '.'] | train | https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/core.py#L757-L775 |
9,467 | LogicalDash/LiSE | ELiDE/ELiDE/app.py | ELiDEApp.build_config | def build_config(self, config):
"""Set config defaults"""
for sec in 'LiSE', 'ELiDE':
config.adddefaultsection(sec)
config.setdefaults(
'LiSE',
{
'world': 'sqlite:///LiSEworld.db',
'language': 'eng',
'logfile': '',
'loglevel': 'info'
}
)
config.setdefaults(
'ELiDE',
{
'boardchar': 'physical',
'debugger': 'no',
'inspector': 'no',
'user_kv': 'yes',
'play_speed': '1',
'thing_graphics': json.dumps([
("Marsh Davies' Island", 'marsh_davies_island_fg.atlas'),
('RLTiles: Body', 'base.atlas'),
('RLTiles: Basic clothes', 'body.atlas'),
('RLTiles: Armwear', 'arm.atlas'),
('RLTiles: Legwear', 'leg.atlas'),
('RLTiles: Right hand', 'hand1.atlas'),
('RLTiles: Left hand', 'hand2.atlas'),
('RLTiles: Boots', 'boot.atlas'),
('RLTiles: Hair', 'hair.atlas'),
('RLTiles: Beard', 'beard.atlas'),
('RLTiles: Headwear', 'head.atlas')
]),
'place_graphics': json.dumps([
("Marsh Davies' Island", 'marsh_davies_island_bg.atlas'),
("Marsh Davies' Crypt", 'marsh_davies_crypt.atlas'),
('RLTiles: Dungeon', 'dungeon.atlas')
])
}
)
config.write() | python | def build_config(self, config):
"""Set config defaults"""
for sec in 'LiSE', 'ELiDE':
config.adddefaultsection(sec)
config.setdefaults(
'LiSE',
{
'world': 'sqlite:///LiSEworld.db',
'language': 'eng',
'logfile': '',
'loglevel': 'info'
}
)
config.setdefaults(
'ELiDE',
{
'boardchar': 'physical',
'debugger': 'no',
'inspector': 'no',
'user_kv': 'yes',
'play_speed': '1',
'thing_graphics': json.dumps([
("Marsh Davies' Island", 'marsh_davies_island_fg.atlas'),
('RLTiles: Body', 'base.atlas'),
('RLTiles: Basic clothes', 'body.atlas'),
('RLTiles: Armwear', 'arm.atlas'),
('RLTiles: Legwear', 'leg.atlas'),
('RLTiles: Right hand', 'hand1.atlas'),
('RLTiles: Left hand', 'hand2.atlas'),
('RLTiles: Boots', 'boot.atlas'),
('RLTiles: Hair', 'hair.atlas'),
('RLTiles: Beard', 'beard.atlas'),
('RLTiles: Headwear', 'head.atlas')
]),
'place_graphics': json.dumps([
("Marsh Davies' Island", 'marsh_davies_island_bg.atlas'),
("Marsh Davies' Crypt", 'marsh_davies_crypt.atlas'),
('RLTiles: Dungeon', 'dungeon.atlas')
])
}
)
config.write() | ['def', 'build_config', '(', 'self', ',', 'config', ')', ':', 'for', 'sec', 'in', "'LiSE'", ',', "'ELiDE'", ':', 'config', '.', 'adddefaultsection', '(', 'sec', ')', 'config', '.', 'setdefaults', '(', "'LiSE'", ',', '{', "'world'", ':', "'sqlite:///LiSEworld.db'", ',', "'language'", ':', "'eng'", ',', "'logfile'", ':', "''", ',', "'loglevel'", ':', "'info'", '}', ')', 'config', '.', 'setdefaults', '(', "'ELiDE'", ',', '{', "'boardchar'", ':', "'physical'", ',', "'debugger'", ':', "'no'", ',', "'inspector'", ':', "'no'", ',', "'user_kv'", ':', "'yes'", ',', "'play_speed'", ':', "'1'", ',', "'thing_graphics'", ':', 'json', '.', 'dumps', '(', '[', '(', '"Marsh Davies\' Island"', ',', "'marsh_davies_island_fg.atlas'", ')', ',', '(', "'RLTiles: Body'", ',', "'base.atlas'", ')', ',', '(', "'RLTiles: Basic clothes'", ',', "'body.atlas'", ')', ',', '(', "'RLTiles: Armwear'", ',', "'arm.atlas'", ')', ',', '(', "'RLTiles: Legwear'", ',', "'leg.atlas'", ')', ',', '(', "'RLTiles: Right hand'", ',', "'hand1.atlas'", ')', ',', '(', "'RLTiles: Left hand'", ',', "'hand2.atlas'", ')', ',', '(', "'RLTiles: Boots'", ',', "'boot.atlas'", ')', ',', '(', "'RLTiles: Hair'", ',', "'hair.atlas'", ')', ',', '(', "'RLTiles: Beard'", ',', "'beard.atlas'", ')', ',', '(', "'RLTiles: Headwear'", ',', "'head.atlas'", ')', ']', ')', ',', "'place_graphics'", ':', 'json', '.', 'dumps', '(', '[', '(', '"Marsh Davies\' Island"', ',', "'marsh_davies_island_bg.atlas'", ')', ',', '(', '"Marsh Davies\' Crypt"', ',', "'marsh_davies_crypt.atlas'", ')', ',', '(', "'RLTiles: Dungeon'", ',', "'dungeon.atlas'", ')', ']', ')', '}', ')', 'config', '.', 'write', '(', ')'] | Set config defaults | ['Set', 'config', 'defaults'] | train | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/app.py#L121-L162 |
9,468 | ssalentin/plip | plip/modules/supplemental.py | ring_is_planar | def ring_is_planar(ring, r_atoms):
"""Given a set of ring atoms, check if the ring is sufficiently planar
to be considered aromatic"""
normals = []
for a in r_atoms:
adj = pybel.ob.OBAtomAtomIter(a.OBAtom)
# Check for neighboring atoms in the ring
n_coords = [pybel.Atom(neigh).coords for neigh in adj if ring.IsMember(neigh)]
vec1, vec2 = vector(a.coords, n_coords[0]), vector(a.coords, n_coords[1])
normals.append(np.cross(vec1, vec2))
# Given all normals of ring atoms and their neighbors, the angle between any has to be 5.0 deg or less
for n1, n2 in itertools.product(normals, repeat=2):
arom_angle = vecangle(n1, n2)
if all([arom_angle > config.AROMATIC_PLANARITY, arom_angle < 180.0 - config.AROMATIC_PLANARITY]):
return False
return True | python | def ring_is_planar(ring, r_atoms):
"""Given a set of ring atoms, check if the ring is sufficiently planar
to be considered aromatic"""
normals = []
for a in r_atoms:
adj = pybel.ob.OBAtomAtomIter(a.OBAtom)
# Check for neighboring atoms in the ring
n_coords = [pybel.Atom(neigh).coords for neigh in adj if ring.IsMember(neigh)]
vec1, vec2 = vector(a.coords, n_coords[0]), vector(a.coords, n_coords[1])
normals.append(np.cross(vec1, vec2))
# Given all normals of ring atoms and their neighbors, the angle between any has to be 5.0 deg or less
for n1, n2 in itertools.product(normals, repeat=2):
arom_angle = vecangle(n1, n2)
if all([arom_angle > config.AROMATIC_PLANARITY, arom_angle < 180.0 - config.AROMATIC_PLANARITY]):
return False
return True | ['def', 'ring_is_planar', '(', 'ring', ',', 'r_atoms', ')', ':', 'normals', '=', '[', ']', 'for', 'a', 'in', 'r_atoms', ':', 'adj', '=', 'pybel', '.', 'ob', '.', 'OBAtomAtomIter', '(', 'a', '.', 'OBAtom', ')', '# Check for neighboring atoms in the ring', 'n_coords', '=', '[', 'pybel', '.', 'Atom', '(', 'neigh', ')', '.', 'coords', 'for', 'neigh', 'in', 'adj', 'if', 'ring', '.', 'IsMember', '(', 'neigh', ')', ']', 'vec1', ',', 'vec2', '=', 'vector', '(', 'a', '.', 'coords', ',', 'n_coords', '[', '0', ']', ')', ',', 'vector', '(', 'a', '.', 'coords', ',', 'n_coords', '[', '1', ']', ')', 'normals', '.', 'append', '(', 'np', '.', 'cross', '(', 'vec1', ',', 'vec2', ')', ')', '# Given all normals of ring atoms and their neighbors, the angle between any has to be 5.0 deg or less', 'for', 'n1', ',', 'n2', 'in', 'itertools', '.', 'product', '(', 'normals', ',', 'repeat', '=', '2', ')', ':', 'arom_angle', '=', 'vecangle', '(', 'n1', ',', 'n2', ')', 'if', 'all', '(', '[', 'arom_angle', '>', 'config', '.', 'AROMATIC_PLANARITY', ',', 'arom_angle', '<', '180.0', '-', 'config', '.', 'AROMATIC_PLANARITY', ']', ')', ':', 'return', 'False', 'return', 'True'] | Given a set of ring atoms, check if the ring is sufficiently planar
to be considered aromatic | ['Given', 'a', 'set', 'of', 'ring', 'atoms', 'check', 'if', 'the', 'ring', 'is', 'sufficiently', 'planar', 'to', 'be', 'considered', 'aromatic'] | train | https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/supplemental.py#L274-L289 |
9,469 | Accelize/pycosio | pycosio/storage/oss.py | _OSSSystem._remove | def _remove(self, client_kwargs):
"""
Remove an object.
args:
client_kwargs (dict): Client arguments.
"""
with _handle_oss_error():
bucket = self._get_bucket(client_kwargs)
# Object
if 'key' in client_kwargs:
return bucket.delete_object(key=client_kwargs['key'])
# Bucket
return bucket.delete_bucket() | python | def _remove(self, client_kwargs):
"""
Remove an object.
args:
client_kwargs (dict): Client arguments.
"""
with _handle_oss_error():
bucket = self._get_bucket(client_kwargs)
# Object
if 'key' in client_kwargs:
return bucket.delete_object(key=client_kwargs['key'])
# Bucket
return bucket.delete_bucket() | ['def', '_remove', '(', 'self', ',', 'client_kwargs', ')', ':', 'with', '_handle_oss_error', '(', ')', ':', 'bucket', '=', 'self', '.', '_get_bucket', '(', 'client_kwargs', ')', '# Object', 'if', "'key'", 'in', 'client_kwargs', ':', 'return', 'bucket', '.', 'delete_object', '(', 'key', '=', 'client_kwargs', '[', "'key'", ']', ')', '# Bucket', 'return', 'bucket', '.', 'delete_bucket', '(', ')'] | Remove an object.
args:
client_kwargs (dict): Client arguments. | ['Remove', 'an', 'object', '.'] | train | https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/oss.py#L206-L221 |
9,470 | ambitioninc/newrelic-api | newrelic_api/application_hosts.py | ApplicationHosts.show | def show(self, application_id, host_id):
"""
This API endpoint returns a single application host, identified by its
ID.
:type application_id: int
:param application_id: Application ID
:type host_id: int
:param host_id: Application host ID
:rtype: dict
:return: The JSON response of the API
::
{
"application_host": {
"id": "integer",
"application_name": "string",
"host": "string",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_instances": [
"integer"
],
"server": "integer"
}
}
}
"""
return self._get(
url='{root}applications/{application_id}/hosts/{host_id}.json'.format(
root=self.URL,
application_id=application_id,
host_id=host_id
),
headers=self.headers,
) | python | def show(self, application_id, host_id):
"""
This API endpoint returns a single application host, identified by its
ID.
:type application_id: int
:param application_id: Application ID
:type host_id: int
:param host_id: Application host ID
:rtype: dict
:return: The JSON response of the API
::
{
"application_host": {
"id": "integer",
"application_name": "string",
"host": "string",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_instances": [
"integer"
],
"server": "integer"
}
}
}
"""
return self._get(
url='{root}applications/{application_id}/hosts/{host_id}.json'.format(
root=self.URL,
application_id=application_id,
host_id=host_id
),
headers=self.headers,
) | ['def', 'show', '(', 'self', ',', 'application_id', ',', 'host_id', ')', ':', 'return', 'self', '.', '_get', '(', 'url', '=', "'{root}applications/{application_id}/hosts/{host_id}.json'", '.', 'format', '(', 'root', '=', 'self', '.', 'URL', ',', 'application_id', '=', 'application_id', ',', 'host_id', '=', 'host_id', ')', ',', 'headers', '=', 'self', '.', 'headers', ',', ')'] | This API endpoint returns a single application host, identified by its
ID.
:type application_id: int
:param application_id: Application ID
:type host_id: int
:param host_id: Application host ID
:rtype: dict
:return: The JSON response of the API
::
{
"application_host": {
"id": "integer",
"application_name": "string",
"host": "string",
"language": "integer",
"health_status": "string",
"application_summary": {
"response_time": "float",
"throughput": "float",
"error_rate": "float",
"apdex_score": "float"
},
"end_user_summary": {
"response_time": "float",
"throughput": "float",
"apdex_score": "float"
},
"links": {
"application": "integer",
"application_instances": [
"integer"
],
"server": "integer"
}
}
} | ['This', 'API', 'endpoint', 'returns', 'a', 'single', 'application', 'host', 'identified', 'by', 'its', 'ID', '.'] | train | https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/application_hosts.py#L91-L143 |
9,471 | MichaelAquilina/S4 | s4/clients/local.py | LocalSyncClient.unlock | def unlock(self):
"""
Unlock the active advisory lock.
"""
logger.debug("Releasing lock %s", self.lock_file)
self._lock.release()
try:
os.unlink(self.lock_file)
except FileNotFoundError:
pass | python | def unlock(self):
"""
Unlock the active advisory lock.
"""
logger.debug("Releasing lock %s", self.lock_file)
self._lock.release()
try:
os.unlink(self.lock_file)
except FileNotFoundError:
pass | ['def', 'unlock', '(', 'self', ')', ':', 'logger', '.', 'debug', '(', '"Releasing lock %s"', ',', 'self', '.', 'lock_file', ')', 'self', '.', '_lock', '.', 'release', '(', ')', 'try', ':', 'os', '.', 'unlink', '(', 'self', '.', 'lock_file', ')', 'except', 'FileNotFoundError', ':', 'pass'] | Unlock the active advisory lock. | ['Unlock', 'the', 'active', 'advisory', 'lock', '.'] | train | https://github.com/MichaelAquilina/S4/blob/05d74697e6ec683f0329c983f7c3f05ab75fd57e/s4/clients/local.py#L77-L86 |
9,472 | letuananh/chirptext | chirptext/dekomecab.py | parse | def parse(content, *args, **kwargs):
''' Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed '''
global MECAB_PYTHON3
if 'mecab_loc' not in kwargs and MECAB_PYTHON3 and 'MeCab' in globals():
return MeCab.Tagger(*args).parse(content)
else:
return run_mecab_process(content, *args, **kwargs) | python | def parse(content, *args, **kwargs):
''' Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed '''
global MECAB_PYTHON3
if 'mecab_loc' not in kwargs and MECAB_PYTHON3 and 'MeCab' in globals():
return MeCab.Tagger(*args).parse(content)
else:
return run_mecab_process(content, *args, **kwargs) | ['def', 'parse', '(', 'content', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'global', 'MECAB_PYTHON3', 'if', "'mecab_loc'", 'not', 'in', 'kwargs', 'and', 'MECAB_PYTHON3', 'and', "'MeCab'", 'in', 'globals', '(', ')', ':', 'return', 'MeCab', '.', 'Tagger', '(', '*', 'args', ')', '.', 'parse', '(', 'content', ')', 'else', ':', 'return', 'run_mecab_process', '(', 'content', ',', '*', 'args', ',', '*', '*', 'kwargs', ')'] | Use mecab-python3 by default to parse JP text. Fall back to mecab binary app if needed | ['Use', 'mecab', '-', 'python3', 'by', 'default', 'to', 'parse', 'JP', 'text', '.', 'Fall', 'back', 'to', 'mecab', 'binary', 'app', 'if', 'needed'] | train | https://github.com/letuananh/chirptext/blob/ce60b47257b272a587c8703ea1f86cd1a45553a7/chirptext/dekomecab.py#L73-L79 |
9,473 | usc-isi-i2/etk | etk/extractors/spacy_rule_extractor.py | SpacyRuleExtractor._filter_match | def _filter_match(self, span: span, relations: Dict, patterns: List) -> bool:
"""
Filter the match result according to prefix, suffix, min, max ...
Args:
span: span
relations: Dict
patterns: List of pattern
Returns: bool
"""
for pattern_id, a_pattern in enumerate(patterns):
token_range = relations[pattern_id]
if token_range:
tokens = [x for x in span[token_range[0]:token_range[1]]]
if a_pattern.type == "word":
if not self._pre_suf_fix_filter(tokens, a_pattern.prefix, a_pattern.suffix):
return False
if a_pattern.type == "shape":
if not (self._full_shape_filter(tokens, a_pattern.full_shape)
and self._pre_suf_fix_filter(tokens, a_pattern.prefix,a_pattern.suffix)):
return False
if a_pattern.type == "number":
if not self._min_max_filter(tokens, a_pattern.min, a_pattern.max):
return False
return True | python | def _filter_match(self, span: span, relations: Dict, patterns: List) -> bool:
"""
Filter the match result according to prefix, suffix, min, max ...
Args:
span: span
relations: Dict
patterns: List of pattern
Returns: bool
"""
for pattern_id, a_pattern in enumerate(patterns):
token_range = relations[pattern_id]
if token_range:
tokens = [x for x in span[token_range[0]:token_range[1]]]
if a_pattern.type == "word":
if not self._pre_suf_fix_filter(tokens, a_pattern.prefix, a_pattern.suffix):
return False
if a_pattern.type == "shape":
if not (self._full_shape_filter(tokens, a_pattern.full_shape)
and self._pre_suf_fix_filter(tokens, a_pattern.prefix,a_pattern.suffix)):
return False
if a_pattern.type == "number":
if not self._min_max_filter(tokens, a_pattern.min, a_pattern.max):
return False
return True | ['def', '_filter_match', '(', 'self', ',', 'span', ':', 'span', ',', 'relations', ':', 'Dict', ',', 'patterns', ':', 'List', ')', '->', 'bool', ':', 'for', 'pattern_id', ',', 'a_pattern', 'in', 'enumerate', '(', 'patterns', ')', ':', 'token_range', '=', 'relations', '[', 'pattern_id', ']', 'if', 'token_range', ':', 'tokens', '=', '[', 'x', 'for', 'x', 'in', 'span', '[', 'token_range', '[', '0', ']', ':', 'token_range', '[', '1', ']', ']', ']', 'if', 'a_pattern', '.', 'type', '==', '"word"', ':', 'if', 'not', 'self', '.', '_pre_suf_fix_filter', '(', 'tokens', ',', 'a_pattern', '.', 'prefix', ',', 'a_pattern', '.', 'suffix', ')', ':', 'return', 'False', 'if', 'a_pattern', '.', 'type', '==', '"shape"', ':', 'if', 'not', '(', 'self', '.', '_full_shape_filter', '(', 'tokens', ',', 'a_pattern', '.', 'full_shape', ')', 'and', 'self', '.', '_pre_suf_fix_filter', '(', 'tokens', ',', 'a_pattern', '.', 'prefix', ',', 'a_pattern', '.', 'suffix', ')', ')', ':', 'return', 'False', 'if', 'a_pattern', '.', 'type', '==', '"number"', ':', 'if', 'not', 'self', '.', '_min_max_filter', '(', 'tokens', ',', 'a_pattern', '.', 'min', ',', 'a_pattern', '.', 'max', ')', ':', 'return', 'False', 'return', 'True'] | Filter the match result according to prefix, suffix, min, max ...
Args:
span: span
relations: Dict
patterns: List of pattern
Returns: bool | ['Filter', 'the', 'match', 'result', 'according', 'to', 'prefix', 'suffix', 'min', 'max', '...', 'Args', ':', 'span', ':', 'span', 'relations', ':', 'Dict', 'patterns', ':', 'List', 'of', 'pattern'] | train | https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/spacy_rule_extractor.py#L198-L223 |
9,474 | ome/omego | omego/external.py | External.set_server_dir | def set_server_dir(self, dir):
"""
Set the directory of the server to be controlled
"""
self.dir = os.path.abspath(dir)
config = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
self.configured = os.path.exists(config) | python | def set_server_dir(self, dir):
"""
Set the directory of the server to be controlled
"""
self.dir = os.path.abspath(dir)
config = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
self.configured = os.path.exists(config) | ['def', 'set_server_dir', '(', 'self', ',', 'dir', ')', ':', 'self', '.', 'dir', '=', 'os', '.', 'path', '.', 'abspath', '(', 'dir', ')', 'config', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'dir', ',', "'etc'", ',', "'grid'", ',', "'config.xml'", ')', 'self', '.', 'configured', '=', 'os', '.', 'path', '.', 'exists', '(', 'config', ')'] | Set the directory of the server to be controlled | ['Set', 'the', 'directory', 'of', 'the', 'server', 'to', 'be', 'controlled'] | train | https://github.com/ome/omego/blob/2dadbf3c6342b6c995f9e0dceaf3c0b7fab030fb/omego/external.py#L55-L61 |
9,475 | wummel/dosage | dosagelib/director.py | ComicGetter.getStrips | def getStrips(self, scraperobj):
"""Download comic strips."""
with lock:
host_lock = get_host_lock(scraperobj.url)
with host_lock:
self._getStrips(scraperobj) | python | def getStrips(self, scraperobj):
"""Download comic strips."""
with lock:
host_lock = get_host_lock(scraperobj.url)
with host_lock:
self._getStrips(scraperobj) | ['def', 'getStrips', '(', 'self', ',', 'scraperobj', ')', ':', 'with', 'lock', ':', 'host_lock', '=', 'get_host_lock', '(', 'scraperobj', '.', 'url', ')', 'with', 'host_lock', ':', 'self', '.', '_getStrips', '(', 'scraperobj', ')'] | Download comic strips. | ['Download', 'comic', 'strips', '.'] | train | https://github.com/wummel/dosage/blob/a0109c3a46219f280e6e5e77183674e40da0f304/dosagelib/director.py#L91-L96 |
9,476 | bcbio/bcbio-nextgen | bcbio/variation/effects.py | _installed_snpeff_genome | def _installed_snpeff_genome(base_name, config):
"""Find the most recent installed genome for snpEff with the given name.
"""
snpeff_config_file = os.path.join(config_utils.get_program("snpeff", config, "dir"),
"snpEff.config")
if os.path.exists(snpeff_config_file):
data_dir = _find_snpeff_datadir(snpeff_config_file)
dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True)
if os.path.isdir(d)]
else:
data_dir = None
dbs = []
if len(dbs) == 0:
raise ValueError("No database found in %s for %s" % (data_dir, base_name))
else:
return data_dir, os.path.split(dbs[0])[-1] | python | def _installed_snpeff_genome(base_name, config):
"""Find the most recent installed genome for snpEff with the given name.
"""
snpeff_config_file = os.path.join(config_utils.get_program("snpeff", config, "dir"),
"snpEff.config")
if os.path.exists(snpeff_config_file):
data_dir = _find_snpeff_datadir(snpeff_config_file)
dbs = [d for d in sorted(glob.glob(os.path.join(data_dir, "%s*" % base_name)), reverse=True)
if os.path.isdir(d)]
else:
data_dir = None
dbs = []
if len(dbs) == 0:
raise ValueError("No database found in %s for %s" % (data_dir, base_name))
else:
return data_dir, os.path.split(dbs[0])[-1] | ['def', '_installed_snpeff_genome', '(', 'base_name', ',', 'config', ')', ':', 'snpeff_config_file', '=', 'os', '.', 'path', '.', 'join', '(', 'config_utils', '.', 'get_program', '(', '"snpeff"', ',', 'config', ',', '"dir"', ')', ',', '"snpEff.config"', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'snpeff_config_file', ')', ':', 'data_dir', '=', '_find_snpeff_datadir', '(', 'snpeff_config_file', ')', 'dbs', '=', '[', 'd', 'for', 'd', 'in', 'sorted', '(', 'glob', '.', 'glob', '(', 'os', '.', 'path', '.', 'join', '(', 'data_dir', ',', '"%s*"', '%', 'base_name', ')', ')', ',', 'reverse', '=', 'True', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'd', ')', ']', 'else', ':', 'data_dir', '=', 'None', 'dbs', '=', '[', ']', 'if', 'len', '(', 'dbs', ')', '==', '0', ':', 'raise', 'ValueError', '(', '"No database found in %s for %s"', '%', '(', 'data_dir', ',', 'base_name', ')', ')', 'else', ':', 'return', 'data_dir', ',', 'os', '.', 'path', '.', 'split', '(', 'dbs', '[', '0', ']', ')', '[', '-', '1', ']'] | Find the most recent installed genome for snpEff with the given name. | ['Find', 'the', 'most', 'recent', 'installed', 'genome', 'for', 'snpEff', 'with', 'the', 'given', 'name', '.'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/effects.py#L400-L415 |
9,477 | DataDog/integrations-core | datadog_checks_dev/datadog_checks/dev/tooling/github.py | parse_pr_numbers | def parse_pr_numbers(git_log_lines):
"""
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
    where `1234` is the PR number.
"""
prs = []
for line in git_log_lines:
pr_number = parse_pr_number(line)
if pr_number:
prs.append(pr_number)
return prs | python | def parse_pr_numbers(git_log_lines):
"""
Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
    where `1234` is the PR number.
"""
prs = []
for line in git_log_lines:
pr_number = parse_pr_number(line)
if pr_number:
prs.append(pr_number)
return prs | ['def', 'parse_pr_numbers', '(', 'git_log_lines', ')', ':', 'prs', '=', '[', ']', 'for', 'line', 'in', 'git_log_lines', ':', 'pr_number', '=', 'parse_pr_number', '(', 'line', ')', 'if', 'pr_number', ':', 'prs', '.', 'append', '(', 'pr_number', ')', 'return', 'prs'] | Parse PR numbers from commit messages. At GitHub those have the format:
`here is the message (#1234)`
    where `1234` is the PR number. | ['Parse', 'PR', 'numbers', 'from', 'commit', 'messages', '.', 'At', 'GitHub', 'those', 'have', 'the', 'format', ':'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/datadog_checks_dev/datadog_checks/dev/tooling/github.py#L98-L111 |
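A minimal sketch feeding git log subject lines to the helper above; the exact form of each returned entry depends on the module's parse_pr_number helper:

git_log_lines = [
    'Fix flaky kafka test (#1234)',
    'Bump base package version',
    'Add new postgres metrics (#1250)',
]
print(parse_pr_numbers(git_log_lines))   # only the two lines with a (#...) suffix contribute PR numbers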
9,478 | saulpw/visidata | visidata/canvas.py | Canvas.setCursorSize | def setCursorSize(self, p):
'sets width based on diagonal corner p'
self.cursorBox = BoundingBox(self.cursorBox.xmin, self.cursorBox.ymin, p.x, p.y)
self.cursorBox.w = max(self.cursorBox.w, self.canvasCharWidth)
self.cursorBox.h = max(self.cursorBox.h, self.canvasCharHeight) | python | def setCursorSize(self, p):
'sets width based on diagonal corner p'
self.cursorBox = BoundingBox(self.cursorBox.xmin, self.cursorBox.ymin, p.x, p.y)
self.cursorBox.w = max(self.cursorBox.w, self.canvasCharWidth)
self.cursorBox.h = max(self.cursorBox.h, self.canvasCharHeight) | ['def', 'setCursorSize', '(', 'self', ',', 'p', ')', ':', 'self', '.', 'cursorBox', '=', 'BoundingBox', '(', 'self', '.', 'cursorBox', '.', 'xmin', ',', 'self', '.', 'cursorBox', '.', 'ymin', ',', 'p', '.', 'x', ',', 'p', '.', 'y', ')', 'self', '.', 'cursorBox', '.', 'w', '=', 'max', '(', 'self', '.', 'cursorBox', '.', 'w', ',', 'self', '.', 'canvasCharWidth', ')', 'self', '.', 'cursorBox', '.', 'h', '=', 'max', '(', 'self', '.', 'cursorBox', '.', 'h', ',', 'self', '.', 'canvasCharHeight', ')'] | sets width based on diagonal corner p | ['sets', 'width', 'based', 'on', 'diagonal', 'corner', 'p'] | train | https://github.com/saulpw/visidata/blob/32771e0cea6c24fc7902683d14558391395c591f/visidata/canvas.py#L371-L375 |
9,479 | hubo1016/namedstruct | namedstruct/namedstruct.py | NamedStruct._tobytes | def _tobytes(self, skipprepack = False):
'''
Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes.
:param skipprepack: if True, the prepack stage is skipped. For parser internal use.
:returns: converted bytes
'''
stream = BytesIO()
self._tostream(stream, skipprepack)
return stream.getvalue() | python | def _tobytes(self, skipprepack = False):
'''
Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes.
:param skipprepack: if True, the prepack stage is skipped. For parser internal use.
:returns: converted bytes
'''
stream = BytesIO()
self._tostream(stream, skipprepack)
return stream.getvalue() | ['def', '_tobytes', '(', 'self', ',', 'skipprepack', '=', 'False', ')', ':', 'stream', '=', 'BytesIO', '(', ')', 'self', '.', '_tostream', '(', 'stream', ',', 'skipprepack', ')', 'return', 'stream', '.', 'getvalue', '(', ')'] | Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes.
:param skipprepack: if True, the prepack stage is skipped. For parser internal use.
:returns: converted bytes | ['Convert', 'the', 'struct', 'to', 'bytes', '.', 'This', 'is', 'the', 'standard', 'way', 'to', 'convert', 'a', 'NamedStruct', 'to', 'bytes', '.', ':', 'param', 'skipprepack', ':', 'if', 'True', 'the', 'prepack', 'stage', 'is', 'skipped', '.', 'For', 'parser', 'internal', 'use', '.', ':', 'returns', ':', 'converted', 'bytes'] | train | https://github.com/hubo1016/namedstruct/blob/5039026e0df4ce23003d212358918dbe1a6e1d76/namedstruct/namedstruct.py#L146-L156 |
9,480 | saltstack/salt | salt/modules/parted_partition.py | disk_toggle | def disk_toggle(device, flag):
'''
Toggle the state of <flag> on <device>. Valid flags are the same
as the disk_set command.
CLI Example:
.. code-block:: bash
salt '*' partition.disk_toggle /dev/sda pmbr_boot
'''
_validate_device(device)
if flag not in VALID_DISK_FLAGS:
raise CommandExecutionError('Invalid flag passed to partition.disk_toggle')
cmd = ['parted', '-m', '-s', device, 'disk_toggle', flag]
out = __salt__['cmd.run'](cmd).splitlines()
return out | python | def disk_toggle(device, flag):
'''
Toggle the state of <flag> on <device>. Valid flags are the same
as the disk_set command.
CLI Example:
.. code-block:: bash
salt '*' partition.disk_toggle /dev/sda pmbr_boot
'''
_validate_device(device)
if flag not in VALID_DISK_FLAGS:
raise CommandExecutionError('Invalid flag passed to partition.disk_toggle')
cmd = ['parted', '-m', '-s', device, 'disk_toggle', flag]
out = __salt__['cmd.run'](cmd).splitlines()
return out | ['def', 'disk_toggle', '(', 'device', ',', 'flag', ')', ':', '_validate_device', '(', 'device', ')', 'if', 'flag', 'not', 'in', 'VALID_DISK_FLAGS', ':', 'raise', 'CommandExecutionError', '(', "'Invalid flag passed to partition.disk_toggle'", ')', 'cmd', '=', '[', "'parted'", ',', "'-m'", ',', "'-s'", ',', 'device', ',', "'disk_toggle'", ',', 'flag', ']', 'out', '=', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ')', '.', 'splitlines', '(', ')', 'return', 'out'] | Toggle the state of <flag> on <device>. Valid flags are the same
as the disk_set command.
CLI Example:
.. code-block:: bash
salt '*' partition.disk_toggle /dev/sda pmbr_boot | ['Toggle', 'the', 'state', 'of', '<flag', '>', 'on', '<device', '>', '.', 'Valid', 'flags', 'are', 'the', 'same', 'as', 'the', 'disk_set', 'command', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/parted_partition.py#L761-L779 |
9,481 | vxgmichel/aiostream | aiostream/stream/time.py | delay | async def delay(source, delay):
"""Delay the iteration of an asynchronous sequence."""
await asyncio.sleep(delay)
async with streamcontext(source) as streamer:
async for item in streamer:
yield item | python | async def delay(source, delay):
"""Delay the iteration of an asynchronous sequence."""
await asyncio.sleep(delay)
async with streamcontext(source) as streamer:
async for item in streamer:
yield item | ['async', 'def', 'delay', '(', 'source', ',', 'delay', ')', ':', 'await', 'asyncio', '.', 'sleep', '(', 'delay', ')', 'async', 'with', 'streamcontext', '(', 'source', ')', 'as', 'streamer', ':', 'async', 'for', 'item', 'in', 'streamer', ':', 'yield', 'item'] | Delay the iteration of an asynchronous sequence. | ['Delay', 'the', 'iteration', 'of', 'an', 'asynchronous', 'sequence', '.'] | train | https://github.com/vxgmichel/aiostream/blob/43bdf04ab19108a3f1b5a472062e1392a26cbcf8/aiostream/stream/time.py#L60-L65 |
9,482 | apache/incubator-mxnet | python/mxnet/ndarray/ndarray.py | arange | def arange(start, stop=None, step=1.0, repeat=1, infer_range=None, ctx=None, dtype=mx_real_t):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
"""
if infer_range is not None:
warnings.warn('`infer_range` argument has been deprecated',
DeprecationWarning)
if ctx is None:
ctx = current_context()
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
infer_range=False, dtype=dtype, ctx=str(ctx)) | python | def arange(start, stop=None, step=1.0, repeat=1, infer_range=None, ctx=None, dtype=mx_real_t):
"""Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32)
"""
if infer_range is not None:
warnings.warn('`infer_range` argument has been deprecated',
DeprecationWarning)
if ctx is None:
ctx = current_context()
return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
infer_range=False, dtype=dtype, ctx=str(ctx)) | ['def', 'arange', '(', 'start', ',', 'stop', '=', 'None', ',', 'step', '=', '1.0', ',', 'repeat', '=', '1', ',', 'infer_range', '=', 'None', ',', 'ctx', '=', 'None', ',', 'dtype', '=', 'mx_real_t', ')', ':', 'if', 'infer_range', 'is', 'not', 'None', ':', 'warnings', '.', 'warn', '(', "'`infer_range` argument has been deprecated'", ',', 'DeprecationWarning', ')', 'if', 'ctx', 'is', 'None', ':', 'ctx', '=', 'current_context', '(', ')', 'return', '_internal', '.', '_arange', '(', 'start', '=', 'start', ',', 'stop', '=', 'stop', ',', 'step', '=', 'step', ',', 'repeat', '=', 'repeat', ',', 'infer_range', '=', 'False', ',', 'dtype', '=', 'dtype', ',', 'ctx', '=', 'str', '(', 'ctx', ')', ')'] | Returns evenly spaced values within a given interval.
Values are generated within the half-open interval [`start`, `stop`). In other
words, the interval includes `start` but excludes `stop`. The function is
similar to the built-in Python function `range` and to `numpy.arange`,
but returns an `NDArray`.
Parameters
----------
start : number, optional
Start of interval. The default start value is 0.
stop : number
End of interval.
step : number, optional
Spacing between values. The default step size is 1.
repeat : int, optional
Number of times to repeat each element. The default repeat count is 1.
infer_range : boolean, optional
When set to True, infer the stop position from the start, step,
repeat, and output tensor size.
ctx : Context, optional
Device context. Default context is the current default context.
dtype : str or numpy.dtype, optional
The data type of the `NDArray`. The default datatype is `np.float32`.
Returns
-------
NDArray
`NDArray` of evenly spaced values in the specified range.
Examples
--------
>>> mx.nd.arange(3).asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> mx.nd.arange(2, 6).asnumpy()
array([ 2., 3., 4., 5.], dtype=float32)
>>> mx.nd.arange(2, 6, step=2).asnumpy()
array([ 2., 4.], dtype=float32)
>>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
array([ 2. , 2. , 3.5, 3.5, 5. , 5. ], dtype=float32)
>>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
array([2, 2, 2, 4, 4, 4], dtype=int32) | ['Returns', 'evenly', 'spaced', 'values', 'within', 'a', 'given', 'interval', '.'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/ndarray/ndarray.py#L2560-L2610 |
9,483 | watson-developer-cloud/python-sdk | ibm_watson/natural_language_classifier_v1.py | NaturalLanguageClassifierV1.classify | def classify(self, classifier_id, text, **kwargs):
"""
Classify a phrase.
Returns label information for the input. The status must be `Available` before you
can use the classifier to classify text.
:param str classifier_id: Classifier ID to use.
:param str text: The submitted phrase. The maximum length is 2048 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if classifier_id is None:
raise ValueError('classifier_id must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('natural_language_classifier', 'V1',
'classify')
headers.update(sdk_headers)
data = {'text': text}
url = '/v1/classifiers/{0}/classify'.format(
*self._encode_path_vars(classifier_id))
response = self.request(
method='POST',
url=url,
headers=headers,
json=data,
accept_json=True)
return response | python | def classify(self, classifier_id, text, **kwargs):
"""
Classify a phrase.
Returns label information for the input. The status must be `Available` before you
can use the classifier to classify text.
:param str classifier_id: Classifier ID to use.
:param str text: The submitted phrase. The maximum length is 2048 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if classifier_id is None:
raise ValueError('classifier_id must be provided')
if text is None:
raise ValueError('text must be provided')
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('natural_language_classifier', 'V1',
'classify')
headers.update(sdk_headers)
data = {'text': text}
url = '/v1/classifiers/{0}/classify'.format(
*self._encode_path_vars(classifier_id))
response = self.request(
method='POST',
url=url,
headers=headers,
json=data,
accept_json=True)
return response | ['def', 'classify', '(', 'self', ',', 'classifier_id', ',', 'text', ',', '*', '*', 'kwargs', ')', ':', 'if', 'classifier_id', 'is', 'None', ':', 'raise', 'ValueError', '(', "'classifier_id must be provided'", ')', 'if', 'text', 'is', 'None', ':', 'raise', 'ValueError', '(', "'text must be provided'", ')', 'headers', '=', '{', '}', 'if', "'headers'", 'in', 'kwargs', ':', 'headers', '.', 'update', '(', 'kwargs', '.', 'get', '(', "'headers'", ')', ')', 'sdk_headers', '=', 'get_sdk_headers', '(', "'natural_language_classifier'", ',', "'V1'", ',', "'classify'", ')', 'headers', '.', 'update', '(', 'sdk_headers', ')', 'data', '=', '{', "'text'", ':', 'text', '}', 'url', '=', "'/v1/classifiers/{0}/classify'", '.', 'format', '(', '*', 'self', '.', '_encode_path_vars', '(', 'classifier_id', ')', ')', 'response', '=', 'self', '.', 'request', '(', 'method', '=', "'POST'", ',', 'url', '=', 'url', ',', 'headers', '=', 'headers', ',', 'json', '=', 'data', ',', 'accept_json', '=', 'True', ')', 'return', 'response'] | Classify a phrase.
Returns label information for the input. The status must be `Available` before you
can use the classifier to classify text.
:param str classifier_id: Classifier ID to use.
:param str text: The submitted phrase. The maximum length is 2048 characters.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse | ['Classify', 'a', 'phrase', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_classifier_v1.py#L97-L133 |
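A hypothetical sketch of calling the endpoint; the classifier ID and the iam_apikey credential are placeholders, and the credential keyword follows the SDK conventions of this release:

from ibm_watson import NaturalLanguageClassifierV1

nlc = NaturalLanguageClassifierV1(iam_apikey='YOUR_APIKEY')             # placeholder credential
response = nlc.classify('10D41B-nlc-1', 'How hot will it be today?')    # placeholder classifier ID
print(response.get_result()['top_class'])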
9,484 | gagneurlab/concise | concise/legacy/concise.py | Concise._set_var_res | def _set_var_res(self, weights):
"""
Transform the weights to var_res
"""
if weights is None:
return
# layer 1
motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0)
motif_base_weights = motif_base_weights_raw[np.newaxis]
motif_bias = weights["motif_bias"]
feature_weights = weights["feature_weights"]
spline_weights = weights["spline_weights"]
# filter
motif_weights = weights["motif_weights"]
final_bias = weights["final_bias"]
var_res = {
"motif_base_weights": motif_base_weights,
"motif_bias": motif_bias,
"spline_weights": spline_weights,
"feature_weights": feature_weights,
"motif_weights": motif_weights,
"final_bias": final_bias
}
# cast everything to float32
var_res = {key: value.astype(np.float32) if value is not None else None for key, value in var_res.items()}
self._var_res = var_res | python | def _set_var_res(self, weights):
"""
Transform the weights to var_res
"""
if weights is None:
return
# layer 1
motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0)
motif_base_weights = motif_base_weights_raw[np.newaxis]
motif_bias = weights["motif_bias"]
feature_weights = weights["feature_weights"]
spline_weights = weights["spline_weights"]
# filter
motif_weights = weights["motif_weights"]
final_bias = weights["final_bias"]
var_res = {
"motif_base_weights": motif_base_weights,
"motif_bias": motif_bias,
"spline_weights": spline_weights,
"feature_weights": feature_weights,
"motif_weights": motif_weights,
"final_bias": final_bias
}
# cast everything to float32
var_res = {key: value.astype(np.float32) if value is not None else None for key, value in var_res.items()}
self._var_res = var_res | ['def', '_set_var_res', '(', 'self', ',', 'weights', ')', ':', 'if', 'weights', 'is', 'None', ':', 'return', '# layer 1', 'motif_base_weights_raw', '=', 'np', '.', 'swapaxes', '(', 'weights', '[', '"motif_base_weights"', ']', ',', '2', ',', '0', ')', 'motif_base_weights', '=', 'motif_base_weights_raw', '[', 'np', '.', 'newaxis', ']', 'motif_bias', '=', 'weights', '[', '"motif_bias"', ']', 'feature_weights', '=', 'weights', '[', '"feature_weights"', ']', 'spline_weights', '=', 'weights', '[', '"spline_weights"', ']', '# filter', 'motif_weights', '=', 'weights', '[', '"motif_weights"', ']', 'final_bias', '=', 'weights', '[', '"final_bias"', ']', 'var_res', '=', '{', '"motif_base_weights"', ':', 'motif_base_weights', ',', '"motif_bias"', ':', 'motif_bias', ',', '"spline_weights"', ':', 'spline_weights', ',', '"feature_weights"', ':', 'feature_weights', ',', '"motif_weights"', ':', 'motif_weights', ',', '"final_bias"', ':', 'final_bias', '}', '# cast everything to float32', 'var_res', '=', '{', 'key', ':', 'value', '.', 'astype', '(', 'np', '.', 'float32', ')', 'if', 'value', 'is', 'not', 'None', 'else', 'None', 'for', 'key', ',', 'value', 'in', 'var_res', '.', 'items', '(', ')', '}', 'self', '.', '_var_res', '=', 'var_res'] | Transform the weights to var_res | ['Transform', 'the', 'weights', 'to', 'var_res'] | train | https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1028-L1059 |
9,485 | bpython/curtsies | examples/tttplaybitboard.py | tictactoe | def tictactoe(w, i, player, opponent, grid=None):
"Put two strategies to a classic battle of wits."
grid = grid or empty_grid
while True:
w.render_to_terminal(w.array_from_text(view(grid)))
if is_won(grid):
print(whose_move(grid), "wins.")
break
if not successors(grid):
print("A draw.")
break
grid = player(w, i, grid)
player, opponent = opponent, player | python | def tictactoe(w, i, player, opponent, grid=None):
"Put two strategies to a classic battle of wits."
grid = grid or empty_grid
while True:
w.render_to_terminal(w.array_from_text(view(grid)))
if is_won(grid):
print(whose_move(grid), "wins.")
break
if not successors(grid):
print("A draw.")
break
grid = player(w, i, grid)
player, opponent = opponent, player | ['def', 'tictactoe', '(', 'w', ',', 'i', ',', 'player', ',', 'opponent', ',', 'grid', '=', 'None', ')', ':', 'grid', '=', 'grid', 'or', 'empty_grid', 'while', 'True', ':', 'w', '.', 'render_to_terminal', '(', 'w', '.', 'array_from_text', '(', 'view', '(', 'grid', ')', ')', ')', 'if', 'is_won', '(', 'grid', ')', ':', 'print', '(', 'whose_move', '(', 'grid', ')', ',', '"wins."', ')', 'break', 'if', 'not', 'successors', '(', 'grid', ')', ':', 'print', '(', '"A draw."', ')', 'break', 'grid', '=', 'player', '(', 'w', ',', 'i', ',', 'grid', ')', 'player', ',', 'opponent', '=', 'opponent', ',', 'player'] | Put two strategies to a classic battle of wits. | ['Put', 'two', 'strategies', 'to', 'a', 'classic', 'battle', 'of', 'wits', '.'] | train | https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/examples/tttplaybitboard.py#L40-L52 |
9,486 | Erotemic/utool | utool/util_graph.py | nx_gen_edge_values | def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Generates attributes values of specific edges
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
"""
if edges is None:
edges = G.edges()
if on_missing is None:
on_missing = 'error'
if on_keyerr is None:
on_keyerr = 'default'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
# Generate `data_iter` edges and data dictionary
if on_missing == 'error':
data_iter = (G.adj[u][v] for u, v in edges)
elif on_missing == 'default':
data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
for u, v in edges)
else:
raise KeyError('on_missing={} must be error, filter or default'.format(
on_missing))
# Get `value_iter` desired value out of dictionary
if on_keyerr == 'error':
value_iter = (d[key] for d in data_iter)
elif on_keyerr == 'default':
value_iter = (d.get(key, default) for d in data_iter)
else:
raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))
return value_iter | python | def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
on_missing='error', on_keyerr='default'):
"""
Generates attributes values of specific edges
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'.
"""
if edges is None:
edges = G.edges()
if on_missing is None:
on_missing = 'error'
if on_keyerr is None:
on_keyerr = 'default'
if default is util_const.NoParam and on_keyerr == 'default':
on_keyerr = 'error'
# Generate `data_iter` edges and data dictionary
if on_missing == 'error':
data_iter = (G.adj[u][v] for u, v in edges)
elif on_missing == 'default':
data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
for u, v in edges)
else:
raise KeyError('on_missing={} must be error, filter or default'.format(
on_missing))
# Get `value_iter` desired value out of dictionary
if on_keyerr == 'error':
value_iter = (d[key] for d in data_iter)
elif on_keyerr == 'default':
value_iter = (d.get(key, default) for d in data_iter)
else:
raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))
return value_iter | ['def', 'nx_gen_edge_values', '(', 'G', ',', 'key', ',', 'edges', '=', 'None', ',', 'default', '=', 'util_const', '.', 'NoParam', ',', 'on_missing', '=', "'error'", ',', 'on_keyerr', '=', "'default'", ')', ':', 'if', 'edges', 'is', 'None', ':', 'edges', '=', 'G', '.', 'edges', '(', ')', 'if', 'on_missing', 'is', 'None', ':', 'on_missing', '=', "'error'", 'if', 'on_keyerr', 'is', 'None', ':', 'on_keyerr', '=', "'default'", 'if', 'default', 'is', 'util_const', '.', 'NoParam', 'and', 'on_keyerr', '==', "'default'", ':', 'on_keyerr', '=', "'error'", '# Generate `data_iter` edges and data dictionary', 'if', 'on_missing', '==', "'error'", ':', 'data_iter', '=', '(', 'G', '.', 'adj', '[', 'u', ']', '[', 'v', ']', 'for', 'u', ',', 'v', 'in', 'edges', ')', 'elif', 'on_missing', '==', "'default'", ':', 'data_iter', '=', '(', 'G', '.', 'adj', '[', 'u', ']', '[', 'v', ']', 'if', 'G', '.', 'has_edge', '(', 'u', ',', 'v', ')', 'else', '{', '}', 'for', 'u', ',', 'v', 'in', 'edges', ')', 'else', ':', 'raise', 'KeyError', '(', "'on_missing={} must be error, filter or default'", '.', 'format', '(', 'on_missing', ')', ')', '# Get `value_iter` desired value out of dictionary', 'if', 'on_keyerr', '==', "'error'", ':', 'value_iter', '=', '(', 'd', '[', 'key', ']', 'for', 'd', 'in', 'data_iter', ')', 'elif', 'on_keyerr', '==', "'default'", ':', 'value_iter', '=', '(', 'd', '.', 'get', '(', 'key', ',', 'default', ')', 'for', 'd', 'in', 'data_iter', ')', 'else', ':', 'raise', 'KeyError', '(', "'on_keyerr={} must be error or default'", '.', 'format', '(', 'on_keyerr', ')', ')', 'return', 'value_iter'] | Generates attributes values of specific edges
Args:
on_missing (str): Strategy for handling nodes missing from G.
Can be {'error', 'default'}. defaults to 'error'.
on_keyerr (str): Strategy for handling keys missing from node dicts.
Can be {'error', 'default'}. defaults to 'default'
if default is specified, otherwise defaults to 'error'. | ['Generates', 'attributes', 'values', 'of', 'specific', 'edges'] | train | https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_graph.py#L880-L916 |
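A minimal sketch; the import path follows the file location, and the missing 'weight' on the second edge falls back to the supplied default because on_keyerr stays at 'default' when a default is given:

import networkx as nx
from utool.util_graph import nx_gen_edge_values

G = nx.Graph()
G.add_edge(1, 2, weight=3.0)
G.add_edge(2, 3)   # no weight attribute on this edge
values = nx_gen_edge_values(G, 'weight', edges=[(1, 2), (2, 3)], default=0.0)
print(list(values))   # [3.0, 0.0]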
9,487 | datastax/python-driver | cassandra/cqlengine/query.py | DMLQuery.save | def save(self):
"""
Creates / updates a row.
This is a blind insert call.
All validation and cleaning needs to happen
prior to calling this.
"""
if self.instance is None:
raise CQLEngineException("DML Query intance attribute is None")
assert type(self.instance) == self.model
nulled_fields = set()
if self.instance._has_counter or self.instance._can_update():
if self.instance._has_counter:
warn("'create' and 'save' actions on Counters are deprecated. It will be disallowed in 4.0. "
"Use the 'update' mechanism instead.", DeprecationWarning)
return self.update()
else:
insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists)
static_save_only = False if len(self.instance._clustering_keys) == 0 else True
for name, col in self.instance._clustering_keys.items():
static_save_only = static_save_only and col._val_is_null(getattr(self.instance, name, None))
for name, col in self.instance._columns.items():
if static_save_only and not col.static and not col.partition_key:
continue
val = getattr(self.instance, name, None)
if col._val_is_null(val):
if self.instance._values[name].changed:
nulled_fields.add(col.db_field_name)
continue
if col.has_default and not self.instance._values[name].changed:
# Ensure default columns included in a save() are marked as explicit, to get them *persisted* properly
self.instance._values[name].explicit = True
insert.add_assignment(col, getattr(self.instance, name, None))
# skip query execution if it's empty
# caused by pointless update queries
if not insert.is_empty:
self._execute(insert)
# delete any nulled columns
if not static_save_only:
self._delete_null_columns() | python | def save(self):
"""
Creates / updates a row.
This is a blind insert call.
All validation and cleaning needs to happen
prior to calling this.
"""
if self.instance is None:
raise CQLEngineException("DML Query intance attribute is None")
assert type(self.instance) == self.model
nulled_fields = set()
if self.instance._has_counter or self.instance._can_update():
if self.instance._has_counter:
warn("'create' and 'save' actions on Counters are deprecated. It will be disallowed in 4.0. "
"Use the 'update' mechanism instead.", DeprecationWarning)
return self.update()
else:
insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists)
static_save_only = False if len(self.instance._clustering_keys) == 0 else True
for name, col in self.instance._clustering_keys.items():
static_save_only = static_save_only and col._val_is_null(getattr(self.instance, name, None))
for name, col in self.instance._columns.items():
if static_save_only and not col.static and not col.partition_key:
continue
val = getattr(self.instance, name, None)
if col._val_is_null(val):
if self.instance._values[name].changed:
nulled_fields.add(col.db_field_name)
continue
if col.has_default and not self.instance._values[name].changed:
# Ensure default columns included in a save() are marked as explicit, to get them *persisted* properly
self.instance._values[name].explicit = True
insert.add_assignment(col, getattr(self.instance, name, None))
# skip query execution if it's empty
# caused by pointless update queries
if not insert.is_empty:
self._execute(insert)
# delete any nulled columns
if not static_save_only:
self._delete_null_columns() | ['def', 'save', '(', 'self', ')', ':', 'if', 'self', '.', 'instance', 'is', 'None', ':', 'raise', 'CQLEngineException', '(', '"DML Query intance attribute is None"', ')', 'assert', 'type', '(', 'self', '.', 'instance', ')', '==', 'self', '.', 'model', 'nulled_fields', '=', 'set', '(', ')', 'if', 'self', '.', 'instance', '.', '_has_counter', 'or', 'self', '.', 'instance', '.', '_can_update', '(', ')', ':', 'if', 'self', '.', 'instance', '.', '_has_counter', ':', 'warn', '(', '"\'create\' and \'save\' actions on Counters are deprecated. It will be disallowed in 4.0. "', '"Use the \'update\' mechanism instead."', ',', 'DeprecationWarning', ')', 'return', 'self', '.', 'update', '(', ')', 'else', ':', 'insert', '=', 'InsertStatement', '(', 'self', '.', 'column_family_name', ',', 'ttl', '=', 'self', '.', '_ttl', ',', 'timestamp', '=', 'self', '.', '_timestamp', ',', 'if_not_exists', '=', 'self', '.', '_if_not_exists', ')', 'static_save_only', '=', 'False', 'if', 'len', '(', 'self', '.', 'instance', '.', '_clustering_keys', ')', '==', '0', 'else', 'True', 'for', 'name', ',', 'col', 'in', 'self', '.', 'instance', '.', '_clustering_keys', '.', 'items', '(', ')', ':', 'static_save_only', '=', 'static_save_only', 'and', 'col', '.', '_val_is_null', '(', 'getattr', '(', 'self', '.', 'instance', ',', 'name', ',', 'None', ')', ')', 'for', 'name', ',', 'col', 'in', 'self', '.', 'instance', '.', '_columns', '.', 'items', '(', ')', ':', 'if', 'static_save_only', 'and', 'not', 'col', '.', 'static', 'and', 'not', 'col', '.', 'partition_key', ':', 'continue', 'val', '=', 'getattr', '(', 'self', '.', 'instance', ',', 'name', ',', 'None', ')', 'if', 'col', '.', '_val_is_null', '(', 'val', ')', ':', 'if', 'self', '.', 'instance', '.', '_values', '[', 'name', ']', '.', 'changed', ':', 'nulled_fields', '.', 'add', '(', 'col', '.', 'db_field_name', ')', 'continue', 'if', 'col', '.', 'has_default', 'and', 'not', 'self', '.', 'instance', '.', '_values', '[', 'name', ']', '.', 'changed', ':', '# Ensure default columns included in a save() are marked as explicit, to get them *persisted* properly', 'self', '.', 'instance', '.', '_values', '[', 'name', ']', '.', 'explicit', '=', 'True', 'insert', '.', 'add_assignment', '(', 'col', ',', 'getattr', '(', 'self', '.', 'instance', ',', 'name', ',', 'None', ')', ')', "# skip query execution if it's empty", '# caused by pointless update queries', 'if', 'not', 'insert', '.', 'is_empty', ':', 'self', '.', '_execute', '(', 'insert', ')', '# delete any nulled columns', 'if', 'not', 'static_save_only', ':', 'self', '.', '_delete_null_columns', '(', ')'] | Creates / updates a row.
This is a blind insert call.
All validation and cleaning needs to happen
prior to calling this. | ['Creates', '/', 'updates', 'a', 'row', '.', 'This', 'is', 'a', 'blind', 'insert', 'call', '.', 'All', 'validation', 'and', 'cleaning', 'needs', 'to', 'happen', 'prior', 'to', 'calling', 'this', '.'] | train | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/query.py#L1464-L1505 |
9,488 | SheffieldML/GPyOpt | GPyOpt/interface/driver.py | BODriver._get_obj | def _get_obj(self,space):
"""
Imports the acquisition function.
"""
obj_func = self.obj_func
from ..core.task import SingleObjective
return SingleObjective(obj_func, self.config['resources']['cores'], space=space, unfold_args=True) | python | def _get_obj(self,space):
"""
Imports the acquisition function.
"""
obj_func = self.obj_func
from ..core.task import SingleObjective
return SingleObjective(obj_func, self.config['resources']['cores'], space=space, unfold_args=True) | ['def', '_get_obj', '(', 'self', ',', 'space', ')', ':', 'obj_func', '=', 'self', '.', 'obj_func', 'from', '.', '.', 'core', '.', 'task', 'import', 'SingleObjective', 'return', 'SingleObjective', '(', 'obj_func', ',', 'self', '.', 'config', '[', "'resources'", ']', '[', "'cores'", ']', ',', 'space', '=', 'space', ',', 'unfold_args', '=', 'True', ')'] | Imports the acquisition function. | ['Imports', 'the', 'acquisition', 'function', '.'] | train | https://github.com/SheffieldML/GPyOpt/blob/255539dc5927819ca701e44fe3d76cd4864222fa/GPyOpt/interface/driver.py#L24-L32 |
9,489 | tensorpack/tensorpack | examples/GAN/InfoGAN-mnist.py | shapeless_placeholder | def shapeless_placeholder(x, axis, name):
"""
Make the static shape of a tensor less specific.
If you want to feed to a tensor, the shape of the feed value must match
the tensor's static shape. This function creates a placeholder which
defaults to x if not fed, but has a less specific static shape than x.
See also `tensorflow#5680 <https://github.com/tensorflow/tensorflow/issues/5680>`_.
Args:
x: a tensor
axis(int or list of ints): these axes of ``x.get_shape()`` will become
None in the output.
name(str): name of the output tensor
Returns:
a tensor equal to x, but shape information is partially cleared.
"""
shp = x.get_shape().as_list()
if not isinstance(axis, list):
axis = [axis]
for a in axis:
if shp[a] is None:
raise ValueError("Axis {} of shape {} is already unknown!".format(a, shp))
shp[a] = None
x = tf.placeholder_with_default(x, shape=shp, name=name)
return x | python | def shapeless_placeholder(x, axis, name):
"""
Make the static shape of a tensor less specific.
If you want to feed to a tensor, the shape of the feed value must match
the tensor's static shape. This function creates a placeholder which
defaults to x if not fed, but has a less specific static shape than x.
See also `tensorflow#5680 <https://github.com/tensorflow/tensorflow/issues/5680>`_.
Args:
x: a tensor
axis(int or list of ints): these axes of ``x.get_shape()`` will become
None in the output.
name(str): name of the output tensor
Returns:
a tensor equal to x, but shape information is partially cleared.
"""
shp = x.get_shape().as_list()
if not isinstance(axis, list):
axis = [axis]
for a in axis:
if shp[a] is None:
raise ValueError("Axis {} of shape {} is already unknown!".format(a, shp))
shp[a] = None
x = tf.placeholder_with_default(x, shape=shp, name=name)
return x | ['def', 'shapeless_placeholder', '(', 'x', ',', 'axis', ',', 'name', ')', ':', 'shp', '=', 'x', '.', 'get_shape', '(', ')', '.', 'as_list', '(', ')', 'if', 'not', 'isinstance', '(', 'axis', ',', 'list', ')', ':', 'axis', '=', '[', 'axis', ']', 'for', 'a', 'in', 'axis', ':', 'if', 'shp', '[', 'a', ']', 'is', 'None', ':', 'raise', 'ValueError', '(', '"Axis {} of shape {} is already unknown!"', '.', 'format', '(', 'a', ',', 'shp', ')', ')', 'shp', '[', 'a', ']', '=', 'None', 'x', '=', 'tf', '.', 'placeholder_with_default', '(', 'x', ',', 'shape', '=', 'shp', ',', 'name', '=', 'name', ')', 'return', 'x'] | Make the static shape of a tensor less specific.
If you want to feed to a tensor, the shape of the feed value must match
the tensor's static shape. This function creates a placeholder which
defaults to x if not fed, but has a less specific static shape than x.
See also `tensorflow#5680 <https://github.com/tensorflow/tensorflow/issues/5680>`_.
Args:
x: a tensor
axis(int or list of ints): these axes of ``x.get_shape()`` will become
None in the output.
name(str): name of the output tensor
Returns:
a tensor equal to x, but shape information is partially cleared. | ['Make', 'the', 'static', 'shape', 'of', 'a', 'tensor', 'less', 'specific', '.'] | train | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/examples/GAN/InfoGAN-mnist.py#L40-L66 |
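A minimal sketch in TF1-style graph mode, calling the helper directly from this example script; the tensor values are illustrative:

import tensorflow as tf

x = tf.zeros([32, 10])
x_loose = shapeless_placeholder(x, 0, name='flexible_batch')
print(x_loose.get_shape().as_list())   # [None, 10]: the batch axis is no longer fixed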
9,490 | Cornices/cornice.ext.sphinx | cornice_sphinx/__init__.py | ServiceDirective._get_attributes | def _get_attributes(schema, location):
"""Return the schema's children, filtered by location."""
schema = DottedNameResolver(__name__).maybe_resolve(schema)
def _filter(attr):
if not hasattr(attr, "location"):
valid_location = 'body' in location
else:
valid_location = attr.location in to_list(location)
return valid_location
return list(filter(_filter, schema().children)) | python | def _get_attributes(schema, location):
"""Return the schema's children, filtered by location."""
schema = DottedNameResolver(__name__).maybe_resolve(schema)
def _filter(attr):
if not hasattr(attr, "location"):
valid_location = 'body' in location
else:
valid_location = attr.location in to_list(location)
return valid_location
return list(filter(_filter, schema().children)) | ['def', '_get_attributes', '(', 'schema', ',', 'location', ')', ':', 'schema', '=', 'DottedNameResolver', '(', '__name__', ')', '.', 'maybe_resolve', '(', 'schema', ')', 'def', '_filter', '(', 'attr', ')', ':', 'if', 'not', 'hasattr', '(', 'attr', ',', '"location"', ')', ':', 'valid_location', '=', "'body'", 'in', 'location', 'else', ':', 'valid_location', '=', 'attr', '.', 'location', 'in', 'to_list', '(', 'location', ')', 'return', 'valid_location', 'return', 'list', '(', 'filter', '(', '_filter', ',', 'schema', '(', ')', '.', 'children', ')', ')'] | Return the schema's children, filtered by location. | ['Return', 'the', 'schema', 's', 'children', 'filtered', 'by', 'location', '.'] | train | https://github.com/Cornices/cornice.ext.sphinx/blob/f73fdcc94d78fb5c94262adb9adc187c96378a53/cornice_sphinx/__init__.py#L117-L128 |
9,491 | mlperf/training | translation/tensorflow/transformer/utils/tokenizer.py | _save_vocab_file | def _save_vocab_file(vocab_file, subtoken_list):
"""Save subtokens to file."""
with tf.gfile.Open(vocab_file, mode="w") as f:
for subtoken in subtoken_list:
f.write("'%s'\n" % _unicode_to_native(subtoken)) | python | def _save_vocab_file(vocab_file, subtoken_list):
"""Save subtokens to file."""
with tf.gfile.Open(vocab_file, mode="w") as f:
for subtoken in subtoken_list:
f.write("'%s'\n" % _unicode_to_native(subtoken)) | ['def', '_save_vocab_file', '(', 'vocab_file', ',', 'subtoken_list', ')', ':', 'with', 'tf', '.', 'gfile', '.', 'Open', '(', 'vocab_file', ',', 'mode', '=', '"w"', ')', 'as', 'f', ':', 'for', 'subtoken', 'in', 'subtoken_list', ':', 'f', '.', 'write', '(', '"\'%s\'\\n"', '%', '_unicode_to_native', '(', 'subtoken', ')', ')'] | Save subtokens to file. | ['Save', 'subtokens', 'to', 'file', '.'] | train | https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/tokenizer.py#L185-L189 |
9,492 | StackStorm/pybind | pybind/slxos/v17r_1_01a/system_monitor/__init__.py | system_monitor._set_MM | def _set_MM(self, v, load=False):
"""
Setter method for MM, mapped from YANG variable /system_monitor/MM (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_MM is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_MM() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=MM.MM, is_container='container', presence=False, yang_name="MM", rest_name="MM", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold setting for \n component:MM', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """MM must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=MM.MM, is_container='container', presence=False, yang_name="MM", rest_name="MM", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold setting for \n component:MM', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
})
self.__MM = t
if hasattr(self, '_set'):
self._set() | python | def _set_MM(self, v, load=False):
"""
Setter method for MM, mapped from YANG variable /system_monitor/MM (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_MM is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_MM() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=MM.MM, is_container='container', presence=False, yang_name="MM", rest_name="MM", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold setting for \n component:MM', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """MM must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=MM.MM, is_container='container', presence=False, yang_name="MM", rest_name="MM", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold setting for \n component:MM', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""",
})
self.__MM = t
if hasattr(self, '_set'):
self._set() | ['def', '_set_MM', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'MM', '.', 'MM', ',', 'is_container', '=', "'container'", ',', 'presence', '=', 'False', ',', 'yang_name', '=', '"MM"', ',', 'rest_name', '=', '"MM"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Configure threshold setting for \\n component:MM'", ',', "u'cli-incomplete-no'", ':', 'None', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-system-monitor'", ',', 'defining_module', '=', "'brocade-system-monitor'", ',', 'yang_type', '=', "'container'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""MM must be of a type compatible with container"""', ',', "'defined-type'", ':', '"container"', ',', "'generated-type'", ':', '"""YANGDynClass(base=MM.MM, is_container=\'container\', presence=False, yang_name="MM", rest_name="MM", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Configure threshold setting for \\n component:MM\', u\'cli-incomplete-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-system-monitor\', defining_module=\'brocade-system-monitor\', yang_type=\'container\', is_config=True)"""', ',', '}', ')', 'self', '.', '__MM', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')'] | Setter method for MM, mapped from YANG variable /system_monitor/MM (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_MM is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_MM() directly. | ['Setter', 'method', 'for', 'MM', 'mapped', 'from', 'YANG', 'variable', '/', 'system_monitor', '/', 'MM', '(', 'container', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_MM', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_MM', '()', 'directly', '.'] | train | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_1_01a/system_monitor/__init__.py#L275-L296 |
9,493 | mitsei/dlkit | dlkit/primordium/transport/objects.py | DataInputStream.read | def read(self, buf=None, n=None):
"""Reads a specified number of ``bytes`` from this stream.
arg: n (cardinal): the number of ``bytes`` to read
return: (integer) - the ``bytes`` read
raise: IllegalState - this stream has been closed or
``at_end_of_stream()`` is ``true``
raise: InvalidArgument - the size of ``buf`` is less than ``n``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if n is not None:
return self._my_data.read(n)
else:
return self._my_data.read() | python | def read(self, buf=None, n=None):
"""Reads a specified number of ``bytes`` from this stream.
arg: n (cardinal): the number of ``bytes`` to read
return: (integer) - the ``bytes`` read
raise: IllegalState - this stream has been closed or
``at_end_of_stream()`` is ``true``
raise: InvalidArgument - the size of ``buf`` is less than ``n``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
if n is not None:
return self._my_data.read(n)
else:
return self._my_data.read() | ['def', 'read', '(', 'self', ',', 'buf', '=', 'None', ',', 'n', '=', 'None', ')', ':', 'if', 'n', 'is', 'not', 'None', ':', 'return', 'self', '.', '_my_data', '.', 'read', '(', 'n', ')', 'else', ':', 'return', 'self', '.', '_my_data', '.', 'read', '(', ')'] | Reads a specified number of ``bytes`` from this stream.
arg: n (cardinal): the number of ``bytes`` to read
return: (integer) - the ``bytes`` read
raise: IllegalState - this stream has been closed or
``at_end_of_stream()`` is ``true``
raise: InvalidArgument - the size of ``buf`` is less than ``n``
raise: OperationFailed - unable to complete request
*compliance: mandatory -- This method must be implemented.* | ['Reads', 'a', 'specified', 'number', 'of', 'bytes', 'from', 'this', 'stream', '.'] | train | https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/primordium/transport/objects.py#L103-L118 |
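A hedged sketch; it assumes DataInputStream wraps a file-like object handed to its constructor, consistent with the _my_data attribute used above:

from io import BytesIO
from dlkit.primordium.transport.objects import DataInputStream

stream = DataInputStream(BytesIO(b'hello world'))   # assumed constructor signature
print(stream.read(n=5))    # b'hello'
print(stream.read())       # b' world' -- the rest of the stream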
9,494 | materialsproject/pymatgen | pymatgen/analysis/graphs.py | MoleculeGraph.with_empty_graph | def with_empty_graph(cls, molecule, name="bonds",
edge_weight_name=None,
edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError("Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless.")
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data) | python | def with_empty_graph(cls, molecule, name="bonds",
edge_weight_name=None,
edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError("Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless.")
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data) | ['def', 'with_empty_graph', '(', 'cls', ',', 'molecule', ',', 'name', '=', '"bonds"', ',', 'edge_weight_name', '=', 'None', ',', 'edge_weight_units', '=', 'None', ')', ':', 'if', 'edge_weight_name', 'and', '(', 'edge_weight_units', 'is', 'None', ')', ':', 'raise', 'ValueError', '(', '"Please specify units associated "', '"with your edge weights. Can be "', '"empty string if arbitrary or "', '"dimensionless."', ')', '# construct graph with one node per site', "# graph attributes don't change behavior of graph,", "# they're just for book-keeping", 'graph', '=', 'nx', '.', 'MultiDiGraph', '(', 'edge_weight_name', '=', 'edge_weight_name', ',', 'edge_weight_units', '=', 'edge_weight_units', ',', 'name', '=', 'name', ')', 'graph', '.', 'add_nodes_from', '(', 'range', '(', 'len', '(', 'molecule', ')', ')', ')', 'graph_data', '=', 'json_graph', '.', 'adjacency_data', '(', 'graph', ')', 'return', 'cls', '(', 'molecule', ',', 'graph_data', '=', 'graph_data', ')'] | Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph): | ['Constructor', 'for', 'MoleculeGraph', 'returns', 'a', 'MoleculeGraph', 'object', 'with', 'an', 'empty', 'graph', '(', 'no', 'edges', 'only', 'nodes', 'defined', 'that', 'correspond', 'to', 'Sites', 'in', 'Molecule', ')', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/graphs.py#L1519-L1552 |
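A short sketch of how this constructor is typically used, assuming a simple two-atom molecule; the follow-up add_edge call reflects the MoleculeGraph API as understood here and is shown only for illustration.

from pymatgen.core.structure import Molecule
from pymatgen.analysis.graphs import MoleculeGraph

# Two-atom H2 molecule; coordinates in angstroms.
h2 = Molecule(["H", "H"], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.74]])

# Empty graph: one node per site, no edges yet. A weight name without weight
# units would raise ValueError, as shown in the constructor above.
mg = MoleculeGraph.with_empty_graph(h2, edge_weight_name="bond_length",
                                    edge_weight_units="Å")

# Edges are added separately (API assumed from the same class).
mg.add_edge(0, 1, weight=0.74)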
9,495 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QATdx.py | QA_fetch_get_future_transaction_realtime | def QA_fetch_get_future_transaction_realtime(code, ip=None, port=None):
'期货历史成交分笔'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
code_market = extension_market_list.query(
'code=="{}"'.format(code)).iloc[0]
with apix.connect(ip, port):
data = pd.DataFrame()
data = pd.concat([apix.to_df(apix.get_transaction_data(
int(code_market.market), code, (30 - i) * 1800)) for i in range(31)], axis=0)
return data.assign(datetime=pd.to_datetime(data['date'])).assign(date=lambda x: str(x)[0:10]) \
.assign(code=str(code)).assign(order=range(len(data.index))).set_index('datetime', drop=False,
inplace=False) | python | def QA_fetch_get_future_transaction_realtime(code, ip=None, port=None):
'期货历史成交分笔'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
code_market = extension_market_list.query(
'code=="{}"'.format(code)).iloc[0]
with apix.connect(ip, port):
data = pd.DataFrame()
data = pd.concat([apix.to_df(apix.get_transaction_data(
int(code_market.market), code, (30 - i) * 1800)) for i in range(31)], axis=0)
return data.assign(datetime=pd.to_datetime(data['date'])).assign(date=lambda x: str(x)[0:10]) \
.assign(code=str(code)).assign(order=range(len(data.index))).set_index('datetime', drop=False,
inplace=False) | ['def', 'QA_fetch_get_future_transaction_realtime', '(', 'code', ',', 'ip', '=', 'None', ',', 'port', '=', 'None', ')', ':', 'ip', ',', 'port', '=', 'get_extensionmarket_ip', '(', 'ip', ',', 'port', ')', 'apix', '=', 'TdxExHq_API', '(', ')', 'global', 'extension_market_list', 'extension_market_list', '=', 'QA_fetch_get_extensionmarket_list', '(', ')', 'if', 'extension_market_list', 'is', 'None', 'else', 'extension_market_list', 'code_market', '=', 'extension_market_list', '.', 'query', '(', '\'code=="{}"\'', '.', 'format', '(', 'code', ')', ')', '.', 'iloc', '[', '0', ']', 'with', 'apix', '.', 'connect', '(', 'ip', ',', 'port', ')', ':', 'data', '=', 'pd', '.', 'DataFrame', '(', ')', 'data', '=', 'pd', '.', 'concat', '(', '[', 'apix', '.', 'to_df', '(', 'apix', '.', 'get_transaction_data', '(', 'int', '(', 'code_market', '.', 'market', ')', ',', 'code', ',', '(', '30', '-', 'i', ')', '*', '1800', ')', ')', 'for', 'i', 'in', 'range', '(', '31', ')', ']', ',', 'axis', '=', '0', ')', 'return', 'data', '.', 'assign', '(', 'datetime', '=', 'pd', '.', 'to_datetime', '(', 'data', '[', "'date'", ']', ')', ')', '.', 'assign', '(', 'date', '=', 'lambda', 'x', ':', 'str', '(', 'x', ')', '[', '0', ':', '10', ']', ')', '.', 'assign', '(', 'code', '=', 'str', '(', 'code', ')', ')', '.', 'assign', '(', 'order', '=', 'range', '(', 'len', '(', 'data', '.', 'index', ')', ')', ')', '.', 'set_index', '(', "'datetime'", ',', 'drop', '=', 'False', ',', 'inplace', '=', 'False', ')'] | 期货历史成交分笔 | ['期货历史成交分笔'] | train | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QATdx.py#L2112-L2128 |
9,496 | beerfactory/hbmqtt | hbmqtt/broker.py | Broker.authenticate | def authenticate(self, session: Session, listener):
"""
This method call the authenticate method on registered plugins to test user authentication.
User is considered authenticated if all plugins called returns True.
Plugins authenticate() method are supposed to return :
- True if user is authentication succeed
- False if user authentication fails
- None if authentication can't be achieved (then plugin result is then ignored)
:param session:
:param listener:
:return:
"""
auth_plugins = None
auth_config = self.config.get('auth', None)
if auth_config:
auth_plugins = auth_config.get('plugins', None)
returns = yield from self.plugins_manager.map_plugin_coro(
"authenticate",
session=session,
filter_plugins=auth_plugins)
auth_result = True
if returns:
for plugin in returns:
res = returns[plugin]
if res is False:
auth_result = False
self.logger.debug("Authentication failed due to '%s' plugin result: %s" % (plugin.name, res))
else:
self.logger.debug("'%s' plugin result: %s" % (plugin.name, res))
# If all plugins returned True, authentication is success
return auth_result | python | def authenticate(self, session: Session, listener):
"""
This method call the authenticate method on registered plugins to test user authentication.
User is considered authenticated if all plugins called returns True.
Plugins authenticate() method are supposed to return :
- True if user is authentication succeed
- False if user authentication fails
- None if authentication can't be achieved (then plugin result is then ignored)
:param session:
:param listener:
:return:
"""
auth_plugins = None
auth_config = self.config.get('auth', None)
if auth_config:
auth_plugins = auth_config.get('plugins', None)
returns = yield from self.plugins_manager.map_plugin_coro(
"authenticate",
session=session,
filter_plugins=auth_plugins)
auth_result = True
if returns:
for plugin in returns:
res = returns[plugin]
if res is False:
auth_result = False
self.logger.debug("Authentication failed due to '%s' plugin result: %s" % (plugin.name, res))
else:
self.logger.debug("'%s' plugin result: %s" % (plugin.name, res))
# If all plugins returned True, authentication is success
return auth_result | ['def', 'authenticate', '(', 'self', ',', 'session', ':', 'Session', ',', 'listener', ')', ':', 'auth_plugins', '=', 'None', 'auth_config', '=', 'self', '.', 'config', '.', 'get', '(', "'auth'", ',', 'None', ')', 'if', 'auth_config', ':', 'auth_plugins', '=', 'auth_config', '.', 'get', '(', "'plugins'", ',', 'None', ')', 'returns', '=', 'yield', 'from', 'self', '.', 'plugins_manager', '.', 'map_plugin_coro', '(', '"authenticate"', ',', 'session', '=', 'session', ',', 'filter_plugins', '=', 'auth_plugins', ')', 'auth_result', '=', 'True', 'if', 'returns', ':', 'for', 'plugin', 'in', 'returns', ':', 'res', '=', 'returns', '[', 'plugin', ']', 'if', 'res', 'is', 'False', ':', 'auth_result', '=', 'False', 'self', '.', 'logger', '.', 'debug', '(', '"Authentication failed due to \'%s\' plugin result: %s"', '%', '(', 'plugin', '.', 'name', ',', 'res', ')', ')', 'else', ':', 'self', '.', 'logger', '.', 'debug', '(', '"\'%s\' plugin result: %s"', '%', '(', 'plugin', '.', 'name', ',', 'res', ')', ')', '# If all plugins returned True, authentication is success', 'return', 'auth_result'] | This method call the authenticate method on registered plugins to test user authentication.
User is considered authenticated if all plugins called returns True.
Plugins authenticate() method are supposed to return :
- True if user is authentication succeed
- False if user authentication fails
- None if authentication can't be achieved (then plugin result is then ignored)
:param session:
:param listener:
:return: | ['This', 'method', 'call', 'the', 'authenticate', 'method', 'on', 'registered', 'plugins', 'to', 'test', 'user', 'authentication', '.', 'User', 'is', 'considered', 'authenticated', 'if', 'all', 'plugins', 'called', 'returns', 'True', '.', 'Plugins', 'authenticate', '()', 'method', 'are', 'supposed', 'to', 'return', ':', '-', 'True', 'if', 'user', 'is', 'authentication', 'succeed', '-', 'False', 'if', 'user', 'authentication', 'fails', '-', 'None', 'if', 'authentication', 'can', 't', 'be', 'achieved', '(', 'then', 'plugin', 'result', 'is', 'then', 'ignored', ')', ':', 'param', 'session', ':', ':', 'param', 'listener', ':', ':', 'return', ':'] | train | https://github.com/beerfactory/hbmqtt/blob/4aa6fe982141abc3c54e9f4d7b981ab3eba0a13c/hbmqtt/broker.py#L532-L562 |
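A hedged sketch of the plugin side of this contract: an authenticate coroutine returning True, False, or None, plus the config shape the broker reads the plugin filter from. The plugin class and allow-list are invented for illustration.

import asyncio

class AllowListAuthPlugin:
    """Illustrative plugin; real hbmqtt plugins are discovered via entry points."""

    def __init__(self, context):
        self.context = context
        self.allowed = {"alice", "bob"}

    @asyncio.coroutine
    def authenticate(self, *args, **kwargs):
        session = kwargs.get("session")
        if session is None or session.username is None:
            return None                              # cannot decide, result is ignored
        return session.username in self.allowed      # True or False decides the outcome

# Config excerpt read by Broker.authenticate when filtering which plugins to call:
config = {
    "auth": {
        "plugins": ["auth_anonymous", "auth_allowlist"],   # names are illustrative
    }
}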
9,497 | datastax/python-driver | cassandra/cluster.py | ControlConnection._set_new_connection | def _set_new_connection(self, conn):
"""
Replace existing connection (if there is one) and close it.
"""
with self._lock:
old = self._connection
self._connection = conn
if old:
log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn)
old.close() | python | def _set_new_connection(self, conn):
"""
Replace existing connection (if there is one) and close it.
"""
with self._lock:
old = self._connection
self._connection = conn
if old:
log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn)
old.close() | ['def', '_set_new_connection', '(', 'self', ',', 'conn', ')', ':', 'with', 'self', '.', '_lock', ':', 'old', '=', 'self', '.', '_connection', 'self', '.', '_connection', '=', 'conn', 'if', 'old', ':', 'log', '.', 'debug', '(', '"[control connection] Closing old connection %r, replacing with %r"', ',', 'old', ',', 'conn', ')', 'old', '.', 'close', '(', ')'] | Replace existing connection (if there is one) and close it. | ['Replace', 'existing', 'connection', '(', 'if', 'there', 'is', 'one', ')', 'and', 'close', 'it', '.'] | train | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cluster.py#L2888-L2898 |
9,498 | mikhaildubov/AST-text-analysis | east/asts/ast_naive.py | NaiveAnnotatedSuffixTree._construct | def _construct(self, strings_collection):
"""
Naive generalized suffix tree construction algorithm,
with quadratic [O(n_1^2 + ... + n_m^2)] worst-case time complexity,
where m is the number of strings in collection.
"""
# 0. Add a unique character to each string in the collection,
# to preserve simplicity while building the tree
strings_collection = utils.make_unique_endings(strings_collection)
root = ast.AnnotatedSuffixTree.Node()
root.strings_collection = strings_collection
# For each string in the collection...
for string_ind in xrange(len(strings_collection)):
string = strings_collection[string_ind]
# For each suffix of that string...
# (do not handle unique last characters as suffixes)
for suffix_start in xrange(len(string)-1):
suffix = string[suffix_start:]
# ... first try to find maximal matching path
node = root
child_node = node.chose_arc(suffix)
while child_node:
(str_ind, substr_start, substr_end) = child_node.arc()
match = utils.match_strings(
suffix, strings_collection[str_ind][substr_start:substr_end])
if match == substr_end-substr_start:
# matched the arc, proceed with child node
suffix = suffix[match:]
suffix_start += match
node = child_node
node.weight += 1
child_node = node.chose_arc(suffix)
else:
# ... then, where the matching path ends;
# create new inner node
# (that's the only possible alternative
# since we have unique string endings)
node.remove_child(child_node)
new_node = node.add_new_child(string_ind, suffix_start,
suffix_start+match)
new_leaf = new_node.add_new_child(string_ind, suffix_start+match,
len(string))
(osi, oss, ose) = child_node._arc
child_node._arc = (osi, oss+match, ose)
new_node.add_child(child_node)
new_leaf.weight = 1
new_node.weight = 1 + child_node.weight
suffix = ''
break
# ... or create new leaf if there was no appropriate arc to proceed
if suffix:
new_leaf = node.add_new_child(string_ind, suffix_start, len(string))
new_leaf.weight = 1
# Root will also be annotated by the weight of its children,
# to preserve simplicity while calculating string matching
for k in root.children:
root.weight += root.children[k].weight
return root | python | def _construct(self, strings_collection):
"""
Naive generalized suffix tree construction algorithm,
with quadratic [O(n_1^2 + ... + n_m^2)] worst-case time complexity,
where m is the number of strings in collection.
"""
# 0. Add a unique character to each string in the collection,
# to preserve simplicity while building the tree
strings_collection = utils.make_unique_endings(strings_collection)
root = ast.AnnotatedSuffixTree.Node()
root.strings_collection = strings_collection
# For each string in the collection...
for string_ind in xrange(len(strings_collection)):
string = strings_collection[string_ind]
# For each suffix of that string...
# (do not handle unique last characters as suffixes)
for suffix_start in xrange(len(string)-1):
suffix = string[suffix_start:]
# ... first try to find maximal matching path
node = root
child_node = node.chose_arc(suffix)
while child_node:
(str_ind, substr_start, substr_end) = child_node.arc()
match = utils.match_strings(
suffix, strings_collection[str_ind][substr_start:substr_end])
if match == substr_end-substr_start:
# matched the arc, proceed with child node
suffix = suffix[match:]
suffix_start += match
node = child_node
node.weight += 1
child_node = node.chose_arc(suffix)
else:
# ... then, where the matching path ends;
# create new inner node
# (that's the only possible alternative
# since we have unique string endings)
node.remove_child(child_node)
new_node = node.add_new_child(string_ind, suffix_start,
suffix_start+match)
new_leaf = new_node.add_new_child(string_ind, suffix_start+match,
len(string))
(osi, oss, ose) = child_node._arc
child_node._arc = (osi, oss+match, ose)
new_node.add_child(child_node)
new_leaf.weight = 1
new_node.weight = 1 + child_node.weight
suffix = ''
break
# ... or create new leaf if there was no appropriate arc to proceed
if suffix:
new_leaf = node.add_new_child(string_ind, suffix_start, len(string))
new_leaf.weight = 1
# Root will also be annotated by the weight of its children,
# to preserve simplicity while calculating string matching
for k in root.children:
root.weight += root.children[k].weight
return root | ['def', '_construct', '(', 'self', ',', 'strings_collection', ')', ':', '# 0. Add a unique character to each string in the collection,', '# to preserve simplicity while building the tree', 'strings_collection', '=', 'utils', '.', 'make_unique_endings', '(', 'strings_collection', ')', 'root', '=', 'ast', '.', 'AnnotatedSuffixTree', '.', 'Node', '(', ')', 'root', '.', 'strings_collection', '=', 'strings_collection', '# For each string in the collection...', 'for', 'string_ind', 'in', 'xrange', '(', 'len', '(', 'strings_collection', ')', ')', ':', 'string', '=', 'strings_collection', '[', 'string_ind', ']', '# For each suffix of that string...', '# (do not handle unique last characters as suffixes)', 'for', 'suffix_start', 'in', 'xrange', '(', 'len', '(', 'string', ')', '-', '1', ')', ':', 'suffix', '=', 'string', '[', 'suffix_start', ':', ']', '# ... first try to find maximal matching path', 'node', '=', 'root', 'child_node', '=', 'node', '.', 'chose_arc', '(', 'suffix', ')', 'while', 'child_node', ':', '(', 'str_ind', ',', 'substr_start', ',', 'substr_end', ')', '=', 'child_node', '.', 'arc', '(', ')', 'match', '=', 'utils', '.', 'match_strings', '(', 'suffix', ',', 'strings_collection', '[', 'str_ind', ']', '[', 'substr_start', ':', 'substr_end', ']', ')', 'if', 'match', '==', 'substr_end', '-', 'substr_start', ':', '# matched the arc, proceed with child node', 'suffix', '=', 'suffix', '[', 'match', ':', ']', 'suffix_start', '+=', 'match', 'node', '=', 'child_node', 'node', '.', 'weight', '+=', '1', 'child_node', '=', 'node', '.', 'chose_arc', '(', 'suffix', ')', 'else', ':', '# ... then, where the matching path ends;', '# create new inner node', "# (that's the only possible alternative", '# since we have unique string endings)', 'node', '.', 'remove_child', '(', 'child_node', ')', 'new_node', '=', 'node', '.', 'add_new_child', '(', 'string_ind', ',', 'suffix_start', ',', 'suffix_start', '+', 'match', ')', 'new_leaf', '=', 'new_node', '.', 'add_new_child', '(', 'string_ind', ',', 'suffix_start', '+', 'match', ',', 'len', '(', 'string', ')', ')', '(', 'osi', ',', 'oss', ',', 'ose', ')', '=', 'child_node', '.', '_arc', 'child_node', '.', '_arc', '=', '(', 'osi', ',', 'oss', '+', 'match', ',', 'ose', ')', 'new_node', '.', 'add_child', '(', 'child_node', ')', 'new_leaf', '.', 'weight', '=', '1', 'new_node', '.', 'weight', '=', '1', '+', 'child_node', '.', 'weight', 'suffix', '=', "''", 'break', '# ... or create new leaf if there was no appropriate arc to proceed', 'if', 'suffix', ':', 'new_leaf', '=', 'node', '.', 'add_new_child', '(', 'string_ind', ',', 'suffix_start', ',', 'len', '(', 'string', ')', ')', 'new_leaf', '.', 'weight', '=', '1', '# Root will also be annotated by the weight of its children,', '# to preserve simplicity while calculating string matching', 'for', 'k', 'in', 'root', '.', 'children', ':', 'root', '.', 'weight', '+=', 'root', '.', 'children', '[', 'k', ']', '.', 'weight', 'return', 'root'] | Naive generalized suffix tree construction algorithm,
with quadratic [O(n_1^2 + ... + n_m^2)] worst-case time complexity,
where m is the number of strings in collection. | ['Naive', 'generalized', 'suffix', 'tree', 'construction', 'algorithm', 'with', 'quadratic', '[', 'O', '(', 'n_1^2', '+', '...', '+', 'n_m^2', ')', ']', 'worst', '-', 'case', 'time', 'complexity', 'where', 'm', 'is', 'the', 'number', 'of', 'strings', 'in', 'collection', '.'] | train | https://github.com/mikhaildubov/AST-text-analysis/blob/055ad8d2492c100bbbaa25309ec1074bdf1dfaa5/east/asts/ast_naive.py#L12-L76 |
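A small sketch of the unique-endings preprocessing this construction relies on; the sentinel choice is an assumption about what utils.make_unique_endings might do, not a copy of it.

def make_unique_endings_sketch(strings):
    # Append a distinct terminator to each string so every suffix ends at a
    # unique leaf; private-use Unicode code points avoid clashing with input.
    return [s + chr(0xE000 + i) for i, s in enumerate(strings)]

docs = ["banana", "ananas"]
print(make_unique_endings_sketch(docs))   # ['banana\ue000', 'ananas\ue001']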
9,499 | bsmurphy/PyKrige | pykrige/core.py | _krige | def _krige(X, y, coords, variogram_function,
variogram_model_parameters, coordinates_type):
"""Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate
"""
zero_index = None
zero_value = False
# calculate distance between points... need a square distance matrix
# of inter-measurement-point distances and a vector of distances between
# measurement points (X) and the kriging point (coords)
if coordinates_type == 'euclidean':
d = squareform(pdist(X, metric='euclidean'))
bd = np.squeeze(cdist(X, coords[None, :], metric='euclidean'))
# geographic coordinate distances still calculated in the old way...
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# also assume problem is 2D; check done earlier in initializing variogram
elif coordinates_type == 'geographic':
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
bd = great_circle_distance(X[:, 0], X[:, 1],
coords[0] * np.ones(X.shape[0]),
coords[1] * np.ones(X.shape[0]))
# this check is done when initializing variogram, but kept here anyways...
else:
raise ValueError("Specified coordinate type '%s' "
"is not supported." % coordinates_type)
# check if kriging point overlaps with measurement point
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
# set up kriging matrix
n = X.shape[0]
a = np.zeros((n+1, n+1))
a[:n, :n] = - variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
# set up RHS
b = np.zeros((n+1, 1))
b[:n, 0] = - variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
# solve
res = np.linalg.solve(a, b)
zinterp = np.sum(res[:n, 0] * y)
sigmasq = np.sum(res[:, 0] * -b[:, 0])
return zinterp, sigmasq | python | def _krige(X, y, coords, variogram_function,
variogram_model_parameters, coordinates_type):
"""Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate
"""
zero_index = None
zero_value = False
# calculate distance between points... need a square distance matrix
# of inter-measurement-point distances and a vector of distances between
# measurement points (X) and the kriging point (coords)
if coordinates_type == 'euclidean':
d = squareform(pdist(X, metric='euclidean'))
bd = np.squeeze(cdist(X, coords[None, :], metric='euclidean'))
# geographic coordinate distances still calculated in the old way...
# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat
# also assume problem is 2D; check done earlier in initializing variogram
elif coordinates_type == 'geographic':
x1, x2 = np.meshgrid(X[:, 0], X[:, 0], sparse=True)
y1, y2 = np.meshgrid(X[:, 1], X[:, 1], sparse=True)
d = great_circle_distance(x1, y1, x2, y2)
bd = great_circle_distance(X[:, 0], X[:, 1],
coords[0] * np.ones(X.shape[0]),
coords[1] * np.ones(X.shape[0]))
# this check is done when initializing variogram, but kept here anyways...
else:
raise ValueError("Specified coordinate type '%s' "
"is not supported." % coordinates_type)
# check if kriging point overlaps with measurement point
if np.any(np.absolute(bd) <= 1e-10):
zero_value = True
zero_index = np.where(bd <= 1e-10)[0][0]
# set up kriging matrix
n = X.shape[0]
a = np.zeros((n+1, n+1))
a[:n, :n] = - variogram_function(variogram_model_parameters, d)
np.fill_diagonal(a, 0.0)
a[n, :] = 1.0
a[:, n] = 1.0
a[n, n] = 0.0
# set up RHS
b = np.zeros((n+1, 1))
b[:n, 0] = - variogram_function(variogram_model_parameters, bd)
if zero_value:
b[zero_index, 0] = 0.0
b[n, 0] = 1.0
# solve
res = np.linalg.solve(a, b)
zinterp = np.sum(res[:n, 0] * y)
sigmasq = np.sum(res[:, 0] * -b[:, 0])
return zinterp, sigmasq | ['def', '_krige', '(', 'X', ',', 'y', ',', 'coords', ',', 'variogram_function', ',', 'variogram_model_parameters', ',', 'coordinates_type', ')', ':', 'zero_index', '=', 'None', 'zero_value', '=', 'False', '# calculate distance between points... need a square distance matrix', '# of inter-measurement-point distances and a vector of distances between', '# measurement points (X) and the kriging point (coords)', 'if', 'coordinates_type', '==', "'euclidean'", ':', 'd', '=', 'squareform', '(', 'pdist', '(', 'X', ',', 'metric', '=', "'euclidean'", ')', ')', 'bd', '=', 'np', '.', 'squeeze', '(', 'cdist', '(', 'X', ',', 'coords', '[', 'None', ',', ':', ']', ',', 'metric', '=', "'euclidean'", ')', ')', '# geographic coordinate distances still calculated in the old way...', "# assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat", '# also assume problem is 2D; check done earlier in initializing variogram', 'elif', 'coordinates_type', '==', "'geographic'", ':', 'x1', ',', 'x2', '=', 'np', '.', 'meshgrid', '(', 'X', '[', ':', ',', '0', ']', ',', 'X', '[', ':', ',', '0', ']', ',', 'sparse', '=', 'True', ')', 'y1', ',', 'y2', '=', 'np', '.', 'meshgrid', '(', 'X', '[', ':', ',', '1', ']', ',', 'X', '[', ':', ',', '1', ']', ',', 'sparse', '=', 'True', ')', 'd', '=', 'great_circle_distance', '(', 'x1', ',', 'y1', ',', 'x2', ',', 'y2', ')', 'bd', '=', 'great_circle_distance', '(', 'X', '[', ':', ',', '0', ']', ',', 'X', '[', ':', ',', '1', ']', ',', 'coords', '[', '0', ']', '*', 'np', '.', 'ones', '(', 'X', '.', 'shape', '[', '0', ']', ')', ',', 'coords', '[', '1', ']', '*', 'np', '.', 'ones', '(', 'X', '.', 'shape', '[', '0', ']', ')', ')', '# this check is done when initializing variogram, but kept here anyways...', 'else', ':', 'raise', 'ValueError', '(', '"Specified coordinate type \'%s\' "', '"is not supported."', '%', 'coordinates_type', ')', '# check if kriging point overlaps with measurement point', 'if', 'np', '.', 'any', '(', 'np', '.', 'absolute', '(', 'bd', ')', '<=', '1e-10', ')', ':', 'zero_value', '=', 'True', 'zero_index', '=', 'np', '.', 'where', '(', 'bd', '<=', '1e-10', ')', '[', '0', ']', '[', '0', ']', '# set up kriging matrix', 'n', '=', 'X', '.', 'shape', '[', '0', ']', 'a', '=', 'np', '.', 'zeros', '(', '(', 'n', '+', '1', ',', 'n', '+', '1', ')', ')', 'a', '[', ':', 'n', ',', ':', 'n', ']', '=', '-', 'variogram_function', '(', 'variogram_model_parameters', ',', 'd', ')', 'np', '.', 'fill_diagonal', '(', 'a', ',', '0.0', ')', 'a', '[', 'n', ',', ':', ']', '=', '1.0', 'a', '[', ':', ',', 'n', ']', '=', '1.0', 'a', '[', 'n', ',', 'n', ']', '=', '0.0', '# set up RHS', 'b', '=', 'np', '.', 'zeros', '(', '(', 'n', '+', '1', ',', '1', ')', ')', 'b', '[', ':', 'n', ',', '0', ']', '=', '-', 'variogram_function', '(', 'variogram_model_parameters', ',', 'bd', ')', 'if', 'zero_value', ':', 'b', '[', 'zero_index', ',', '0', ']', '=', '0.0', 'b', '[', 'n', ',', '0', ']', '=', '1.0', '# solve', 'res', '=', 'np', '.', 'linalg', '.', 'solve', '(', 'a', ',', 'b', ')', 'zinterp', '=', 'np', '.', 'sum', '(', 'res', '[', ':', 'n', ',', '0', ']', '*', 'y', ')', 'sigmasq', '=', 'np', '.', 'sum', '(', 'res', '[', ':', ',', '0', ']', '*', '-', 'b', '[', ':', ',', '0', ']', ')', 'return', 'zinterp', ',', 'sigmasq'] | Sets up and solves the ordinary kriging system for the given
coordinate pair. This function is only used for the statistics calculations.
Parameters
----------
X: ndarray
float array [n_samples, n_dim], the input array of coordinates
y: ndarray
float array [n_samples], the input array of measurement values
coords: ndarray
float array [1, n_dim], point at which to evaluate the kriging system
variogram_function: callable
function that will be called to evaluate variogram model
variogram_model_parameters: list
user-specified parameters for variogram model
coordinates_type: str
type of coordinates in X array, can be 'euclidean' for standard
rectangular coordinates or 'geographic' if the coordinates are lat/lon
Returns
-------
zinterp: float
kriging estimate at the specified point
sigmasq: float
mean square error of the kriging estimate | ['Sets', 'up', 'and', 'solves', 'the', 'ordinary', 'kriging', 'system', 'for', 'the', 'given', 'coordinate', 'pair', '.', 'This', 'function', 'is', 'only', 'used', 'for', 'the', 'statistics', 'calculations', '.'] | train | https://github.com/bsmurphy/PyKrige/blob/a4db3003b0b5688658c12faeb95a5a8b2b14b433/pykrige/core.py#L585-L666 |
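A compact, self-contained rework of the same ordinary kriging system for the Euclidean case with a linear variogram; the sample points, variogram parameters, and helper name are invented for the sketch.

import numpy as np
from scipy.spatial.distance import cdist, pdist, squareform

def linear_variogram(params, d):
    # gamma(d) = slope * d + nugget, same call style as variogram_function above
    return params[0] * d + params[1]

X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # measurement locations
y = np.array([1.0, 2.0, 3.0])                        # measured values
coords = np.array([0.5, 0.5])                        # estimation point
params = [1.0, 0.0]                                  # slope, nugget (assumed)

n = X.shape[0]
d = squareform(pdist(X))                  # inter-point distance matrix
bd = cdist(X, coords[None, :]).ravel()    # distances to the kriging point

# Kriging matrix: negative variograms, zero diagonal, unbiasedness row/column.
a = np.zeros((n + 1, n + 1))
a[:n, :n] = -linear_variogram(params, d)
np.fill_diagonal(a, 0.0)
a[n, :n] = 1.0
a[:n, n] = 1.0

b = np.zeros(n + 1)
b[:n] = -linear_variogram(params, bd)
b[n] = 1.0

weights = np.linalg.solve(a, b)
z_hat = weights[:n] @ y        # kriging estimate at coords
sigma_sq = weights @ -b        # mean square error of the estimate
print(z_hat, sigma_sq)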