text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def op_at_code_loc(code, loc, opc):
"""Return the instruction name at code[loc] using
opc to look up instruction names. Returns 'got IndexError'
if code[loc] is invalid.
`code` is instruction bytecode, `loc` is an offset (integer) and
`opc` is an opcode module from `xdis`.
"""
try:
op = code[loc]
except IndexError:
return 'got IndexError'
return opc.opname[op] | 0.00241 |
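A minimal sketch of the same lookup using only the standard-library `dis` module in place of an `xdis` opcode object; the compiled snippet and the printed name are illustrative and depend on the Python version:

import dis

code = compile("x = 1", "<demo>", "exec").co_code   # raw bytecode as a bytes object
loc = 0
try:
    op = code[loc]                # indexing bytes yields an integer opcode
    name = dis.opname[op]
except IndexError:
    name = 'got IndexError'
print(name)                       # e.g. 'LOAD_CONST' or 'RESUME', depending on the interpreter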
def _resetID(self):
"""Reset all ID fields."""
# Dirty.. .=))
self._setID((None,) * len(self._sqlPrimary))
self._new = True | 0.012903 |
def depth_limited_search(problem, limit=50):
"[Fig. 3.17]"
def recursive_dls(node, problem, limit):
if problem.goal_test(node.state):
return node
elif node.depth == limit:
return 'cutoff'
else:
cutoff_occurred = False
for child in node.expand(problem):
result = recursive_dls(child, problem, limit)
if result == 'cutoff':
cutoff_occurred = True
elif result is not None:
return result
return if_(cutoff_occurred, 'cutoff', None)
# Body of depth_limited_search:
return recursive_dls(Node(problem.initial), problem, limit) | 0.001412 |
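The AIMA helpers used above (`Node` and `if_`) are defined elsewhere; a hedged sketch of the tiny `if_` utility that the final return statement assumes:

def if_(test, result, alternative):
    """Ternary helper: return `result` when `test` is truthy, else `alternative`."""
    return result if test else alternative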
def gen_tokens(self):
"""
>>> list(Program("ls").gen_tokens())
['ls']
>>> list(Program("ls -a").gen_tokens())
['ls', '-a']
>>> list(Program("cd /; pwd").gen_tokens())
['cd', '/', None, 'pwd']
>>> list(Program("'cd /; pwd'").gen_tokens())
['cd /; pwd']
"""
current_token = []
escape = False
quote = None
skip = 0
for char, peek in zip(self.text, self.text[1:] + " "):
if skip > 0:
skip -= 1
continue
if quote is None:
if escape:
current_token.append(char)
escape = False
elif char in self.escape_chars:
escape = True
elif char in QUOTES:
quote = char
elif char in self.metacharacters:
if current_token:
yield token.Word(''.join(current_token))
current_token = []
if char == "(":
yield token.LParen()
elif char == ")":
yield token.RParen()
elif char in "|&;":
if peek == char:
yield token.Word(char + peek)
skip += 1
else:
yield token.Word(char)
else:
current_token.append(char)
elif char == quote:
if current_token:
yield token.DoubleQuote(''.join(current_token))
current_token = []
quote = None
else:
current_token.append(char)
if quote is not None:
raise ValueError("No closing quotation")
if escape:
raise ValueError("No escaped character")
if current_token:
yield token.Word(''.join(current_token)) | 0.000986 |
def retry(*dargs, **dkw):
"""Wrap a function with a new `Retrying` object.
:param dargs: positional arguments passed to Retrying object
:param dkw: keyword arguments passed to the Retrying object
"""
# support both @retry and @retry() as valid syntax
if len(dargs) == 1 and callable(dargs[0]):
return retry()(dargs[0])
else:
def wrap(f):
if asyncio and asyncio.iscoroutinefunction(f):
r = AsyncRetrying(*dargs, **dkw)
elif tornado and hasattr(tornado.gen, 'is_coroutine_function') \
and tornado.gen.is_coroutine_function(f):
r = TornadoRetrying(*dargs, **dkw)
else:
r = Retrying(*dargs, **dkw)
return r.wraps(f)
return wrap | 0.001259 |
def get_negative_cycle(self):
'''
API:
get_negative_cycle(self)
Description:
            Finds and returns a negative cost cycle using the 'cost' attribute
            of arcs. The return value is a list of nodes representing the cycle,
            in the form n_1-n_2-...-n_k when the cycle has k nodes.
Pre:
Arcs should have 'cost' attribute.
Return:
Returns a list of nodes in the cycle if a negative cycle exists,
returns None otherwise.
'''
nl = self.get_node_list()
i = nl[0]
(valid, distance, nextn) = self.floyd_warshall()
if not valid:
cycle = self.floyd_warshall_get_cycle(distance, nextn)
return cycle
else:
return None | 0.002484 |
def last_error(self):
"""Get the output of the last command exevuted."""
if not len(self.log):
raise RuntimeError('Nothing executed')
try:
errs = [l for l in self.log if l[1] != 0]
return errs[-1][2]
except IndexError:
# odd case where there were no errors
#TODO
return 'no last error' | 0.01023 |
def warn(self, collection):
"""Checks this code element for documentation related problems."""
if not self.has_docstring():
collection.append("WARNING: no docstring on code element {}".format(self.name)) | 0.012987 |
def count_words(text, to_lower=True, delimiters=DEFAULT_DELIMITERS):
"""
If `text` is an SArray of strings or an SArray of lists of strings, the
    occurrences of each word are counted for each row in the SArray.
If `text` is an SArray of dictionaries, the keys are tokenized and the
values are the counts. Counts for the same word, in the same row, are
added together.
This output is commonly known as the "bag-of-words" representation of text
data.
Parameters
----------
text : SArray[str | dict | list]
SArray of type: string, dict or list.
to_lower : bool, optional
If True, all strings are converted to lower case before counting.
delimiters : list[str], None, optional
Input strings are tokenized using `delimiters` characters in this list.
Each entry in this list must contain a single character. If set to
`None`, then a Penn treebank-style tokenization is used, which contains
        smart handling of punctuation.
Returns
-------
out : SArray[dict]
      An SArray with the same length as the `text` input. For each row, the keys
of the dictionary are the words and the values are the corresponding counts.
See Also
--------
count_ngrams, tf_idf, tokenize,
References
----------
- `Bag of words model <http://en.wikipedia.org/wiki/Bag-of-words_model>`_
- `Penn treebank tokenization <https://web.archive.org/web/19970614072242/http://www.cis.upenn.edu:80/~treebank/tokenization.html>`_
Examples
--------
.. sourcecode:: python
>>> import turicreate
# Create input data
>>> sa = turicreate.SArray(["The quick brown fox jumps.",
"Word word WORD, word!!!word"])
# Run count_words
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'quick': 1, 'brown': 1, 'the': 1, 'fox': 1, 'jumps.': 1},
{'word,': 5}]
# Run count_words with Penn treebank style tokenization to handle
    # punctuation
>>> turicreate.text_analytics.count_words(sa, delimiters=None)
dtype: dict
Rows: 2
[{'brown': 1, 'jumps': 1, 'fox': 1, '.': 1, 'quick': 1, 'the': 1},
{'word': 3, 'word!!!word': 1, ',': 1}]
# Run count_words with dictionary input
>>> sa = turicreate.SArray([{'alice bob': 1, 'Bob alice': 0.5},
{'a dog': 0, 'a dog cat': 5}])
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'bob': 1.5, 'alice': 1.5}, {'a': 5, 'dog': 5, 'cat': 5}]
# Run count_words with list input
>>> sa = turicreate.SArray([['one', 'bar bah'], ['a dog', 'a dog cat']])
>>> turicreate.text_analytics.count_words(sa)
dtype: dict
Rows: 2
[{'bar': 1, 'bah': 1, 'one': 1}, {'a': 2, 'dog': 2, 'cat': 1}]
"""
_raise_error_if_not_sarray(text, "text")
## Compute word counts
sf = _turicreate.SFrame({'docs': text})
fe = _feature_engineering.WordCounter(features='docs',
to_lower=to_lower,
delimiters=delimiters,
output_column_prefix=None)
output_sf = fe.fit_transform(sf)
return output_sf['docs'] | 0.001751 |
def DynamicNestedSampler(loglikelihood, prior_transform, ndim,
bound='multi', sample='auto', periodic=None,
update_interval=None, first_update=None,
npdim=None, rstate=None, queue_size=None, pool=None,
use_pool=None, logl_args=None, logl_kwargs=None,
ptform_args=None, ptform_kwargs=None,
gradient=None, grad_args=None, grad_kwargs=None,
compute_jac=False,
enlarge=None, bootstrap=0,
vol_dec=0.5, vol_check=2.0,
walks=25, facc=0.5,
slices=5, fmove=0.9, max_move=100,
**kwargs):
"""
Initializes and returns a sampler object for Dynamic Nested Sampling.
Parameters
----------
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
prior_transform : function
Function translating a unit cube to the parameter space according to
the prior. The input is a 1-d `~numpy` array with length `ndim`, where
each value is in the range [0, 1). The return value should also be a
1-d `~numpy` array with length `ndim`, where each value is a parameter.
The return value is passed to the loglikelihood function. For example,
for a 2 parameter model with flat priors in the range [0, 2), the
function would be::
def prior_transform(u):
return 2.0 * u
ndim : int
Number of parameters returned by `prior_transform` and accepted by
`loglikelihood`.
bound : {`'none'`, `'single'`, `'multi'`, `'balls'`, `'cubes'`}, optional
Method used to approximately bound the prior using the current
set of live points. Conditions the sampling methods used to
propose new live points. Choices are no bound (`'none'`), a single
bounding ellipsoid (`'single'`), multiple bounding ellipsoids
(`'multi'`), balls centered on each live point (`'balls'`), and
cubes centered on each live point (`'cubes'`). Default is `'multi'`.
sample : {`'auto'`, `'unif'`, `'rwalk'`, `'rstagger'`,
`'slice'`, `'rslice'`, `'hslice'`}, optional
Method used to sample uniformly within the likelihood constraint,
conditioned on the provided bounds. Unique methods available are:
        uniform sampling within the bounds (`'unif'`),
random walks with fixed proposals (`'rwalk'`),
random walks with variable ("staggering") proposals (`'rstagger'`),
multivariate slice sampling along preferred orientations (`'slice'`),
"random" slice sampling along all orientations (`'rslice'`), and
"Hamiltonian" slices along random trajectories (`'hslice'`).
`'auto'` selects the sampling method based on the dimensionality
of the problem (from `ndim`).
When `ndim < 10`, this defaults to `'unif'`.
When `10 <= ndim <= 20`, this defaults to `'rwalk'`.
When `ndim > 20`, this defaults to `'hslice'` if a `gradient` is
provided and `'slice'` otherwise. `'rstagger'` and `'rslice'`
are provided as alternatives for `'rwalk'` and `'slice'`, respectively.
Default is `'auto'`.
periodic : iterable, optional
A list of indices for parameters with periodic boundary conditions.
These parameters *will not* have their positions constrained to be
within the unit cube, enabling smooth behavior for parameters
that may wrap around the edge. It is assumed that their periodicity
is dealt with in the `prior_transform` and/or `loglikelihood`
functions. Default is `None` (i.e. no periodic boundary conditions).
update_interval : int or float, optional
If an integer is passed, only update the proposal distribution every
`update_interval`-th likelihood call. If a float is passed, update the
proposal after every `round(update_interval * nlive)`-th likelihood
        call. Larger update intervals can be more efficient
when the likelihood function is quick to evaluate. Default behavior
is to target a roughly constant change in prior volume, with
`1.5` for `'unif'`, `0.15 * walks` for `'rwalk'` and `'rstagger'`,
`0.9 * ndim * slices` for `'slice'`, `2.0 * slices` for `'rslice'`,
and `25.0 * slices` for `'hslice'`.
first_update : dict, optional
A dictionary containing parameters governing when the sampler should
first update the bounding distribution from the unit cube (`'none'`)
to the one specified by `sample`. Options are the minimum number of
likelihood calls (`'min_ncall'`) and the minimum allowed overall
efficiency in percent (`'min_eff'`). Defaults are `2 * nlive` and
`10.`, respectively.
npdim : int, optional
Number of parameters accepted by `prior_transform`. This might differ
from `ndim` in the case where a parameter of loglikelihood is dependent
upon multiple independently distributed parameters, some of which may
be nuisance parameters.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance. If not given, the
global random state of the `~numpy.random` module will be used.
queue_size : int, optional
Carry out likelihood evaluations in parallel by queueing up new live
point proposals using (at most) `queue_size` many threads. Each thread
independently proposes new live points until the proposal distribution
is updated. If no value is passed, this defaults to `pool.size` (if
a `pool` has been provided) and `1` otherwise (no parallelism).
pool : user-provided pool, optional
Use this pool of workers to execute operations in parallel.
use_pool : dict, optional
A dictionary containing flags indicating where a pool should be used to
execute operations in parallel. These govern whether `prior_transform`
is executed in parallel during initialization (`'prior_transform'`),
`loglikelihood` is executed in parallel during initialization
(`'loglikelihood'`), live points are proposed in parallel during a run
(`'propose_point'`), bounding distributions are updated in
parallel during a run (`'update_bound'`), and the stopping criteria
is evaluated in parallel during a run (`'stop_function'`).
Default is `True` for all options.
logl_args : dict, optional
Additional arguments that can be passed to `loglikelihood`.
logl_kwargs : dict, optional
Additional keyword arguments that can be passed to `loglikelihood`.
ptform_args : dict, optional
Additional arguments that can be passed to `prior_transform`.
ptform_kwargs : dict, optional
Additional keyword arguments that can be passed to `prior_transform`.
gradient : function, optional
A function which returns the gradient corresponding to
the provided `loglikelihood` *with respect to the unit cube*.
If provided, this will be used when computing reflections
when sampling with `'hslice'`. If not provided, gradients are
approximated numerically using 2-sided differencing.
grad_args : dict, optional
Additional arguments that can be passed to `gradient`.
grad_kwargs : dict, optional
Additional keyword arguments that can be passed to `gradient`.
compute_jac : bool, optional
Whether to compute and apply the Jacobian `dv/du`
from the target space `v` to the unit cube `u` when evaluating the
`gradient`. If `False`, the gradient provided is assumed to be
already defined with respect to the unit cube. If `True`, the gradient
provided is assumed to be defined with respect to the target space
so the Jacobian needs to be numerically computed and applied. Default
is `False`.
enlarge : float, optional
Enlarge the volumes of the specified bounding object(s) by this
fraction. The preferred method is to determine this organically
using bootstrapping. If `bootstrap > 0`, this defaults to `1.0`.
If `bootstrap = 0`, this instead defaults to `1.25`.
bootstrap : int, optional
Compute this many bootstrapped realizations of the bounding
objects. Use the maximum distance found to the set of points left
out during each iteration to enlarge the resulting volumes. Can lead
to unstable bounding ellipsoids. Default is `0` (no bootstrap).
vol_dec : float, optional
For the `'multi'` bounding option, the required fractional reduction
        in volume after splitting an ellipsoid in order to accept the split.
Default is `0.5`.
vol_check : float, optional
For the `'multi'` bounding option, the factor used when checking if
the volume of the original bounding ellipsoid is large enough to
warrant `> 2` splits via `ell.vol > vol_check * nlive * pointvol`.
Default is `2.0`.
walks : int, optional
For the `'rwalk'` sampling option, the minimum number of steps
(minimum 2) before proposing a new live point. Default is `25`.
facc : float, optional
The target acceptance fraction for the `'rwalk'` sampling option.
Default is `0.5`. Bounded to be between `[1. / walks, 1.]`.
slices : int, optional
For the `'slice'`, `'rslice'`, and `'hslice'` sampling
options, the number of times to execute a "slice update"
before proposing a new live point. Default is `5`.
Note that `'slice'` cycles through **all dimensions**
when executing a "slice update".
fmove : float, optional
The target fraction of samples that are proposed along a trajectory
(i.e. not reflecting) for the `'hslice'` sampling option.
Default is `0.9`.
max_move : int, optional
The maximum number of timesteps allowed for `'hslice'`
per proposal forwards and backwards in time. Default is `100`.
Returns
-------
sampler : a :class:`dynesty.DynamicSampler` instance
An initialized instance of the dynamic nested sampler.
"""
# Prior dimensions.
if npdim is None:
npdim = ndim
# Bounding method.
if bound not in _SAMPLERS:
raise ValueError("Unknown bounding method: '{0}'".format(bound))
# Sampling method.
if sample == 'auto':
if npdim < 10:
sample = 'unif'
elif 10 <= npdim <= 20:
sample = 'rwalk'
else:
if gradient is None:
sample = 'slice'
else:
sample = 'hslice'
if sample not in _SAMPLING:
raise ValueError("Unknown sampling method: '{0}'".format(sample))
# Gather non-periodic boundary conditions.
if periodic is not None:
nonperiodic = np.ones(npdim, dtype='bool')
nonperiodic[periodic] = False
else:
nonperiodic = None
kwargs['nonperiodic'] = nonperiodic
# Update interval for bounds.
if update_interval is None:
if sample == 'unif':
update_interval = 1.5
elif sample == 'rwalk' or sample == 'rstagger':
update_interval = 0.15 * walks
elif sample == 'slice':
update_interval = 0.9 * npdim * slices
elif sample == 'rslice':
update_interval = 2.0 * slices
elif sample == 'hslice':
update_interval = 25.0 * slices
else:
raise ValueError("Unknown sampling method: '{0}'".format(sample))
if bound == 'none':
update_interval = sys.maxsize # no need to update with no bounds
# Keyword arguments controlling the first update.
if first_update is None:
first_update = dict()
# Random state.
if rstate is None:
rstate = np.random
# Log-likelihood.
if logl_args is None:
logl_args = []
if logl_kwargs is None:
logl_kwargs = {}
# Prior transform.
if ptform_args is None:
ptform_args = []
if ptform_kwargs is None:
ptform_kwargs = dict()
# gradient
if grad_args is None:
grad_args = []
if grad_kwargs is None:
grad_kwargs = {}
# Bounding distribution modifications.
if enlarge is not None:
kwargs['enlarge'] = enlarge
if bootstrap is not None:
kwargs['bootstrap'] = bootstrap
if vol_dec is not None:
kwargs['vol_dec'] = vol_dec
if vol_check is not None:
kwargs['vol_check'] = vol_check
# Sampling.
if walks is not None:
kwargs['walks'] = walks
if facc is not None:
kwargs['facc'] = facc
if slices is not None:
kwargs['slices'] = slices
if fmove is not None:
kwargs['fmove'] = fmove
if max_move is not None:
kwargs['max_move'] = max_move
# Set up parallel (or serial) evaluation.
if queue_size is not None and queue_size < 1:
raise ValueError("The queue must contain at least one element!")
elif (queue_size == 1) or (pool is None and queue_size is None):
queue_size = 1
elif pool is not None:
if queue_size is None:
try:
queue_size = pool.size
except:
raise ValueError("Cannot initialize `queue_size` because "
"`pool.size` has not been provided. Please "
"define `pool.size` or specify `queue_size` "
"explicitly.")
else:
raise ValueError("`queue_size > 1` but no `pool` provided.")
if use_pool is None:
use_pool = dict()
# Wrap functions.
ptform = _function_wrapper(prior_transform, ptform_args, ptform_kwargs,
name='prior_transform')
loglike = _function_wrapper(loglikelihood, logl_args, logl_kwargs,
name='loglikelihood')
# Add in gradient.
if gradient is not None:
grad = _function_wrapper(gradient, grad_args, grad_kwargs,
name='gradient')
kwargs['grad'] = grad
kwargs['compute_jac'] = compute_jac
# Initialize our nested sampler.
sampler = DynamicSampler(loglike, ptform, npdim,
bound, sample, update_interval, first_update,
rstate, queue_size, pool, use_pool, kwargs)
return sampler | 0.000136 |
def remap( x, oMin, oMax, nMin, nMax ):
"""Map to a 0 to 1 scale
http://stackoverflow.com/questions/929103/convert-a-number-range-to-another-range-maintaining-ratio
"""
#range check
if oMin == oMax:
log.warning("Zero input range, unable to rescale")
return x
if nMin == nMax:
log.warning("Zero output range, unable to rescale")
return x
#check reversed input range
reverseInput = False
oldMin = min( oMin, oMax )
oldMax = max( oMin, oMax )
if not oldMin == oMin:
reverseInput = True
#check reversed output range
reverseOutput = False
newMin = min( nMin, nMax )
newMax = max( nMin, nMax )
if not newMin == nMin :
reverseOutput = True
portion = (x-oldMin)*(newMax-newMin)/(oldMax-oldMin)
if reverseInput:
portion = (oldMax-x)*(newMax-newMin)/(oldMax-oldMin)
result = portion + newMin
if reverseOutput:
result = newMax - portion
return result | 0.015015 |
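A couple of hedged usage sketches for intuition; only degenerate ranges touch the module's `log`, so plain calls like these never warn:

remap(2, 0, 10, 0, 100)    # -> 20.0 under Python 3 division: 2/10 of the way through [0, 10] maps to 20 in [0, 100]
remap(2, 0, 10, 100, 0)    # -> 80.0: the output range is reversed, so the result is measured back from 100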
def main() -> None:
"""
Command-line handler for the ``find_bad_openxml`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter,
description="""
Tool to scan rescued Microsoft Office OpenXML files (produced by the
find_recovered_openxml.py tool in this kit; q.v.) and detect bad (corrupted)
ones.
"""
)
parser.add_argument(
"filename", nargs="*",
help="File(s) to check. You can also specify directores if you use "
"--recursive"
)
parser.add_argument(
"--filenames_from_stdin", "-x", action="store_true",
help="Take filenames from stdin instead, one line per filename "
"(useful for chained grep)."
)
parser.add_argument(
"--recursive", action="store_true",
help="Allow search to descend recursively into any directories "
"encountered."
)
parser.add_argument(
"--skip_files", nargs="*", default=[],
help="File pattern(s) to skip. You can specify wildcards like '*.txt' "
"(but you will have to enclose that pattern in quotes under "
"UNIX-like operating systems). The basename of each file will be "
"tested against these filenames/patterns. Consider including "
"Scalpel's 'audit.txt'."
)
parser.add_argument(
"--good", action="store_true",
help="List good files, not bad"
)
parser.add_argument(
"--delete_if_bad", action="store_true",
help="If a file is found to be bad, delete it. DANGEROUS."
)
parser.add_argument(
"--run_repeatedly", type=int,
help="Run the tool repeatedly with a pause of <run_repeatedly> "
"seconds between runs. (For this to work well with the move/"
"delete options, you should specify one or more DIRECTORIES in "
"the 'filename' arguments, not files, and you will need the "
"--recursive option.)"
)
parser.add_argument(
"--nprocesses", type=int, default=multiprocessing.cpu_count(),
help="Specify the number of processes to run in parallel."
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO,
with_process_id=True
)
if bool(args.filenames_from_stdin) == bool(args.filename):
raise ValueError("Specify --filenames_from_stdin or filenames on the "
"command line, but not both")
if args.filenames_from_stdin and args.run_repeatedly:
raise ValueError("Can't use both --filenames_from_stdin and "
"--run_repeatedly")
# Repeated scanning loop
while True:
log.debug("Starting scan.")
log.debug("- Scanning files/directories {!r}{}",
args.filename,
" recursively" if args.recursive else "")
log.debug("- Skipping files matching {!r}", args.skip_files)
log.debug("- Using {} simultaneous processes", args.nprocesses)
log.debug("- Reporting {} filenames", "good" if args.good else "bad")
if args.delete_if_bad:
log.warning("- Deleting bad OpenXML files.")
# Iterate through files
pool = multiprocessing.Pool(processes=args.nprocesses)
if args.filenames_from_stdin:
generator = gen_from_stdin()
else:
generator = gen_filenames(starting_filenames=args.filename,
recursive=args.recursive)
for filename in generator:
src_basename = os.path.basename(filename)
if any(fnmatch.fnmatch(src_basename, pattern)
for pattern in args.skip_files):
log.debug("Skipping file as ordered: " + filename)
continue
exists, locked = exists_locked(filename)
if locked or not exists:
log.debug("Skipping currently inaccessible file: " + filename)
continue
kwargs = {
'filename': filename,
'print_good': args.good,
'delete_if_bad': args.delete_if_bad,
}
# log.critical("start")
pool.apply_async(process_openxml_file, [], kwargs)
# result = pool.apply_async(process_file, [], kwargs)
# result.get() # will re-raise any child exceptions
# ... but it waits for the process to complete! That's no help.
# log.critical("next")
# ... https://stackoverflow.com/questions/22094852/how-to-catch-exceptions-in-workers-in-multiprocessing # noqa
pool.close()
pool.join()
log.debug("Finished scan.")
if args.run_repeatedly is None:
break
log.info("Sleeping for {} s...", args.run_repeatedly)
sleep(args.run_repeatedly) | 0.000197 |
def centroid_sources(data, xpos, ypos, box_size=11, footprint=None,
error=None, mask=None, centroid_func=centroid_com):
"""
Calculate the centroid of sources at the defined positions.
A cutout image centered on each input position will be used to
calculate the centroid position. The cutout image is defined either
using the ``box_size`` or ``footprint`` keyword. The ``footprint``
keyword can be used to create a non-rectangular cutout image.
Parameters
----------
data : array_like
The 2D array of the image.
xpos, ypos : float or array-like of float
The initial ``x`` and ``y`` pixel position(s) of the center
        position. A cutout image centered on this position will be used to
calculate the centroid.
box_size : int or array-like of int, optional
The size of the cutout image along each axis. If ``box_size``
is a number, then a square cutout of ``box_size`` will be
created. If ``box_size`` has two elements, they should be in
``(ny, nx)`` order.
footprint : `~numpy.ndarray` of bools, optional
A 2D boolean array where `True` values describe the local
footprint region to cutout. ``footprint`` can be used to create
a non-rectangular cutout image, in which case the input ``xpos``
and ``ypos`` represent the center of the minimal bounding box
for the input ``footprint``. ``box_size=(n, m)`` is equivalent
to ``footprint=np.ones((n, m))``. Either ``box_size`` or
``footprint`` must be defined. If they are both defined, then
``footprint`` overrides ``box_size``.
mask : array_like, bool, optional
A 2D boolean array with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data`` is
masked.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
``error`` must have the same shape as ``data``. ``error`` will
be used only if supported by the input ``centroid_func``.
centroid_func : callable, optional
A callable object (e.g. function or class) that is used to
calculate the centroid of a 2D array. The ``centroid_func``
must accept a 2D `~numpy.ndarray`, have a ``mask`` keyword and
optionally an ``error`` keyword. The callable object must
return a tuple of two 1D `~numpy.ndarray`\\s, representing the x
and y centroids. The default is
`~photutils.centroids.centroid_com`.
Returns
-------
xcentroid, ycentroid : `~numpy.ndarray`
The ``x`` and ``y`` pixel position(s) of the centroids.
"""
xpos = np.atleast_1d(xpos)
ypos = np.atleast_1d(ypos)
if xpos.ndim != 1:
raise ValueError('xpos must be a 1D array.')
if ypos.ndim != 1:
raise ValueError('ypos must be a 1D array.')
if footprint is None:
if box_size is None:
raise ValueError('box_size or footprint must be defined.')
else:
box_size = np.atleast_1d(box_size)
if len(box_size) == 1:
box_size = np.repeat(box_size, 2)
if len(box_size) != 2:
raise ValueError('box_size must have 1 or 2 elements.')
footprint = np.ones(box_size, dtype=bool)
else:
footprint = np.asanyarray(footprint, dtype=bool)
if footprint.ndim != 2:
raise ValueError('footprint must be a 2D array.')
use_error = False
spec = inspect.getfullargspec(centroid_func)
if 'mask' not in spec.args:
raise ValueError('The input "centroid_func" must have a "mask" '
'keyword.')
if 'error' in spec.args:
use_error = True
xcentroids = []
ycentroids = []
for xp, yp in zip(xpos, ypos):
slices_large, slices_small = overlap_slices(data.shape,
footprint.shape, (yp, xp))
data_cutout = data[slices_large]
mask_cutout = None
if mask is not None:
mask_cutout = mask[slices_large]
footprint_mask = ~footprint
# trim footprint mask if partial overlap on the data
footprint_mask = footprint_mask[slices_small]
if mask_cutout is None:
mask_cutout = footprint_mask
else:
# combine the input mask and footprint mask
mask_cutout = np.logical_or(mask_cutout, footprint_mask)
if error is not None and use_error:
error_cutout = error[slices_large]
xcen, ycen = centroid_func(data_cutout, mask=mask_cutout,
error=error_cutout)
else:
xcen, ycen = centroid_func(data_cutout, mask=mask_cutout)
xcentroids.append(xcen + slices_large[1].start)
ycentroids.append(ycen + slices_large[0].start)
return np.array(xcentroids), np.array(ycentroids) | 0.0002 |
def get_node(self, name):
"""Retrieve a node from the graph.
Given a node's name the corresponding Node
instance will be returned.
If one or more nodes exist with that name a list of
Node instances is returned.
An empty list is returned otherwise.
"""
match = list()
        if name in self.obj_dict['nodes']:
            match.extend([Node(obj_dict=obj_dict) for obj_dict in self.obj_dict['nodes'][name]])
return match | 0.030466 |
def get_desc2nts_fnc(self, hdrgo_prt=True, section_prt=None,
top_n=None, use_sections=True):
"""Return grouped, sorted namedtuples in either format: flat, sections."""
# RETURN: flat list of namedtuples
nts_flat = self.get_nts_flat(hdrgo_prt, use_sections)
if nts_flat:
flds = nts_flat[0]._fields
if not use_sections:
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
else:
return {'sortobj':self,
'sections' : [(self.grprobj.hdrobj.secdflt, nts_flat)],
'hdrgo_prt':hdrgo_prt,
'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# print('FFFF Sorter:get_desc2nts_fnc: nts_flat is None')
# RETURN: 2-D list [(section_name0, namedtuples0), (section_name1, namedtuples1), ...
# kws: top_n hdrgo_prt section_sortby
# Over-ride hdrgo_prt depending on top_n value
assert top_n is not True and top_n is not False, \
"top_n({T}) MUST BE None OR AN int".format(T=top_n)
assert self.sectobj is not None, "SECTIONS OBJECT DOES NOT EXIST"
sec_sb = self.sectobj.section_sortby
# Override hdrgo_prt, if sorting by sections or returning a subset of GO IDs in section
hdrgo_prt_curr = hdrgo_prt is True
if sec_sb is True or (sec_sb is not False and sec_sb is not None) or top_n is not None:
hdrgo_prt_curr = False
# print('GGGG Sorter:get_desc2nts_fnc: hdrgo_prt_curr({}) sec_sb({}) top_n({})'.format(
# hdrgo_prt_curr, sec_sb, top_n))
nts_section = self.sectobj.get_sorted_nts_keep_section(hdrgo_prt_curr)
# print('HHHH Sorter:get_desc2nts_fnc: nts_section')
# Take top_n in each section, if requested
if top_n is not None:
nts_section = [(s, nts[:top_n]) for s, nts in nts_section]
if section_prt is None:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat), 'num_sections':1}
# Send flat list of sections nts back, as requested
if section_prt is False:
nts_flat = self.get_sections_flattened(nts_section)
flds = nts_flat[0]._fields if nts_flat else []
return {'sortobj':self, 'flat' : nts_flat, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':len(nts_flat),
'num_sections':len(nts_section)}
# Send 2-D sections nts back
# print('IIII Sorter:get_desc2nts_fnc: nts_section')
flds = nts_section[0][1][0]._fields if nts_section else []
return {'sortobj':self, 'sections' : nts_section, 'hdrgo_prt':hdrgo_prt_curr, 'flds':flds,
'num_items':sum(len(nts) for _, nts in nts_section),
'num_sections':len(nts_section)} | 0.01315 |
def unirange(a, b):
"""Returns a regular expression string to match the given non-BMP range."""
if b < a:
raise ValueError("Bad character range")
if a < 0x10000 or b < 0x10000:
raise ValueError("unirange is only defined for non-BMP ranges")
if sys.maxunicode > 0xffff:
# wide build
return u'[%s-%s]' % (unichr(a), unichr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
# these characters, expand the range to one that it understands. Some
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
# Additionally, the lower constants are using unichr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
else:
buf = []
buf.append(u'%s[%s-%s]' %
(unichr(ah), unichr(al),
ah == bh and unichr(bl) or unichr(0xdfff)))
            if bh - ah > 1:
                buf.append(u'[%s-%s][%s-%s]' %
                           (unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
(unichr(bh), unichr(0xdc00), unichr(bl)))
return u'(?:' + u'|'.join(buf) + u')' | 0.001235 |
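The `_surrogatepair` helper this row relies on is not shown; a hedged sketch of the standard UTF-16 decomposition it presumably performs:

def _surrogatepair(c):
    """Split a non-BMP code point into its (high, low) surrogate pair."""
    c -= 0x10000
    return 0xD800 + (c >> 10), 0xDC00 + (c & 0x3FF)

_surrogatepair(0x1F600)   # -> (0xD83D, 0xDE00), the pair encoding U+1F600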
def apply_chromatic_adaptation_on_color(color, targ_illum, adaptation='bradford'):
"""
Convenience function to apply an adaptation directly to a Color object.
"""
xyz_x = color.xyz_x
xyz_y = color.xyz_y
xyz_z = color.xyz_z
orig_illum = color.illuminant
targ_illum = targ_illum.lower()
observer = color.observer
adaptation = adaptation.lower()
# Return individual X, Y, and Z coordinates.
color.xyz_x, color.xyz_y, color.xyz_z = apply_chromatic_adaptation(
xyz_x, xyz_y, xyz_z, orig_illum, targ_illum,
observer=observer, adaptation=adaptation)
color.set_illuminant(targ_illum)
return color | 0.003021 |
def normalize_lons(l1, l2):
"""
An international date line safe way of returning a range of longitudes.
>>> normalize_lons(20, 30) # no IDL within the range
[(20, 30)]
>>> normalize_lons(-17, +17) # no IDL within the range
[(-17, 17)]
>>> normalize_lons(-178, +179)
[(-180, -178), (179, 180)]
>>> normalize_lons(178, -179)
[(-180, -179), (178, 180)]
>>> normalize_lons(179, -179)
[(-180, -179), (179, 180)]
>>> normalize_lons(177, -176)
[(-180, -176), (177, 180)]
"""
if l1 > l2: # exchange lons
l1, l2 = l2, l1
delta = l2 - l1
if l1 < 0 and l2 > 0 and delta > 180:
return [(-180, l1), (l2, 180)]
elif l1 > 0 and l2 > 180 and delta < 180:
return [(l1, 180), (-180, l2 - 360)]
elif l1 < -180 and l2 < 0 and delta < 180:
return [(l1 + 360, 180), (l2, -180)]
return [(l1, l2)] | 0.00112 |
def get(self, identity):
"""
Constructs a DocumentPermissionContext
:param identity: Identity of the user to whom the Sync Document Permission applies.
:returns: twilio.rest.sync.v1.service.document.document_permission.DocumentPermissionContext
:rtype: twilio.rest.sync.v1.service.document.document_permission.DocumentPermissionContext
"""
return DocumentPermissionContext(
self._version,
service_sid=self._solution['service_sid'],
document_sid=self._solution['document_sid'],
identity=identity,
) | 0.008183 |
def ansi(color, text):
"""Wrap text in an ansi escape sequence"""
code = COLOR_CODES[color]
return '\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM) | 0.006211 |
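`COLOR_CODES` and `RESET_TERM` live elsewhere in the module; a hedged sketch with hypothetical values shows how the helper is meant to be used:

COLOR_CODES = {'red': 31, 'green': 32, 'yellow': 33, 'blue': 34}   # hypothetical stand-in
RESET_TERM = '\033[0;m'                                            # hypothetical stand-in

print(ansi('green', 'ok'))   # prints 'ok' in bold green, then resets the terminal colour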
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
return 2.*integrate.quad(lambda x: self._dens(R,x,phi=phi,t=t),0,z)[0] | 0.022 |
def get_polygons_coordinates(geometry):
"""
Extract exterior coordinates from polygon(s) to pass to OSM in a query by
polygon. Ignore the interior ("holes") coordinates.
Parameters
----------
geometry : shapely Polygon or MultiPolygon
the geometry to extract exterior coordinates from
Returns
-------
polygon_coord_strs : list
"""
# extract the exterior coordinates of the geometry to pass to the API later
polygons_coords = []
if isinstance(geometry, Polygon):
x, y = geometry.exterior.xy
polygons_coords.append(list(zip(x, y)))
elif isinstance(geometry, MultiPolygon):
for polygon in geometry:
x, y = polygon.exterior.xy
polygons_coords.append(list(zip(x, y)))
else:
raise TypeError('Geometry must be a shapely Polygon or MultiPolygon')
# convert the exterior coordinates of the polygon(s) to the string format
# the API expects
polygon_coord_strs = []
for coords in polygons_coords:
s = ''
separator = ' '
for coord in list(coords):
# round floating point lats and longs to 6 decimal places (ie, ~100 mm),
# so we can hash and cache strings consistently
s = '{}{}{:.6f}{}{:.6f}'.format(s, separator, coord[1], separator, coord[0])
polygon_coord_strs.append(s.strip(separator))
return polygon_coord_strs | 0.00211 |
def hide_alert(self, id, **kwargs): # noqa: E501
"""Hide a specific integration alert # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.hide_alert(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: ResponseContainerAlert
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.hide_alert_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.hide_alert_with_http_info(id, **kwargs) # noqa: E501
return data | 0.002326 |
def update_permissions_rejected_analysis_requests():
"""
Maps and updates the permissions for rejected analysis requests so lab clerks, clients, owners and
RegulatoryInspector can see rejected analysis requests on lists.
:return: None
"""
workflow_tool = api.get_tool("portal_workflow")
workflow = workflow_tool.getWorkflowById('bika_ar_workflow')
catalog = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
brains = catalog(review_state='rejected')
counter = 0
total = len(brains)
logger.info(
"Changing permissions for rejected analysis requests. " +
"Number of rejected analysis requests: {0}".format(total))
for brain in brains:
if 'LabClerk' not in brain.allowedRolesAndUsers:
if counter % 100 == 0:
logger.info(
"Changing permissions for rejected analysis requests: " +
"{0}/{1}".format(counter, total))
obj = api.get_object(brain)
workflow.updateRoleMappingsFor(obj)
obj.reindexObject()
counter += 1
logger.info(
"Changed permissions for rejected analysis requests: " +
"{0}/{1}".format(counter, total)) | 0.001645 |
def find_le_index(self, k):
'Return last item with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return i - 1
raise ValueError('No item found with key at or below: %r' % (k,)) | 0.007813 |
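A self-contained sketch of the kind of sorted container this method assumes; the class name and `_keys` attribute are hypothetical stand-ins for the original's:

from bisect import bisect_right

class SortedKeys:
    def __init__(self, keys):
        self._keys = sorted(keys)

    def find_le_index(self, k):
        'Return last item with a key <= k. Raise ValueError if not found.'
        i = bisect_right(self._keys, k)
        if i:
            return i - 1
        raise ValueError('No item found with key at or below: %r' % (k,))

SortedKeys([1, 4, 9]).find_le_index(5)   # -> 1, the index of key 4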
async def send_endpoint(self, endpoint: str) -> None:
"""
Send anchor's own endpoint attribute to ledger (and endpoint cache),
if ledger does not yet have input value. Specify None to clear.
Raise BadIdentifier on endpoint not formatted as '<ip-address>:<port>',
BadLedgerTxn on failure, WalletState if wallet is closed.
:param endpoint: value to set as endpoint attribute on ledger and cache:
specify URL or None to clear.
"""
LOGGER.debug('BaseAnchor.send_endpoint >>> endpoint: %s', endpoint)
ledger_endpoint = await self.get_endpoint()
if ledger_endpoint == endpoint:
LOGGER.info('%s endpoint already set as %s', self.name, endpoint)
            LOGGER.debug('BaseAnchor.send_endpoint <<< (%s already set for %s)', endpoint, self.name)
return
attr_json = json.dumps({
'endpoint': {
'endpoint': endpoint # indy-sdk likes 'ha' here but won't map 'ha' to a URL, only ip:port
}
})
req_json = await ledger.build_attrib_request(self.did, self.did, None, attr_json, None)
await self._sign_submit(req_json)
for _ in range(16): # reasonable timeout
if await self.get_endpoint(None, False) == endpoint:
break
await asyncio.sleep(1)
LOGGER.info('Sent endpoint %s to ledger, waiting 1s for its confirmation', endpoint)
else:
LOGGER.debug('BaseAnchor.send_endpoint <!< timed out waiting on send endpoint %s', endpoint)
raise BadLedgerTxn('Timed out waiting on sent endpoint {}'.format(endpoint))
LOGGER.debug('BaseAnchor.send_endpoint <<<') | 0.005269 |
def with_organisation(self, organisation):
"""Add an organisation segment.
Args:
organisation (str): Official name of an administrative body
holding an election.
Returns:
IdBuilder
Raises:
ValueError
"""
if organisation is None:
organisation = ''
organisation = slugify(organisation)
self._validate_organisation(organisation)
self.organisation = organisation
return self | 0.003868 |
def quorum(name, **kwargs):
'''
Quorum state
This state checks the mon daemons are in quorum. It does not alter the
cluster but can be used in formula as a dependency for many cluster
operations.
Example usage in sls file:
.. code-block:: yaml
quorum:
sesceph.quorum:
- require:
- sesceph: mon_running
'''
parameters = _ordereddict2dict(kwargs)
if parameters is None:
return _error(name, "Invalid parameters:%s")
if __opts__['test']:
return _test(name, "cluster quorum")
try:
cluster_quorum = __salt__['ceph.cluster_quorum'](**parameters)
except (CommandExecutionError, CommandNotFoundError) as err:
return _error(name, err.strerror)
if cluster_quorum:
return _unchanged(name, "cluster is quorum")
return _error(name, "cluster is not quorum") | 0.001122 |
def nested_model(model, nested_fields):
"""
Return :class:`zsl.db.model.app_model import AppModel` with the nested
models attached. ``nested_fields`` can be a simple list as model
fields, or it can be a tree definition in dict with leafs as keys with
``None`` value
"""
# type: (ModelBase, Any)->Optional[AppModel]
if model is None:
return None
app_model = model.get_app_model()
is_dict = isinstance(nested_fields, dict)
for field in nested_fields:
field = get_nested_field_name(field)
nested_nested = nested_fields.get(
field) if is_dict and nested_fields.get(field) else []
value = getattr(model, field, None)
# we can have also lists in field
nm_fn = nested_models if isinstance(value, list) else nested_model
setattr(app_model, field, nm_fn(value, nested_nested))
return app_model | 0.001085 |
def _current_user_manager(self, session=None):
"""Return the current user, or SYSTEM user."""
if session is None:
session = db.session()
try:
user = g.user
except Exception:
return session.query(User).get(0)
if sa.orm.object_session(user) is not session:
# this can happen when called from a celery task during development
# (with CELERY_ALWAYS_EAGER=True): the task SA session is not
# app.db.session, and we should not attach this object to
# the other session, because it can make weird, hard-to-debug
# errors related to session.identity_map.
return session.query(User).get(user.id)
else:
return user | 0.002584 |
def _makeHttpRequest(self, method, route, payload):
""" Make an HTTP Request for the API endpoint. This method wraps
the logic about doing failure retry and passes off the actual work
of doing an HTTP request to another method."""
url = self._constructUrl(route)
log.debug('Full URL used is: %s', url)
hawkExt = self.makeHawkExt()
# Serialize payload if given
if payload is not None:
payload = utils.dumpJson(payload)
# Do a loop of retries
        retry = -1  # we increment at the top of the loop, so attempt 1 is retry 0
retries = self.options['maxRetries']
while retry < retries:
retry += 1
# if this isn't the first retry then we sleep
if retry > 0:
time.sleep(utils.calculateSleepTime(retry))
# Construct header
if self._hasCredentials():
sender = mohawk.Sender(
credentials={
'id': self.options['credentials']['clientId'],
'key': self.options['credentials']['accessToken'],
'algorithm': 'sha256',
},
ext=hawkExt if hawkExt else {},
url=url,
content=payload if payload else '',
content_type='application/json' if payload else '',
method=method,
)
headers = {'Authorization': sender.request_header}
else:
log.debug('Not using hawk!')
headers = {}
if payload:
# Set header for JSON if payload is given, note that we serialize
# outside this loop.
headers['Content-Type'] = 'application/json'
log.debug('Making attempt %d', retry)
try:
response = utils.makeSingleHttpRequest(method, url, payload, headers)
except requests.exceptions.RequestException as rerr:
if retry < retries:
log.warn('Retrying because of: %s' % rerr)
continue
# raise a connection exception
raise exceptions.TaskclusterConnectionError(
"Failed to establish connection",
superExc=rerr
)
# Handle non 2xx status code and retry if possible
status = response.status_code
if status == 204:
return None
# Catch retryable errors and go to the beginning of the loop
# to do the retry
if 500 <= status and status < 600 and retry < retries:
log.warn('Retrying because of a %s status code' % status)
continue
# Throw errors for non-retryable errors
if status < 200 or status >= 300:
data = {}
try:
data = response.json()
except Exception:
pass # Ignore JSON errors in error messages
# Find error message
message = "Unknown Server Error"
if isinstance(data, dict):
message = data.get('message')
else:
if status == 401:
message = "Authentication Error"
elif status == 500:
message = "Internal Server Error"
# Raise TaskclusterAuthFailure if this is an auth issue
if status == 401:
raise exceptions.TaskclusterAuthFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Raise TaskclusterRestFailure for all other issues
raise exceptions.TaskclusterRestFailure(
message,
status_code=status,
body=data,
superExc=None
)
# Try to load JSON
try:
return response.json()
except ValueError:
return {"response": response}
# This code-path should be unreachable
assert False, "Error from last retry should have been raised!" | 0.000907 |
def is_not_from_subdomain(self, response, site_dict):
"""
Ensures the response's url isn't from a subdomain.
:param obj response: The scrapy response
:param dict site_dict: The site object from the JSON-File
:return bool: Determines if the response's url is from a subdomain
"""
root_url = re.sub(re_url_root, '', site_dict["url"])
return UrlExtractor.get_allowed_domain(response.url) == root_url | 0.00432 |
def _prefix_from_ip_int(self, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
self._max_prefixlen)
prefixlen = self._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = self._max_prefixlen // 8
details = _int_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen | 0.002242 |
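A worked numeric sketch of the arithmetic, assuming an IPv4 object so that `_max_prefixlen` is 32:

ip_int = 0xFFFFFF00                          # the netmask 255.255.255.0
trailing_zeroes = 8                          # bin(ip_int) ends in eight zero bits
prefixlen = 32 - trailing_zeroes             # -> 24
leading_ones = ip_int >> trailing_zeroes     # -> 0xFFFFFF
assert leading_ones == (1 << prefixlen) - 1  # all-ones check passes, no ValueError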
def pnum_to_group(mesh_shape, group_dims, pnum):
"""Group number for grouped allreduce.
Args:
mesh_shape: a Shape
group_dims: a list of integers (the dimensions reduced over)
pnum: an integer
Returns:
an integer
"""
coord = pnum_to_processor_coordinates(mesh_shape, pnum)
remaining_shape = Shape(
[d for i, d in enumerate(mesh_shape) if i not in group_dims])
remaining_coord = [d for i, d in enumerate(coord) if i not in group_dims]
return processor_coordinates_to_pnum(remaining_shape, remaining_coord) | 0.01105 |
def serve(self, app_docopt=DEFAULT_DOC, description=''):
''' Configure from cli and run the server '''
exit_status = 0
if isinstance(app_docopt, str):
args = docopt(app_docopt, version=description)
elif isinstance(app_docopt, dict):
args = app_docopt
else:
raise ValueError('unknown configuration object ({})'
.format(type(app_docopt)))
log_level = args.get('--log', 'debug')
is_debug = args.get('--debug', False)
# TODO More serious default
log_output = 'stdout' if is_debug else 'apy.log'
safe_bind = args.get('--bind', '127.0.0.1')
safe_port = int(args.get('--port', 5000))
log_setup = dna.logging.setup(level=log_level, output=log_output)
with log_setup.applicationbound():
try:
log.info('server ready',
version=description,
log=log_level,
debug=is_debug,
bind='{}:{}'.format(safe_bind, safe_port))
self.app.run(host=safe_bind,
port=safe_port,
debug=is_debug)
except Exception as error:
if is_debug:
raise
log.error('{}: {}'.format(type(error).__name__, str(error)))
exit_status = 1
finally:
log.info('session ended with status {}'.format(exit_status))
return exit_status | 0.001276 |
def request(self, command=None):
"""command text
view: Execution user view exec
Configuration system view exec
"""
node = new_ele("CLI")
node.append(validated_element(command))
# sub_ele(node, view).text = command
return self._request(node) | 0.006452 |
def _authstr(self, auth):
"""Convert auth to str so that it can be hashed"""
if type(auth) is dict:
return '{' + ','.join(["{0}:{1}".format(k, auth[k]) for k in sorted(auth.keys())]) + '}'
return auth | 0.012712 |
def get_bids(session, project_ids=[], bid_ids=[], limit=10, offset=0):
"""
Get the list of bids
"""
get_bids_data = {}
if bid_ids:
get_bids_data['bids[]'] = bid_ids
if project_ids:
get_bids_data['projects[]'] = project_ids
get_bids_data['limit'] = limit
get_bids_data['offset'] = offset
# GET /api/projects/0.1/bids/
response = make_get_request(session, 'bids', params_data=get_bids_data)
json_data = response.json()
if response.status_code == 200:
return json_data['result']
else:
raise BidsNotFoundException(
message=json_data['message'], error_code=json_data['error_code'],
request_id=json_data['request_id']
) | 0.001372 |
def CheckSameObj(obj0, obj1, LFields=None):
""" Check if two variables are the same instance of a ToFu class
    Checks a list of attributes, provided by LFields
Parameters
----------
obj0 : tofu object
A variable refering to a ToFu object of any class
obj1 : tofu object
A variable refering to a ToFu object of the same class as obj0
LFields : None / str / list
The criteria against which the two objects are evaluated:
- None: True is returned
- str or list: tests whether all listed attributes have the same value
Returns
-------
A : bool
True only is LField is None or a list of attributes that all match
"""
A = True
    if LFields is not None and obj0.__class__==obj1.__class__:
assert type(LFields) in [str,list]
if type(LFields) is str:
LFields = [LFields]
assert all([type(s) is str for s in LFields])
ind = [False for ii in range(0,len(LFields))]
Dir0 = dir(obj0.Id)+dir(obj0)
Dir1 = dir(obj1.Id)+dir(obj1)
for ii in range(0,len(LFields)):
assert LFields[ii] in Dir0, LFields[ii]+" not in "+obj0.Id.Name
assert LFields[ii] in Dir1, LFields[ii]+" not in "+obj1.Id.Name
if hasattr(obj0,LFields[ii]):
ind[ii] = np.all(getattr(obj0,LFields[ii])==getattr(obj1,LFields[ii]))
else:
ind[ii] = getattr(obj0.Id,LFields[ii])==getattr(obj1.Id,LFields[ii])
A = all(ind)
return A | 0.009659 |
def get_intercom_data(self):
"""Specify the data sent to Intercom API according to event type"""
data = {
"event_name": self.get_type_display(), # event type
"created_at": calendar.timegm(self.created.utctimetuple()), # date
"metadata": self.metadata
}
if self.user:
data["user_id"] = self.user.intercom_id
return data | 0.004902 |
def scaleBy(self, value, origin=None, width=False, height=False):
"""
%s
**width** indicates if the glyph's width should be scaled.
**height** indicates if the glyph's height should be scaled.
The origin must not be specified when scaling the width or height.
"""
value = normalizers.normalizeTransformationScale(value)
if origin is None:
origin = (0, 0)
origin = normalizers.normalizeCoordinateTuple(origin)
if origin != (0, 0) and (width or height):
raise FontPartsError(("The origin must not be set when "
"scaling the width or height."))
super(BaseGlyph, self).scaleBy(value, origin=origin)
sX, sY = value
if width:
self._scaleWidthBy(sX)
if height:
self._scaleHeightBy(sY) | 0.002294 |
def run(self):
"""Build the Fortran library, all python extensions and the docs."""
print('---- BUILDING ----')
_build.run(self)
# build documentation
print('---- BUILDING DOCS ----')
docdir = os.path.join(self.build_lib, 'pyshtools', 'doc')
self.mkpath(docdir)
doc_builder = os.path.join(self.build_lib, 'pyshtools', 'make_docs.py')
doc_source = '.'
check_call([sys.executable, doc_builder, doc_source, self.build_lib])
print('---- ALL DONE ----') | 0.003717 |
def generate_configuration(directory):
"""
Generates a Sphinx configuration in `directory`.
Parameters
----------
directory : str
Base directory to use
"""
# conf.py file for Sphinx
conf = osp.join(get_module_source_path('spyder.plugins.help.utils'),
'conf.py')
# Docstring layout page (in Jinja):
layout = osp.join(osp.join(CONFDIR_PATH, 'templates'), 'layout.html')
os.makedirs(osp.join(directory, 'templates'))
os.makedirs(osp.join(directory, 'static'))
shutil.copy(conf, directory)
shutil.copy(layout, osp.join(directory, 'templates'))
open(osp.join(directory, '__init__.py'), 'w').write('')
open(osp.join(directory, 'static', 'empty'), 'w').write('') | 0.003953 |
def open(self):
"""Generator that opens and yields filehandles using appropriate facilities:
test if path represents a local file or file over URL, if file is compressed
or not.
:return: Filehandle to be processed into an instance.
"""
is_url = self.is_url(self.path)
compression_type = self.is_compressed(self.path)
if not compression_type:
if is_url:
filehandle = urlopen(self.path)
else:
filehandle = open(self.path, "r")
source = self.path
yield filehandle, source
filehandle.close()
elif compression_type:
if is_url:
response = urlopen(self.path)
path = response.read()
response.close()
else:
path = self.path
if compression_type == "zip":
ziparchive = zipfile.ZipFile(io.BytesIO(path), "r") if is_url else zipfile.ZipFile(path)
for name in ziparchive.infolist():
if not name.filename.endswith("/"):
filehandle = ziparchive.open(name)
source = self.path + "/" + name.filename
yield filehandle, source
filehandle.close()
elif compression_type in ("tar", "tar.bz2", "tar.gz"):
tararchive = tarfile.open(fileobj=io.BytesIO(path)) if is_url else tarfile.open(path)
for name in tararchive:
if name.isfile():
filehandle = tararchive.extractfile(name)
source = self.path + "/" + name.name
yield filehandle, source
filehandle.close()
elif compression_type == "bz2":
filehandle = bz2.BZ2File(io.BytesIO(path)) if is_url else bz2.BZ2File(path)
source = self.path
yield filehandle, source
filehandle.close()
elif compression_type == "gz":
filehandle = gzip.open(io.BytesIO(path)) if is_url else gzip.open(path)
source = self.path
yield filehandle, source
filehandle.close() | 0.003481 |
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, memoryview):
return bytes(s)
if not isinstance(s, str):
return str(s).encode(encoding, errors)
else:
return s.encode(encoding, errors) | 0.001316 |
def _dequantize(q, params):
"""Dequantize q according to params."""
if not params.quantize:
return q
return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale | 0.021978 |
def do_create_virtualenv(python=None, site_packages=False, pypi_mirror=None):
"""Creates a virtualenv."""
click.echo(
crayons.normal(fix_utf8("Creating a virtualenv for this project…"), bold=True), err=True
)
click.echo(
u"Pipfile: {0}".format(crayons.red(project.pipfile_location, bold=True)),
err=True,
)
# Default to using sys.executable, if Python wasn't provided.
if not python:
python = sys.executable
click.echo(
u"{0} {1} {3} {2}".format(
crayons.normal("Using", bold=True),
crayons.red(python, bold=True),
crayons.normal(fix_utf8("to create virtualenv…"), bold=True),
crayons.green("({0})".format(python_version(python))),
),
err=True,
)
cmd = [
vistir.compat.Path(sys.executable).absolute().as_posix(),
"-m",
"virtualenv",
"--prompt=({0}) ".format(project.name),
"--python={0}".format(python),
project.get_location_for_virtualenv(),
]
# Pass site-packages flag to virtualenv, if desired…
if site_packages:
click.echo(
crayons.normal(fix_utf8("Making site-packages available…"), bold=True), err=True
)
cmd.append("--system-site-packages")
if pypi_mirror:
pip_config = {"PIP_INDEX_URL": vistir.misc.fs_str(pypi_mirror)}
else:
pip_config = {}
# Actually create the virtualenv.
nospin = environments.PIPENV_NOSPIN
with create_spinner("Creating virtual environment...") as sp:
c = vistir.misc.run(
cmd, verbose=False, return_object=True, write_to_stdout=False,
combine_stderr=False, block=True, nospin=True, env=pip_config,
)
click.echo(crayons.blue("{0}".format(c.out)), err=True)
if c.returncode != 0:
sp.fail(environments.PIPENV_SPINNER_FAIL_TEXT.format("Failed creating virtual environment"))
error = c.err if environments.is_verbose() else exceptions.prettify_exc(c.err)
raise exceptions.VirtualenvCreationException(
extra=[crayons.red("{0}".format(error)),]
)
else:
sp.green.ok(environments.PIPENV_SPINNER_OK_TEXT.format(u"Successfully created virtual environment!"))
# Associate project directory with the environment.
# This mimics Pew's "setproject".
project_file_name = os.path.join(project.virtualenv_location, ".project")
with open(project_file_name, "w") as f:
f.write(vistir.misc.fs_str(project.project_directory))
from .environment import Environment
sources = project.pipfile_sources
project._environment = Environment(
prefix=project.get_location_for_virtualenv(),
is_venv=True,
sources=sources,
pipfile=project.parsed_pipfile,
project=project
)
project._environment.add_dist("pipenv")
# Say where the virtualenv is.
do_where(virtualenv=True, bare=False) | 0.002674 |
def end_profiling(profiler, filename, sorting=None):
"""
    Helper function to stop the profiling process and write the profiled
    data to the given filename. Before writing, the stats are sorted by the passed sorting.
:param profiler: An already started profiler (probably by start_profiling).
:type profiler: cProfile.Profile
:param filename: The name of the output file to save the profile.
:type filename: basestring
:param sorting: The sorting of the statistics passed to the sort_stats function.
:type sorting: basestring
:return: None
:rtype: None
Start and stop the profiler with:
>>> profiler = start_profiling()
>>> # Do something you want to profile
>>> end_profiling(profiler, "out.txt", "cumulative")
"""
profiler.disable()
s = six.StringIO()
ps = pstats.Stats(profiler, stream=s).sort_stats(sorting)
ps.print_stats()
with open(filename, "w+") as f:
_logger.info("[calculate_ts_features] Finished profiling of time series feature extraction")
f.write(s.getvalue()) | 0.003724 |
def visualize_explanation(explanation, label=None):
"""
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
"""
if not sys.version_info[:2] >= (3, 5):
raise IndicoError("Python >= 3.5+ is required for explanation visualization")
try:
from colr import Colr as C
except ImportError:
raise IndicoError("Package colr >= 0.8.1 is required for explanation visualization.")
cursor = 0
text = explanation['text']
for token in explanation.get('token_predictions'):
try:
class_confidence = token.get('prediction')[label]
except KeyError:
raise IndicoError("Invalid label: {}".format(label))
if class_confidence > 0.5:
fg_color = (255, 255, 255)
else:
fg_color = (0, 0, 0)
rg_value = 255 - int(class_confidence * 255)
token_end = token.get('token').get('end')
token_text = text[cursor:token_end]
cursor = token_end
sys.stdout.write(
str(C().b_rgb(
rg_value, rg_value, 255
).rgb(
fg_color[0], fg_color[1], fg_color[2], token_text
))
)
sys.stdout.write("\n")
sys.stdout.flush() | 0.00549 |
def run(self, *args):
"""List, add or delete entries from the blacklist.
By default, it prints the list of entries available on
the blacklist.
"""
params = self.parser.parse_args(args)
entry = params.entry
if params.add:
code = self.add(entry)
elif params.delete:
code = self.delete(entry)
else:
term = entry
code = self.blacklist(term)
return code | 0.004167 |
def update_dimensions(self, dims):
"""
Update multiple dimension on the cube.
.. code-block:: python
cube.update_dimensions([
{'name' : 'ntime', 'global_size' : 10,
'lower_extent' : 2, 'upper_extent' : 7 },
{'name' : 'na', 'global_size' : 3,
'lower_extent' : 2, 'upper_extent' : 7 },
])
Parameters
----------
dims : list or dict:
A list or dictionary of dimension updates
"""
if isinstance(dims, collections.Mapping):
dims = dims.itervalues()
for dim in dims:
# Defer to update dimension for dictionaries
if isinstance(dim, dict):
self.update_dimension(**dim)
# Replace if given a Dimension object
elif isinstance(dim, Dimension):
self._dims[dim.name] = dim
else:
raise TypeError("Unhandled type '{t}'"
"in update_dimensions".format(t=type(dim))) | 0.002822 |
def dataverse_download_doi(doi,
local_fname=None,
file_requirements={},
clobber=False):
"""
Downloads a file from the Dataverse, using a DOI and set of metadata
parameters to locate the file.
Args:
doi (str): Digital Object Identifier (DOI) containing the file.
local_fname (Optional[str]): Local filename to download the file to. If
`None`, then use the filename provided by the Dataverse. Defaults to
`None`.
file_requirements (Optional[dict]): Select the file containing the
given metadata entries. If multiple files meet these requirements,
            only the first is downloaded. Defaults to `{}`, corresponding to no
requirements.
Raises:
DownloadError: Either no matching file was found under the given DOI, or
the MD5 sum of the file was not as expected.
requests.exceptions.HTTPError: The given DOI does not exist, or there
was a problem connecting to the Dataverse.
"""
metadata = dataverse_search_doi(doi)
def requirements_match(metadata):
for key in file_requirements.keys():
if metadata['dataFile'].get(key, None) != file_requirements[key]:
return False
return True
for file_metadata in metadata['data']['latestVersion']['files']:
if requirements_match(file_metadata):
file_id = file_metadata['dataFile']['id']
md5sum = file_metadata['dataFile']['md5']
if local_fname is None:
local_fname = file_metadata['dataFile']['filename']
# Check if the file already exists on disk
if (not clobber) and os.path.isfile(local_fname):
print('Checking existing file to see if MD5 sum matches ...')
md5_existing = get_md5sum(local_fname)
if md5_existing == md5sum:
print('File exists. Not overwriting.')
return
print("Downloading data to '{}' ...".format(local_fname))
dataverse_download_id(file_id, md5sum,
fname=local_fname, clobber=False)
return
raise DownloadError(
'No file found under the given DOI matches the requirements.\n'
'The metadata found for this DOI was:\n'
+ json.dumps(file_metadata, indent=2, sort_keys=True)) | 0.001208 |
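A hedged usage sketch for the download helper above; the DOI, filename and metadata key below are illustrative placeholders, not values from the original project:

dataverse_download_doi(
    "10.7910/DVN/EXAMPLE",                      # hypothetical DOI
    local_fname="data.h5",                      # hypothetical local file name
    file_requirements={"filename": "data.h5"},  # pick the matching file under the DOI
)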
def render_koji(self):
"""
if there is yum repo specified, don't pick stuff from koji
"""
phase = 'prebuild_plugins'
plugin = 'koji'
if not self.dj.dock_json_has_plugin_conf(phase, plugin):
return
if self.spec.yum_repourls.value:
logger.info("removing koji from request "
"because there is yum repo specified")
self.dj.remove_plugin(phase, plugin)
elif not (self.spec.koji_target.value and
self.spec.kojiroot.value and
self.spec.kojihub.value):
logger.info("removing koji from request as not specified")
self.dj.remove_plugin(phase, plugin)
else:
self.dj.dock_json_set_arg(phase, plugin,
"target", self.spec.koji_target.value)
self.dj.dock_json_set_arg(phase, plugin,
"root", self.spec.kojiroot.value)
self.dj.dock_json_set_arg(phase, plugin,
"hub", self.spec.kojihub.value)
if self.spec.proxy.value:
self.dj.dock_json_set_arg(phase, plugin,
"proxy", self.spec.proxy.value) | 0.001555 |
def findNode( self, objectName ):
"""
Looks up the node based on the inputed node name.
:param objectName | <str>
"""
for item in self.items():
if ( isinstance(item, XNode) and
item.objectName() == objectName ):
return item
return None | 0.023055 |
def fixed_poch(a, n):
"""Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly.
    Need conditional statement because scipy's implementation of the Pochhammer
symbol is wrong for negative integer arguments. This function uses the
definition from
http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/
Parameters
----------
a : float
The argument.
n : nonnegative int
The order.
"""
# Old form, calls gamma function:
# if a < 0.0 and a % 1 == 0 and n <= -a:
# p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n)
# else:
# p = scipy.special.poch(a, n)
# return p
if (int(n) != n) or (n < 0):
raise ValueError("Parameter n must be a nonnegative int!")
n = int(n)
# Direct form based on product:
terms = [a + k for k in range(0, n)]
return scipy.prod(terms) | 0.006322 |
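The product form above is the rising factorial (a)_n = a * (a+1) * ... * (a+n-1), which stays well defined for negative integer a. A few hand-checked values, assuming fixed_poch above is in scope:

print(fixed_poch(3, 2))    # 3 * 4 = 12
print(fixed_poch(-3, 3))   # (-3) * (-2) * (-1) = -6
print(fixed_poch(-3, 4))   # the factor (a + 3) = 0 makes the whole product 0
print(fixed_poch(5, 0))    # empty product = 1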
def _move(self, from_state=None, to_state=None, when=None, mode=None):
"""
Internal helper to move a task from one state to another (e.g. from
QUEUED to DELAYED). The "when" argument indicates the timestamp of the
task in the new state. If no to_state is specified, the task will be
simply removed from the original state.
The "mode" param can be specified to define how the timestamp in the
new state should be updated and is passed to the ZADD Redis script (see
its documentation for details).
Raises TaskNotFound if the task is not in the expected state or not in
the expected queue.
"""
pipeline = self.tiger.connection.pipeline()
scripts = self.tiger.scripts
_key = self.tiger._key
from_state = from_state or self.state
queue = self.queue
assert from_state
assert queue
scripts.fail_if_not_in_zset(_key(from_state, queue), self.id,
client=pipeline)
if to_state:
if not when:
when = time.time()
if mode:
scripts.zadd(_key(to_state, queue), when, self.id,
mode, client=pipeline)
else:
pipeline.zadd(_key(to_state, queue), self.id, when)
pipeline.sadd(_key(to_state), queue)
pipeline.zrem(_key(from_state, queue), self.id)
if not to_state: # Remove the task if necessary
if self.unique:
# Only delete if it's not in any other queue
check_states = set([ACTIVE, QUEUED, ERROR, SCHEDULED])
check_states.remove(from_state)
# TODO: Do the following two in one call.
scripts.delete_if_not_in_zsets(_key('task', self.id, 'executions'),
self.id, [
_key(state, queue) for state in check_states
], client=pipeline)
scripts.delete_if_not_in_zsets(_key('task', self.id),
self.id, [
_key(state, queue) for state in check_states
], client=pipeline)
else:
# Safe to remove
pipeline.delete(_key('task', self.id, 'executions'))
pipeline.delete(_key('task', self.id))
scripts.srem_if_not_exists(_key(from_state), queue,
_key(from_state, queue), client=pipeline)
if to_state == QUEUED:
pipeline.publish(_key('activity'), queue)
try:
scripts.execute_pipeline(pipeline)
except redis.ResponseError as e:
if '<FAIL_IF_NOT_IN_ZSET>' in e.args[0]:
raise TaskNotFound('Task {} not found in queue "{}" in state "{}".'.format(
self.id, queue, from_state
))
raise
else:
self._state = to_state | 0.002971 |
def get_password(self, service, username):
"""Get password of the username for the service
"""
items = self._find_passwords(service, username)
if not items:
return None
secret = items[0].secret
return (
secret
if isinstance(secret, six.text_type) else
secret.decode('utf-8')
) | 0.005249 |
def set_channel(self, channel):
""" Set the radio channel to be used """
if channel != self.current_channel:
_send_vendor_setup(self.handle, SET_RADIO_CHANNEL, channel, 0, ())
self.current_channel = channel | 0.00813 |
def set(self, instance, value, **kwargs):
"""Adds the value to the existing text stored in the field,
along with a small divider showing username and date of this entry.
"""
if not value:
return
value = value.strip()
date = DateTime().rfc822()
user = getSecurityManager().getUser()
username = user.getUserName()
divider = "=== {} ({})".format(date, username)
existing_remarks = instance.getRawRemarks()
remarks = '\n'.join([divider, value, existing_remarks])
ObjectField.set(self, instance, remarks)
# reindex the object after save to update all catalog metadata
instance.reindexObject()
# notify object edited event
event.notify(ObjectEditedEvent(instance)) | 0.002506 |
def _create_scales(hist: HistogramBase, vega: dict, kwargs: dict):
"""Find proper scales for axes."""
if hist.ndim == 1:
bins0 = hist.bins.astype(float)
else:
bins0 = hist.bins[0].astype(float)
xlim = kwargs.pop("xlim", "auto")
ylim = kwargs.pop("ylim", "auto")
    if xlim == "auto":
        nice_x = True
    else:
        nice_x = False
    if ylim == "auto":
        nice_y = True
    else:
        nice_y = False
# TODO: Unify xlim & ylim parameters with matplotlib
# TODO: Apply xscale & yscale parameters
vega["scales"] = [
{
"name": "xscale",
"type": "linear",
"range": "width",
"nice": nice_x,
"zero": None,
"domain": [bins0[0, 0], bins0[-1, 1]] if xlim == "auto" else [float(xlim[0]), float(xlim[1])],
# "domain": {"data": "table", "field": "x"}
},
{
"name": "yscale",
"type": "linear",
"range": "height",
"nice": nice_y,
"zero": True if hist.ndim == 1 else None,
"domain": {"data": "table", "field": "y"} if ylim == "auto" else [float(ylim[0]), float(ylim[1])]
}
]
if hist.ndim >= 2:
bins1 = hist.bins[1].astype(float)
vega["scales"][1]["domain"] = [bins1[0, 0], bins1[-1, 1]] | 0.002217 |
def set_condition(self, condition = True):
"""
Sets a new condition callback for the breakpoint.
@see: L{__init__}
@type condition: function
@param condition: (Optional) Condition callback function.
"""
if condition is None:
self.__condition = True
else:
self.__condition = condition | 0.010695 |
def arcovar(x, order):
r"""Simple and fast implementation of the covariance AR estimate
This code is 10 times faster than :func:`arcovar_marple` and more importantly
only 10 lines of code, compared to a 200 loc for :func:`arcovar_marple`
:param array X: Array of complex data samples
    :param int order: Order of linear prediction model
:return:
* a - Array of complex forward linear prediction coefficients
* e - error
The covariance method fits a Pth order autoregressive (AR) model to the
input signal, which is assumed to be the output of
an AR system driven by white noise. This method minimizes the forward
prediction error in the least-squares sense. The output vector
contains the normalized estimate of the AR system parameters
The white noise input variance estimate is also returned.
    The power spectral density of y(n) is then given by:
    .. math:: \frac{e}{\left| A(e^{jw}) \right|^2} = \frac{e}{\left| 1+\sum_{k=1}^P a(k)e^{-jwk}\right|^2}
Because the method characterizes the input data using an all-pole model,
the correct choice of the model order p is important.
.. plot::
:width: 80%
:include-source:
from spectrum import arcovar, marple_data, arma2psd
from pylab import plot, log10, linspace, axis
ar_values, error = arcovar(marple_data, 15)
psd = arma2psd(ar_values, sides='centerdc')
plot(linspace(-0.5, 0.5, len(psd)), 10*log10(psd/max(psd)))
axis([-0.5, 0.5, -60, 0])
.. seealso:: :class:`pcovar`
:validation: the AR parameters are the same as those returned by
a completely different function :func:`arcovar_marple`.
:References: [Mathworks]_
"""
from spectrum import corrmtx
import scipy.linalg
X = corrmtx(x, order, 'covariance')
Xc = np.matrix(X[:, 1:])
X1 = np.array(X[:, 0])
# Coefficients estimated via the covariance method
# Here we use lstsq rathre than solve function because Xc is not square
# matrix
a, _residues, _rank, _singular_values = scipy.linalg.lstsq(-Xc, X1)
# Estimate the input white noise variance
Cz = np.dot(X1.conj().transpose(), Xc)
e = np.dot(X1.conj().transpose(), X1) + np.dot(Cz, a)
    assert e.imag < 1e-4, 'weird behaviour'
e = float(e.real) # ignore imag part that should be small
return a, e | 0.001672 |
def debugger(self,force=False):
"""Call the pydb/pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
# use pydb if available
if debugger.has_pydb:
from pydb import pm
else:
# fallback to our internal debugger
pm = lambda : self.InteractiveTB.debugger(force=True)
with self.readline_no_record:
pm() | 0.007177 |
def setSeed(self, value):
"""
Sets the seed to value.
"""
self.seed = value
random.seed(self.seed)
if self.verbosity >= 0:
print("Conx using seed:", self.seed) | 0.009132 |
def accessibility(graph):
"""
Accessibility matrix (transitive closure).
@type graph: graph, digraph, hypergraph
@param graph: Graph.
@rtype: dictionary
@return: Accessibility information for each node.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
accessibility = {} # Accessibility matrix
# For each node i, mark each node j if that exists a path from i to j.
for each in graph:
access = {}
# Perform DFS to explore all reachable nodes
_dfs(graph, access, 1, each)
accessibility[each] = list(access.keys())
setrecursionlimit(recursionlimit)
return accessibility | 0.005495 |
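A hedged usage sketch, assuming the python-graph (pygraph) package this helper appears to be written against; the node names are illustrative:

from pygraph.classes.digraph import digraph

gr = digraph()
gr.add_nodes(["a", "b", "c"])
gr.add_edge(("a", "b"))
gr.add_edge(("b", "c"))
# Every node can reach itself; "a" can also reach "b" and "c" via the edges.
print(accessibility(gr))   # e.g. {'a': ['a', 'b', 'c'], 'b': ['b', 'c'], 'c': ['c']}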
def choices_label(choices: tuple, value) -> str:
"""
Iterates (value,label) list and returns label matching the choice
:param choices: [(choice1, label1), (choice2, label2), ...]
:param value: Value to find
:return: label or None
"""
for key, label in choices:
if key == value:
return label
return '' | 0.002841 |
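A minimal usage sketch for choices_label; the choices tuple is illustrative:

STATUS_CHOICES = (("new", "New"), ("done", "Done"))
print(choices_label(STATUS_CHOICES, "done"))     # Done
print(choices_label(STATUS_CHOICES, "unknown"))  # empty string when nothing matches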
def get_config(config_file=None, config_values=None, load_project_conf=True):
"""Loads config file and returns its content."""
config_values = config_values or {}
config_settings = {}
default_conf = _get_default_conf()
user_conf = _get_user_conf(config_file) if config_file else {}
# load project configuration only when user configuration was not specified
project_conf = {} if user_conf or not load_project_conf else _get_project_conf()
if not (user_conf or project_conf or config_values):
if load_project_conf:
raise Dump2PolarionException(
"Failed to find configuration file for the project "
"and no configuration file or values passed."
)
raise Dump2PolarionException("No configuration file or values passed.")
# merge configuration
config_settings.update(default_conf)
config_settings.update(user_conf)
config_settings.update(project_conf)
config_settings.update(config_values)
_populate_urls(config_settings)
_set_legacy_project_id(config_settings)
_set_legacy_custom_fields(config_settings)
_check_config(config_settings)
return config_settings | 0.001664 |
def _distance_squared(self, p2: "Point2") -> Union[int, float]:
""" Function used to not take the square root as the distances will stay proportionally the same. This is to speed up the sorting process. """
return (self[0] - p2[0]) ** 2 + (self[1] - p2[1]) ** 2 | 0.01083 |
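Why skipping the square root is safe: squaring is monotonic for non-negative values, so ordering by squared distance equals ordering by true distance. A standalone illustration with plain tuples instead of the Point2 class above:

import math

points = [(3, 4), (0, 0), (1, 1)]
origin = (0, 0)
by_squared = sorted(points, key=lambda p: (p[0] - origin[0]) ** 2 + (p[1] - origin[1]) ** 2)
by_true = sorted(points, key=lambda p: math.hypot(p[0] - origin[0], p[1] - origin[1]))
print(by_squared == by_true)   # True - same ordering, without any sqrt calls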
def read_key_value_pairs_from_file(*path):
"""
Read key value pairs from a file (each pair on a separate line).
Key and value are separated by ' ' as often used by the kernel.
@return a generator of tuples
"""
with open(os.path.join(*path)) as f:
for line in f:
yield line.split(' ', 1) | 0.00303 |
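A hedged usage sketch that writes a small temporary file in the space-separated format the generator expects; the keys are illustrative:

import os
import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("nr_periods 12\nnr_throttled 3\n")

for key, value in read_key_value_pairs_from_file(tmp.name):
    print(key, value.strip())   # "nr_periods 12", then "nr_throttled 3"

os.remove(tmp.name)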
def get_asset_query_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the asset query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.AssetQuerySession) - an
``AssetQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_asset_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_asset_query()`` is ``true``.*
"""
return AssetQuerySession(
self._provider_manager.get_asset_query_session(proxy),
self._config_map) | 0.002793 |
def solve(self, **kwargs):
"""
The kwargs required depend upon the script type.
hash160_lookup:
dict-like structure that returns a secret exponent for a hash160
existing_script:
existing solution to improve upon (optional)
sign_value:
the integer value to sign (derived from the transaction hash)
signature_type:
usually SIGHASH_ALL (1)
"""
# we need a hash160 => secret_exponent lookup
db = kwargs.get("hash160_lookup")
if db is None:
raise SolvingError("missing hash160_lookup parameter")
sign_value = kwargs.get("sign_value")
signature_type = kwargs.get("signature_type")
secs_solved = set()
existing_signatures = []
existing_script = kwargs.get("existing_script")
if existing_script:
pc = 0
opcode, data, pc = tools.get_opcode(existing_script, pc)
# ignore the first opcode
while pc < len(existing_script):
opcode, data, pc = tools.get_opcode(existing_script, pc)
sig_pair, actual_signature_type = parse_signature_blob(data)
for sec_key in self.sec_keys:
try:
public_pair = encoding.sec_to_public_pair(sec_key)
sig_pair, signature_type = parse_signature_blob(data)
v = ecdsa.verify(ecdsa.generator_secp256k1, public_pair, sign_value, sig_pair)
if v:
existing_signatures.append(data)
secs_solved.add(sec_key)
break
except encoding.EncodingError:
# if public_pair is invalid, we just ignore it
pass
for sec_key in self.sec_keys:
if sec_key in secs_solved:
continue
if len(existing_signatures) >= self.n:
break
hash160 = encoding.hash160(sec_key)
result = db.get(hash160)
if result is None:
continue
secret_exponent, public_pair, compressed = result
binary_signature = self._create_script_signature(secret_exponent, sign_value, signature_type)
existing_signatures.append(b2h(binary_signature))
DUMMY_SIGNATURE = "OP_0"
while len(existing_signatures) < self.n:
existing_signatures.append(DUMMY_SIGNATURE)
script = "OP_0 %s" % " ".join(s for s in existing_signatures)
solution = tools.compile(script)
return solution | 0.001226 |
def set_widgets(self):
"""Set widgets on the Extra Keywords tab."""
existing_inasafe_field = self.parent.get_existing_keyword(
'inasafe_fields')
# Remove old container and parameter
if self.parameter_container:
self.kwExtraKeywordsGridLayout.removeWidget(
self.parameter_container)
if self.parameters:
self.parameters = []
# Iterate through all inasafe fields
for inasafe_field in self.inasafe_fields_for_the_layer():
# Option for Not Available
option_list = [no_field]
for field in self.parent.layer.fields():
# Check the field type
if isinstance(inasafe_field['type'], list):
if field.type() in inasafe_field['type']:
field_name = field.name()
option_list.append('%s' % field_name)
else:
if field.type() == inasafe_field['type']:
field_name = field.name()
option_list.append('%s' % field_name)
# If there is no option, pass
if option_list == [no_field]:
continue
# Create SelectParameter
select_parameter = SelectParameter()
select_parameter.guid = inasafe_field['key']
select_parameter.name = inasafe_field['name']
select_parameter.is_required = False
select_parameter.description = inasafe_field['description']
select_parameter.help_text = inasafe_field['help_text']
select_parameter.element_type = str
select_parameter.options_list = option_list
select_parameter.value = no_field
# Check if there is already value in the metadata.
if existing_inasafe_field:
existing_value = existing_inasafe_field.get(
inasafe_field['key'])
if existing_value:
if existing_value in select_parameter.options_list:
select_parameter.value = existing_value
self.parameters.append(select_parameter)
# Create the parameter container and add to the wizard.
self.parameter_container = ParameterContainer(self.parameters)
self.parameter_container.setup_ui()
self.kwExtraKeywordsGridLayout.addWidget(self.parameter_container)
if not self.parameters:
no_field_message = tr(
'There is no available field that has match type for the '
'InaSAFE fields. You can click next.')
self.lblInaSAFEFields.setText(no_field_message) | 0.000736 |
def uploadPackages(self, directory):
"""
        Not working. I am not able to figure out how to upload. It returns status 200 OK with this code but does not store the files.
        In tomcat.log (IM) there's a complaint about character encoding when uploading a file. Not sure how to rectify it in the requests post call, though.
"""
files_to_upload_dict = {}
files_to_upload_list = [ f for f in listdir(directory) if isfile(join(directory,f)) ]
self.logger.debug("uploadPackages(" + "{})".format(directory))
#print "Files to upload:"
for index in range(len(files_to_upload_list)):
self.logger.info(files_to_upload_list[index])
self.uploadFileToIM (directory, files_to_upload_list[index], files_to_upload_list[index]) | 0.015171 |
def _execute_pep8(pep8_options, source):
"""Execute pycodestyle via python method calls."""
class QuietReport(pycodestyle.BaseReport):
"""Version of checker that does not print."""
def __init__(self, options):
super(QuietReport, self).__init__(options)
self.__full_error_results = []
def error(self, line_number, offset, text, check):
"""Collect errors."""
code = super(QuietReport, self).error(line_number,
offset,
text,
check)
if code:
self.__full_error_results.append(
{'id': code,
'line': line_number,
'column': offset + 1,
'info': text})
def full_error_results(self):
"""Return error results in detail.
Results are in the form of a list of dictionaries. Each
dictionary contains 'id', 'line', 'column', and 'info'.
"""
return self.__full_error_results
checker = pycodestyle.Checker('', lines=source, reporter=QuietReport,
**pep8_options)
checker.check_all()
return checker.report.full_error_results() | 0.000732 |
def get_component(self, component_name):
"""
Looks up a component by its name.
Args:
component_name: The name of the component to look up.
Returns:
The component for the provided name or None if there is no such component.
"""
mapping = self.get_components()
return mapping[component_name] if component_name in mapping else None | 0.007335 |
def optimizer_setter(
net, param, value, optimizer_attr='optimizer_', optimizer_name='optimizer'
):
"""Handle setting of optimizer parameters such as learning rate and
parameter group specific parameters such as momentum.
The parameters ``optimizer_attr`` and ``optimizer_name`` can be specified
if there exists more than one optimizer (e.g., in seq2seq models).
"""
if param == 'lr':
param_group = 'all'
param_name = 'lr'
net.lr = value
else:
param_group, param_name = _extract_optimizer_param_name_and_group(
optimizer_name, param)
_set_optimizer_param(
optimizer=getattr(net, optimizer_attr),
param_group=param_group,
param_name=param_name,
value=value
) | 0.003831 |
def get_stub(source, generic=False):
"""Get the stub code for a source code.
:sig: (str, bool) -> str
:param source: Source code to generate the stub for.
:param generic: Whether to produce generic stubs.
:return: Generated stub code.
"""
generator = StubGenerator(source, generic=generic)
stub = generator.generate_stub()
return stub | 0.002695 |
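A hedged usage sketch; the annotated source string is illustrative and the exact stub text depends on the StubGenerator used above:

source = "def add(x: int, y: int) -> int:\n    return x + y\n"
print(get_stub(source))   # expected to print a .pyi-style signature for add()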
def Parse(self):
"""Parse the file."""
if not self._file:
logging.error("Couldn't open file")
return
# Limit read size to 5MB.
self.input_dat = self._file.read(1024 * 1024 * 5)
if not self.input_dat.startswith(self.FILE_HEADER):
logging.error("Invalid index.dat file %s", self._file)
return
# Events aren't time ordered in the history file, so we collect them all
# then sort.
events = []
for event in self._DoParse():
events.append(event)
for event in sorted(events, key=operator.itemgetter("mtime")):
yield event | 0.011804 |
def require_repeated_start():
"""Enable repeated start conditions for I2C register reads. This is the
normal behavior for I2C, however on some platforms like the Raspberry Pi
there are bugs which disable repeated starts unless explicitly enabled with
this function. See this thread for more details:
http://www.raspberrypi.org/forums/viewtopic.php?f=44&t=15840
"""
plat = Platform.platform_detect()
if plat == Platform.RASPBERRY_PI and os.path.exists('/sys/module/i2c_bcm2708/parameters/combined'):
# On the Raspberry Pi there is a bug where register reads don't send a
# repeated start condition like the kernel smbus I2C driver functions
# define. As a workaround this bit in the BCM2708 driver sysfs tree can
# be changed to enable I2C repeated starts.
subprocess.check_call('chmod 666 /sys/module/i2c_bcm2708/parameters/combined', shell=True)
subprocess.check_call('echo -n 1 > /sys/module/i2c_bcm2708/parameters/combined', shell=True) | 0.004883 |
def decorate(cls, app, *args, run_middleware=False, with_context=False,
**kwargs):
"""
This is a decorator that can be used to apply this plugin to a specific
route/view on your app, rather than the whole app.
:param app:
:type app: Sanic | Blueprint
:param args:
:type args: tuple(Any)
:param run_middleware:
:type run_middleware: bool
:param with_context:
:type with_context: bool
:param kwargs:
:param kwargs: dict(Any)
:return: the decorated route/view
:rtype: fn
"""
from spf.framework import SanicPluginsFramework
spf = SanicPluginsFramework(app) # get the singleton from the app
try:
assoc = spf.register_plugin(cls, skip_reg=True)
except ValueError as e:
# this is normal, if this plugin has been registered previously
assert e.args and len(e.args) > 1
assoc = e.args[1]
(plugin, reg) = assoc
inst = spf.get_plugin(plugin) # plugin may not actually be registered
# registered might be True, False or None at this point
regd = True if inst else None
if regd is True:
# middleware will be run on this route anyway, because the plugin
# is registered on the app. Turn it off on the route-level.
run_middleware = False
req_middleware = deque()
resp_middleware = deque()
if run_middleware:
for i, m in enumerate(plugin._middlewares):
attach_to = m.kwargs.pop('attach_to', 'request')
priority = m.kwargs.pop('priority', 5)
with_context = m.kwargs.pop('with_context', False)
mw_handle_fn = m.middleware
if attach_to == 'response':
relative = m.kwargs.pop('relative', 'post')
if relative == "pre":
mw = (0, 0 - priority, 0 - i, mw_handle_fn,
with_context, m.args, m.kwargs)
else: # relative = "post"
mw = (1, 0 - priority, 0 - i, mw_handle_fn,
with_context, m.args, m.kwargs)
resp_middleware.append(mw)
else: # attach_to = "request"
relative = m.kwargs.pop('relative', 'pre')
if relative == "post":
mw = (1, priority, i, mw_handle_fn, with_context,
m.args, m.kwargs)
else: # relative = "pre"
mw = (0, priority, i, mw_handle_fn, with_context,
m.args, m.kwargs)
req_middleware.append(mw)
req_middleware = tuple(sorted(req_middleware))
resp_middleware = tuple(sorted(resp_middleware))
def _decorator(f):
nonlocal spf, plugin, regd, run_middleware, with_context
nonlocal req_middleware, resp_middleware, args, kwargs
async def wrapper(request, *a, **kw):
nonlocal spf, plugin, regd, run_middleware, with_context
nonlocal req_middleware, resp_middleware, f, args, kwargs
# the plugin was not registered on the app, it might be now
if regd is None:
_inst = spf.get_plugin(plugin)
regd = _inst is not None
context = plugin.get_context_from_spf(spf)
if run_middleware and not regd and len(req_middleware) > 0:
for (_a, _p, _i, handler, with_context, args, kwargs) \
in req_middleware:
if with_context:
resp = handler(request, *args, context=context,
**kwargs)
else:
resp = handler(request, *args, **kwargs)
if isawaitable(resp):
resp = await resp
if resp:
return
response = await plugin.route_wrapper(
f, request, context, a, kw, *args,
with_context=with_context, **kwargs)
if isawaitable(response):
response = await response
if run_middleware and not regd and len(resp_middleware) > 0:
for (_a, _p, _i, handler, with_context, args, kwargs) \
in resp_middleware:
if with_context:
_resp = handler(request, response, *args,
context=context, **kwargs)
else:
_resp = handler(request, response, *args, **kwargs)
if isawaitable(_resp):
_resp = await _resp
if _resp:
response = _resp
break
return response
return update_wrapper(wrapper, f)
return _decorator | 0.000574 |
def get_item(self, obj):
"""Return a result item.
:param obj: Instance object
:returns: a dictionnary with at least `id` and `text` values
"""
return {"id": obj.id, "text": self.get_label(obj), "name": obj.name} | 0.007937 |
def get_v_eff_stress_at_depth(self, y_c):
"""
        Determine the vertical effective stress at a single depth y_c.
:param y_c: float, depth from surface
"""
sigma_v_c = self.get_v_total_stress_at_depth(y_c)
pp = self.get_hydrostatic_pressure_at_depth(y_c)
sigma_veff_c = sigma_v_c - pp
return sigma_veff_c | 0.005495 |
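In symbols, the method above evaluates Terzaghi's effective stress relation sigma'_v(y_c) = sigma_v(y_c) - u(y_c): the vertical effective stress is the total vertical stress minus the hydrostatic pore pressure at that depth.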
def get_developer_certificate(self, developer_certificate_id, authorization, **kwargs): # noqa: E501
"""Fetch an existing developer certificate to connect to the bootstrap server. # noqa: E501
This REST API is intended to be used by customers to fetch an existing developer certificate (a certificate that can be flashed into multiple devices to connect to bootstrap server). **Example usage:** curl -X GET \"http://api.us-east-1.mbedcloud.com/v3/developer-certificates/THE_CERTIFICATE_ID\" -H \"accept: application/json\" -H \"Authorization: Bearer THE_ACCESS_TOKEN\" # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.get_developer_certificate(developer_certificate_id, authorization, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param str developer_certificate_id: A unique identifier for the developer certificate. (required)
:param str authorization: Bearer {Access Token}. (required)
:return: DeveloperCertificateResponseData
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.get_developer_certificate_with_http_info(developer_certificate_id, authorization, **kwargs) # noqa: E501
else:
(data) = self.get_developer_certificate_with_http_info(developer_certificate_id, authorization, **kwargs) # noqa: E501
return data | 0.001211 |
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path) | 0.002079 |
def pointOnCircle(cx, cy, radius, angle):
"""
Calculates the coordinates of a point on a circle given the center point,
radius, and angle.
"""
angle = math.radians(angle) - (math.pi / 2)
x = cx + radius * math.cos(angle)
if x < cx:
x = math.ceil(x)
else:
x = math.floor(x)
y = cy + radius * math.sin(angle)
if y < cy:
y = math.ceil(y)
else:
y = math.floor(y)
return (int(x), int(y)) | 0.002151 |
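A quick sanity check of the angle convention above (subtracting pi/2 makes 0 degrees point toward smaller y, i.e. "up" in screen coordinates, and 90 degrees point to the right); assumes pointOnCircle is in scope:

print(pointOnCircle(100, 100, 50, 0))     # (100, 50)  - directly above the centre (screen coords)
print(pointOnCircle(100, 100, 50, 90))    # (150, 100) - to the right of the centre
print(pointOnCircle(100, 100, 50, 180))   # (100, 150) - directly below the centre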
def distinguish(self, how=True):
"""Distinguishes this thing (POST). Calls :meth:`narwal.Reddit.distinguish`.
:param how: either True, False, or 'admin'
"""
return self._reddit.distinguish(self.name, how=how) | 0.016 |
def _reference_keys(self, reference):
""" Returns a list of all of keys for a given reference.
:param reference: a :string:
:rtype: A :list: of reference keys.
"""
if not isinstance(reference, six.string_types):
raise TypeError(
'When using ~ to reference dynamic attributes ref must be a str. a {0} was provided.'.format(type(reference).__name__)
)
if '~' in reference:
reference = reference[1:]
scheme = self._scheme_references.get(reference)
if not scheme:
# TODO: need to create nice error here as well and print pretty message.
raise LookupError(
"Was unable to find {0} in the scheme references. "
"available references {1}".format(reference, ', '.join(self._scheme_references.keys()))
)
return scheme['keys']
else:
raise AttributeError('references must start with ~. Please update {0} and retry.'.format(reference)) | 0.005607 |
def findbestparams_ho(xsamples):
""" Minimize sum of square differences of H_sho-<H_sho> for timesamples """
return np.abs(leastsq(deltaH_ho,np.array([10.,10.,10.]), Dfun = Jac_deltaH_ho, args=(xsamples,))[0])[:3] | 0.031674 |
def assoc2(self, assets_by_site, assoc_dist, mode, asset_refs):
"""
Associated a list of assets by site to the site collection used
to instantiate GeographicObjects.
        :param assets_by_site: a list of lists of assets
:param assoc_dist: the maximum distance for association
:param mode: 'strict', 'warn' or 'filter'
        :param asset_refs: IDs of the assets, as a list of strings
:returns: filtered site collection, filtered assets by site, discarded
"""
assert mode in 'strict filter', mode
self.objects.filtered # self.objects must be a SiteCollection
asset_dt = numpy.dtype(
[('asset_ref', vstr), ('lon', F32), ('lat', F32)])
assets_by_sid = collections.defaultdict(list)
discarded = []
for assets in assets_by_site:
lon, lat = assets[0].location
obj, distance = self.get_closest(lon, lat)
if distance <= assoc_dist:
# keep the assets, otherwise discard them
assets_by_sid[obj['sids']].extend(assets)
elif mode == 'strict':
raise SiteAssociationError(
'There is nothing closer than %s km '
'to site (%s %s)' % (assoc_dist, lon, lat))
else:
discarded.extend(assets)
sids = sorted(assets_by_sid)
if not sids:
raise SiteAssociationError(
'Could not associate any site to any assets within the '
'asset_hazard_distance of %s km' % assoc_dist)
assets_by_site = [
sorted(assets_by_sid[sid], key=operator.attrgetter('ordinal'))
for sid in sids]
data = [(asset_refs[asset.ordinal],) + asset.location
for asset in discarded]
discarded = numpy.array(data, asset_dt)
return self.objects.filtered(sids), assets_by_site, discarded | 0.00103 |
def is_any_down(self):
"""Is any button depressed?"""
for key in range(len(self.current_state.key_states)):
if self.is_down(key):
return True
return False | 0.009709 |
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks) | 0.003384 |
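This is the classic TextWrapper.wrap behaviour; a minimal sketch using the standard-library textwrap module, which exposes the same method:

import textwrap

wrapper = textwrap.TextWrapper(width=20)
print(wrapper.wrap("The quick brown fox jumps over the lazy dog"))
# ['The quick brown fox', 'jumps over the lazy', 'dog']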
def _get_threshold(self, graph, benchmark, entry_name):
"""
Compute the regression threshold in asv.conf.json.
"""
if graph.params.get('branch'):
branch_suffix = '@' + graph.params.get('branch')
else:
branch_suffix = ''
max_threshold = None
for regex, threshold in six.iteritems(self.conf.regressions_thresholds):
if re.match(regex, entry_name + branch_suffix):
try:
threshold = float(threshold)
except ValueError:
raise util.UserError("Non-float threshold in asv.conf.json: {!r}".format(threshold))
if max_threshold is None:
max_threshold = threshold
else:
max_threshold = max(threshold, max_threshold)
if max_threshold is None:
max_threshold = 0.05
return max_threshold | 0.004255 |
def color(self):
"""Function color in IDA View"""
color = idc.GetColor(self.ea, idc.CIC_FUNC)
if color == 0xFFFFFFFF:
return None
return color | 0.010695 |
def _handle_request_exception(self, e):
"""This method handle HTTPError exceptions the same as how tornado does,
leave other exceptions to be handled by user defined handler function
maped in class attribute `EXCEPTION_HANDLERS`
Common HTTP status codes:
200 OK
301 Moved Permanently
302 Found
400 Bad Request
401 Unauthorized
403 Forbidden
404 Not Found
405 Method Not Allowed
500 Internal Server Error
It is suggested only to use above HTTP status codes
"""
handle_func = self._exception_default_handler
if self.EXCEPTION_HANDLERS:
for excs, func_name in self.EXCEPTION_HANDLERS.items():
if isinstance(e, excs):
handle_func = getattr(self, func_name)
break
handle_func(e)
if not self._finished:
self.finish() | 0.003058 |
def iterate(self, max_iter=None):
"""Yields items from the mux, and handles stream exhaustion and
replacement.
"""
if max_iter is None:
max_iter = np.inf
# Calls Streamer's __enter__, which calls activate()
with self as active_mux:
# Main sampling loop
n = 0
while n < max_iter and active_mux._streamers_available():
# Pick a stream from the active set
idx = active_mux._next_sample_index()
# Can we sample from it?
try:
# Then yield the sample
yield six.advance_iterator(active_mux.streams_[idx])
# Increment the sample counter
n += 1
active_mux.stream_counts_[idx] += 1
except StopIteration:
# Oops, this stream is exhausted.
# Call child-class exhausted-stream behavior
active_mux._on_stream_exhausted(idx)
# Setup a new stream for this index
active_mux._replace_stream(idx) | 0.001724 |
def _init_actions(self, create_standard_actions):
""" Init context menu action """
menu_advanced = QtWidgets.QMenu(_('Advanced'))
self.add_menu(menu_advanced)
self._sub_menus = {
'Advanced': menu_advanced
}
if create_standard_actions:
# Undo
action = QtWidgets.QAction(_('Undo'), self)
action.setShortcut('Ctrl+Z')
action.setIcon(icons.icon(
'edit-undo', ':/pyqode-icons/rc/edit-undo.png', 'fa.undo'))
action.triggered.connect(self.undo)
self.undoAvailable.connect(action.setVisible)
action.setVisible(False)
self.add_action(action, sub_menu=None)
self.action_undo = action
# Redo
action = QtWidgets.QAction(_('Redo'), self)
action.setShortcut('Ctrl+Y')
action.setIcon(icons.icon(
'edit-redo', ':/pyqode-icons/rc/edit-redo.png', 'fa.repeat'))
action.triggered.connect(self.redo)
self.redoAvailable.connect(action.setVisible)
action.setVisible(False)
self.add_action(action, sub_menu=None)
self.action_redo = action
# Copy
action = QtWidgets.QAction(_('Copy'), self)
action.setShortcut(QtGui.QKeySequence.Copy)
action.setIcon(icons.icon(
'edit-copy', ':/pyqode-icons/rc/edit-copy.png', 'fa.copy'))
action.triggered.connect(self.copy)
self.add_action(action, sub_menu=None)
self.action_copy = action
# cut
action = QtWidgets.QAction(_('Cut'), self)
action.setShortcut(QtGui.QKeySequence.Cut)
action.setIcon(icons.icon(
'edit-cut', ':/pyqode-icons/rc/edit-cut.png', 'fa.cut'))
action.triggered.connect(self.cut)
self.add_action(action, sub_menu=None)
self.action_cut = action
# paste
action = QtWidgets.QAction(_('Paste'), self)
action.setShortcut(QtGui.QKeySequence.Paste)
action.setIcon(icons.icon(
'edit-paste', ':/pyqode-icons/rc/edit-paste.png',
'fa.paste'))
action.triggered.connect(self.paste)
self.add_action(action, sub_menu=None)
self.action_paste = action
# duplicate line
action = QtWidgets.QAction(_('Duplicate line'), self)
action.setShortcut('Ctrl+D')
action.triggered.connect(self.duplicate_line)
self.add_action(action, sub_menu=None)
self.action_duplicate_line = action
# swap line up
action = QtWidgets.QAction(_('Swap line up'), self)
action.setShortcut("Alt++")
action.triggered.connect(self.swapLineUp)
self.add_action(action, sub_menu=None)
self.action_swap_line_up = action
# swap line down
action = QtWidgets.QAction(_('Swap line down'), self)
action.setShortcut("Alt+-")
action.triggered.connect(self.swapLineDown)
self.add_action(action, sub_menu=None)
self.action_swap_line_down = action
# select all
action = QtWidgets.QAction(_('Select all'), self)
action.setShortcut(QtGui.QKeySequence.SelectAll)
action.triggered.connect(self.selectAll)
self.action_select_all = action
self.add_action(self.action_select_all, sub_menu=None)
self.add_separator(sub_menu=None)
if create_standard_actions:
# indent
action = QtWidgets.QAction(_('Indent'), self)
action.setShortcut('Tab')
action.setIcon(icons.icon(
'format-indent-more',
':/pyqode-icons/rc/format-indent-more.png', 'fa.indent'))
action.triggered.connect(self.indent)
self.add_action(action)
self.action_indent = action
# unindent
action = QtWidgets.QAction(_('Un-indent'), self)
action.setShortcut('Shift+Tab')
action.setIcon(icons.icon(
'format-indent-less',
':/pyqode-icons/rc/format-indent-less.png', 'fa.dedent'))
action.triggered.connect(self.un_indent)
self.add_action(action)
self.action_un_indent = action
self.add_separator()
# goto
action = QtWidgets.QAction(_('Go to line'), self)
action.setShortcut('Ctrl+G')
action.setIcon(icons.icon(
'go-jump', ':/pyqode-icons/rc/goto-line.png', 'fa.share'))
action.triggered.connect(self.goto_line)
self.add_action(action)
self.action_goto_line = action | 0.000424 |
def make_directory(self, directory_name, *args, **kwargs):
""" :meth:`.WNetworkClientProto.make_directory` method implementation
"""
previous_path = self.session_path()
try:
self.session_path(directory_name)
os.mkdir(self.full_path())
finally:
self.session_path(previous_path) | 0.033898 |
def demo(args):
"""
%prog demo
Draw sample gene features to illustrate the various fates of duplicate
genes - to be used in a book chapter.
"""
p = OptionParser(demo.__doc__)
opts, args = p.parse_args(args)
fig = plt.figure(1, (8, 5))
root = fig.add_axes([0, 0, 1, 1])
panel_space = .23
dup_space = .025
# Draw a gene and two regulatory elements at these arbitrary locations
locs = [(.5, .9), # ancestral gene
(.5, .9 - panel_space + dup_space), # identical copies
(.5, .9 - panel_space - dup_space),
(.5, .9 - 2 * panel_space + dup_space), # degenerate copies
(.5, .9 - 2 * panel_space - dup_space),
(.2, .9 - 3 * panel_space + dup_space), # sub-functionalization
(.2, .9 - 3 * panel_space - dup_space),
(.5, .9 - 3 * panel_space + dup_space), # neo-functionalization
(.5, .9 - 3 * panel_space - dup_space),
(.8, .9 - 3 * panel_space + dup_space), # non-functionalization
(.8, .9 - 3 * panel_space - dup_space),
]
default_regulator = "gm"
regulators = [default_regulator,
default_regulator, default_regulator,
"wm", default_regulator,
"wm", "gw",
"wb", default_regulator,
"ww", default_regulator,
]
width = .24
for i, (xx, yy) in enumerate(locs):
regulator = regulators[i]
x1, x2 = xx - .5 * width, xx + .5 * width
Glyph(root, x1, x2, yy)
if i == 9: # upper copy for non-functionalization
continue
# coding region
x1, x2 = xx - .16 * width, xx + .45 * width
Glyph(root, x1, x2, yy, fc="k")
# two regulatory elements
x1, x2 = xx - .4 * width, xx - .28 * width
for xx, fc in zip((x1, x2), regulator):
if fc == 'w':
continue
DoubleCircle(root, xx, yy, fc=fc)
rotation = 30
tip = .02
if i == 0:
ya = yy + tip
root.text(x1, ya, "Flower", rotation=rotation, va="bottom")
root.text(x2, ya, "Root", rotation=rotation, va="bottom")
elif i == 7:
ya = yy + tip
root.text(x2, ya, "Leaf", rotation=rotation, va="bottom")
# Draw arrows between panels (center)
arrow_dist = .08
ar_xpos = .5
for ar_ypos in (.3, .53, .76):
root.annotate(" ", (ar_xpos, ar_ypos),
(ar_xpos, ar_ypos + arrow_dist),
arrowprops=arrowprops)
ar_ypos = .3
for ar_xpos in (.2, .8):
root.annotate(" ", (ar_xpos, ar_ypos),
(.5, ar_ypos + arrow_dist),
arrowprops=arrowprops)
# Duplication, Degeneration
xx = .6
ys = (.76, .53)
processes = ("Duplication", "Degeneration")
for yy, process in zip(ys, processes):
root.text(xx, yy + .02, process, fontweight="bold")
# Label of fates
xs = (.2, .5, .8)
fates = ("Subfunctionalization", "Neofunctionalization",
"Nonfunctionalization")
yy = .05
for xx, fate in zip(xs, fates):
RoundLabel(root, xx, yy, fate)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
figname = "demo.pdf"
savefig(figname, dpi=300) | 0.004825 |
def get_page_text(self, project, wiki_identifier, path=None, recursion_level=None, version_descriptor=None, include_content=None, **kwargs):
"""GetPageText.
[Preview API] Gets metadata or content of the wiki page for the provided path. Content negotiation is done based on the `Accept` header sent in the request.
:param str project: Project ID or project name
:param str wiki_identifier: Wiki Id or name.
:param str path: Wiki page path.
:param str recursion_level: Recursion level for subpages retrieval. Defaults to `None` (Optional).
:param :class:`<GitVersionDescriptor> <azure.devops.v5_1.wiki.models.GitVersionDescriptor>` version_descriptor: GitVersionDescriptor for the page. Defaults to the default branch (Optional).
:param bool include_content: True to include the content of the page in the response for Json content type. Defaults to false (Optional)
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if wiki_identifier is not None:
route_values['wikiIdentifier'] = self._serialize.url('wiki_identifier', wiki_identifier, 'str')
query_parameters = {}
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if recursion_level is not None:
query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
if version_descriptor is not None:
if version_descriptor.version_type is not None:
query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
if version_descriptor.version is not None:
query_parameters['versionDescriptor.version'] = version_descriptor.version
if version_descriptor.version_options is not None:
query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options
if include_content is not None:
query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
response = self._send(http_method='GET',
location_id='25d3fbc7-fe3d-46cb-b5a5-0b6f79caf27b',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback) | 0.005751 |
def get_project_export(self, project_id):
""" Get project info for export """
try:
result = self._request('/getprojectexport/',
{'projectid': project_id})
return TildaProject(**result)
except NetworkError:
return [] | 0.006452 |