text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def simulate(s0, transmat, steps=1):
"""Simulate the next state
Parameters
----------
s0 : ndarray
Vector with state variables at t=0
transmat : ndarray
The estimated transition/stochastic matrix.
steps : int
(Default: 1) The number of steps to simulate model outputs ahead.
If steps>1, a Multi-Step Simulation is triggered.
Returns
-------
out : ndarray
(steps=1) Vector with the simulated state variables.
(steps>1) Matrix with out[step, :] rows (C order) from a
Multi-Step Simulation. The first row is the initial state
vector out[0, :]=s0 for algorithmic reasons.
"""
# Single-Step simulation
if steps == 1:
return np.dot(s0, transmat)
# Multi-Step simulation
out = np.zeros(shape=(steps + 1, len(s0)), order='C')
out[0, :] = s0
for i in range(1, steps + 1):
out[i, :] = np.dot(out[i - 1, :], transmat)
return out | 0.001016 |
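# A minimal usage sketch for simulate(), assuming `import numpy as np` and an
# illustrative two-state row-stochastic transition matrix.
import numpy as np

transmat = np.array([[0.9, 0.1],
                     [0.2, 0.8]])   # transition probabilities, rows sum to 1
s0 = np.array([1.0, 0.0])           # start fully in state 0

print(simulate(s0, transmat))            # single step -> [0.9 0.1]
print(simulate(s0, transmat, steps=3))   # (3+1) x 2 matrix; row 0 is s0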
def tcc(text: str) -> str:
"""
TCC generator, generates Thai Character Clusters
:param str text: text to be tokenized to character clusters
:return: generator yielding subwords (character clusters)
"""
if not text or not isinstance(text, str):
return ""
p = 0
while p < len(text):
m = PAT_TCC.match(text[p:])
if m:
n = m.span()[1]
else:
n = 1
yield text[p : p + n]
p += n | 0.004396 |
def unweave(iterable, n=2):
r"""Divide `iterable` in `n` lists, so that every `n`th element belongs to
list `n`.
Example:
>>> unweave((1,2,3,4,5), 3)
[[1, 4], [2, 5], [3]]
"""
res = [[] for i in range(n)]
i = 0
for x in iterable:
res[i % n].append(x)
i += 1
return res | 0.003067 |
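# A small usage sketch: distribute ten task IDs round-robin across three workers.
tasks = list(range(10))
workers = unweave(tasks, 3)
print(workers)   # [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]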
def _factory_default(self, confirm=False):
"""Resets the device to factory defaults.
:param confirm: This function should not normally be used; to prevent
accidental resets, a confirm value of `True` must be passed.
"""
if confirm is True:
self._write(('DFLT', Integer), 99)
else:
raise ValueError('Reset to factory defaults was not confirmed.') | 0.004751 |
def _download_tlds_list(self):
"""
Function downloads list of TLDs from IANA.
LINK: https://data.iana.org/TLD/tlds-alpha-by-domain.txt
:return: True if list was downloaded, False in case of an error
:rtype: bool
"""
url_list = 'https://data.iana.org/TLD/tlds-alpha-by-domain.txt'
# Default cache file exists (set by _default_cache_file)
# and we need write permission
if self._default_cache_file and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.info("Default cache file is not writable.")
self._tld_list_path = self._get_cache_file_path()
self._logger.info(
"Changed path of cache file to: %s",
self._tld_list_path
)
if os.access(self._tld_list_path, os.F_OK) and \
not os.access(self._tld_list_path, os.W_OK):
self._logger.error("ERROR: Cache file is not writable for current "
"user. ({})".format(self._tld_list_path))
return False
req = urllib.request.Request(url_list)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.0; '
'WOW64; rv:24.0) Gecko/20100101 '
'Firefox/24.0')
with open(self._tld_list_path, 'w') as ftld:
try:
with urllib.request.urlopen(req) as f:
page = f.read().decode('utf-8')
ftld.write(page)
except HTTPError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(HTTPError: {})".format(e.reason))
return False
except URLError as e:
self._logger.error("ERROR: Can not download list ot TLDs. "
"(URLError: {})".format(e.reason))
return False
return True | 0.000999 |
def create_sonos_playlist(self, title):
"""Create a new empty Sonos playlist.
Args:
title: Name of the playlist
:rtype: :py:class:`~.soco.data_structures.DidlPlaylistContainer`
"""
response = self.avTransport.CreateSavedQueue([
('InstanceID', 0),
('Title', title),
('EnqueuedURI', ''),
('EnqueuedURIMetaData', ''),
])
item_id = response['AssignedObjectID']
obj_id = item_id.split(':', 2)[1]
uri = "file:///jffs/settings/savedqueues.rsq#{0}".format(obj_id)
res = [DidlResource(uri=uri, protocol_info="x-rincon-playlist:*:*:*")]
return DidlPlaylistContainer(
resources=res, title=title, parent_id='SQ:', item_id=item_id) | 0.002558 |
def NodeDriver_wait_until_running(self, node, wait_period=3, timeout=600,
ssh_interface='public_ips', force_ipv4=True):
"""
Block until node is fully booted and has an IP address assigned.
@keyword node: Node instance.
@type node: C{Node}
@keyword wait_period: How many seconds to wait between each loop
iteration (default is 3)
@type wait_period: C{int}
@keyword timeout: How many seconds to wait before timing out
(default is 600)
@type timeout: C{int}
@keyword ssh_interface: The interface to wait for
(default is 'public_ips')
@type ssh_interface: C{str}
@keyword force_ipv4: Ignore ipv6 IP addresses (default is True)
@type force_ipv4: C{bool}
@return: C{(Node, ip_addresses)} tuple of Node instance and
list of ip_address on success.
"""
start = time.time()
end = start + timeout
def is_supported(address):
"""Return True for supported address"""
if force_ipv4 and not len(address.split('.')) == 4:
return False
return True
def filter_addresses(addresses):
"""Return list of supported addresses"""
return [a for a in addresses if is_supported(a)]
while time.time() < end:
nodes = [n for n in self.list_nodes() if n.uuid == node.uuid]
if len(nodes) > 1:
raise LibcloudError(value=('Booted single node[%s], ' % node
+ 'but multiple nodes have same UUID'),
driver=self)
if len(nodes) == 1:
print nodes[0].state, getattr(nodes[0], ssh_interface)
if len(nodes) == 1 and filter_addresses(getattr(nodes[0], ssh_interface)) \
and nodes[0].state == NodeState.RUNNING:
return nodes[0], filter_addresses(getattr(nodes[0], ssh_interface))
else:
time.sleep(wait_period)
continue
raise LibcloudError(value='Timed out after %s seconds' % (timeout),
driver=self) | 0.000921 |
def subvolume_delete(name=None, names=None, commit=None):
'''
Delete the subvolume(s) from the filesystem
The user can remove one single subvolume (name) or multiple of
them at the same time (names). One of the two parameters needs to
be specified.
Please, refer to the documentation to understand the implication
on the transactions, and when the subvolume is really deleted.
Return True if the subvolume is deleted, False if the subvolume
was already missing.
name
Name of the subvolume to remove
names
List of names of subvolumes to remove
commit
* 'after': Wait for transaction commit at the end
* 'each': Wait for transaction commit after each delete
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_delete /var/volumes/tmp
salt '*' btrfs.subvolume_delete /var/volumes/tmp commit=after
'''
if not name and not (names and type(names) is list):
raise CommandExecutionError('Provide a value for the name parameter')
if commit and commit not in ('after', 'each'):
raise CommandExecutionError('Value for commit not recognized')
# Filter the names and take the ones that are still there
names = [n for n in itertools.chain([name], names or [])
if n and subvolume_exists(n)]
# If the subvolumes are gone, we are done
if not names:
return False
cmd = ['btrfs', 'subvolume', 'delete']
if commit == 'after':
cmd.append('--commit-after')
elif commit == 'each':
cmd.append('--commit-each')
cmd.extend(names)
res = __salt__['cmd.run_all'](cmd)
salt.utils.fsutils._verify_run(res)
return True | 0.000582 |
def getcwd(fs_encoding=FS_ENCODING, cwd_fnc=os.getcwd):
'''
Get current work directory's absolute path.
Like os.getcwd but guaranteed to return a unicode-str object.
:param fs_encoding: filesystem encoding, defaults to autodetected
:type fs_encoding: str
:param cwd_fnc: callable used to get the path, defaults to os.getcwd
:type cwd_fnc: Callable
:return: path
:rtype: str
'''
path = fsdecode(cwd_fnc(), fs_encoding=fs_encoding)
return os.path.abspath(path) | 0.001972 |
def _checkin_remote_bundle(self, remote, ref):
"""
Checkin a remote bundle from a remote
:param remote: a Remote object
:param ref: Any bundle reference
:return: The vid of the loaded bundle
"""
from ambry.bundle.process import call_interval
from ambry.orm.exc import NotFoundError
from ambry.orm import Remote
from ambry.util.flo import copy_file_or_flo
from tempfile import NamedTemporaryFile
assert isinstance(remote, Remote)
@call_interval(5)
def cb(r, total):
self.logger.info("{}: Downloaded {} bytes".format(ref, total))
b = None
try:
b = self.bundle(ref)
self.logger.info("{}: Already installed".format(ref))
vid = b.identity.vid
except NotFoundError:
self.logger.info("{}: Syncing".format(ref))
db_dir = self.filesystem.downloads('bundles')
db_f = os.path.join(db_dir, ref) #FIXME. Could get multiple versions of same file. ie vid and vname
if not os.path.exists(os.path.join(db_dir, db_f)):
self.logger.info("Downloading bundle '{}' to '{}".format(ref, db_f))
with open(db_f, 'wb') as f_out:
with remote.checkout(ref) as f:
copy_file_or_flo(f, f_out, cb=cb)
f_out.flush()
self.checkin_bundle(db_f)
b = self.bundle(ref) # Should exist now.
b.dataset.data['remote_name'] = remote.short_name
b.dataset.upstream = remote.url
b.dstate = b.STATES.CHECKEDOUT
b.commit()
finally:
if b:
b.progress.close()
vid = b.identity.vid
return vid | 0.003313 |
def _process_stockprop(self, limit):
"""
This will add a depiction association between a strain and
images hosted at FlyBase.
:param limit:
:return:
"""
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
model = Model(graph)
raw = '/'.join((self.rawdir, 'stockprop'))
LOG.info("processing stock-image depictions")
line_counter = 0
with open(raw, 'r') as f:
f.readline() # read the header row; skip
filereader = csv.reader(f, delimiter='\t', quotechar='\"')
for line in filereader:
# skip comments
if re.match(r'#', ''.join(line)) or ''.join(line) == '':
continue
(stockprop_id, stock_id, cvterm, value, rank) = line
line_counter += 1
if self.test_mode and self.test_keys['strain'] is not None \
and int(stock_id) not in self.test_keys['strain']:
continue
sid = self.idhash['stock'].get(stock_id)
# linked_image
if cvterm == "linked_image" and re.match(r'FBim', value):
# FIXME make sure this image url is perm
image_url = 'http://flybase.org/tmp-shared/reports/'+value+'.png'
if sid is not None:
model.addDepiction(sid, image_url)
# TODO should this be a Reference object?
# TODO add the stockprop_pub table when there is data to pull
if not self.test_mode and limit is not None and line_counter > limit:
break
return | 0.00227 |
def _parse_fc(self, f, natom, dim):
"""Parse force constants part
The physical unit of force constants in the file is Ry/au^2.
"""
ndim = np.prod(dim)
fc = np.zeros((natom, natom * ndim, 3, 3), dtype='double', order='C')
for k, l, i, j in np.ndindex((3, 3, natom, natom)):
line = f.readline()
for i_dim in range(ndim):
line = f.readline()
# fc[i, j * ndim + i_dim, k, l] = float(line.split()[3])
fc[j, i * ndim + i_dim, l, k] = float(line.split()[3])
return fc | 0.003407 |
def magic_memit(self, line=''):
"""Measure memory usage of a Python statement
Usage, in line mode:
%memit [-r<R>t<T>i<I>] statement
Options:
-r<R>: repeat the loop iteration <R> times and take the best result.
Default: 1
-t<T>: timeout after <T> seconds. Default: None
-i<I>: Get time information at an interval of I times per second.
Defaults to 0.1 so that there are ten measurements per second.
-c: If present, add the memory usage of any children process to the report.
Examples
--------
::
In [1]: import numpy as np
In [2]: %memit np.zeros(1e7)
maximum of 1: 76.402344 MiB per loop
In [3]: %memit np.ones(1e6)
maximum of 1: 7.820312 MiB per loop
In [4]: %memit -r 10 np.empty(1e8)
maximum of 10: 0.101562 MiB per loop
"""
opts, stmt = self.parse_options(line, 'r:t:i:c', posix=False, strict=False)
repeat = int(getattr(opts, 'r', 1))
if repeat < 1:
repeat = 1
timeout = int(getattr(opts, 't', 0))
if timeout <= 0:
timeout = None
interval = float(getattr(opts, 'i', 0.1))
include_children = 'c' in opts
# I've noticed we get less noisy measurements if we run
# a garbage collection first
import gc
gc.collect()
mem_usage = 0
counter = 0
baseline = memory_usage()[0]
while counter < repeat:
counter += 1
tmp = memory_usage((_func_exec, (stmt, self.shell.user_ns)),
timeout=timeout, interval=interval, max_usage=True,
include_children=include_children)
mem_usage = max(mem_usage, tmp[0])
if mem_usage:
print('peak memory: %.02f MiB, increment: %.02f MiB' %
(mem_usage, mem_usage - baseline))
else:
print('ERROR: could not read memory usage, try with a lower interval '
'or more iterations') | 0.000521 |
def store(self, extractions: List[Extraction], attribute: str, group_by_tags: bool = True) -> None:
"""
Records extractions in the container, and for each individual extraction inserts a
ProvenanceRecord to record where the extraction is stored.
Records the "output_segment" in the provenance.
Extractions are always recorded in a list.
Errors out if the segment is primitive, such as a string.
Args:
extractions (List[Extraction]):
attribute (str): where to store the extractions.
group_by_tags (bool): Set to True to use tags as sub-keys, and values of Extractions
with the same tag will be stored in a list as the value of the corresponding key.
(if none of the Extractions has 'tag', do not group by tags)
Returns:
"""
if not isinstance(self._value, dict):
raise StoreExtractionError("segment is type: " + str(type(self._value)))
if not len(extractions):
return
if group_by_tags:
try:
next(x for x in extractions if x.tag) # if there is at least one extraction with a tag
if attribute not in self._extractions:
self._extractions[attribute] = set([])
self._value[attribute] = {}
extraction_provenances = {}
for e in extractions:
tag = e.tag if e.tag else 'NO_TAGS'
if tag not in self.value[attribute]:
self.value[attribute][tag] = [e.value]
else:
if e.value not in self.value[attribute][tag]:
self.value[attribute][tag].append(e.value)
# TODO: handle provenance of non literals
if isinstance(e.value, Number) or isinstance(e.value, str):
extraction_provenances[e.value] = e.prov_id
self._extractions[attribute] = self._extractions[attribute].union(extractions)
new_id = self._document.provenance_id_index # for the purpose of provenance hierarchy tracking
storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path,
attribute,
extraction_provenances,
self.document)
self._document.provenance_id_index_incrementer()
self._document.provenances[new_id] = storage_provenance_record
self.create_storage_provenance(storage_provenance_record)
return
except StopIteration:
pass
if attribute not in self._extractions:
self._extractions[attribute] = set([])
self._value[attribute] = list()
self._extractions[attribute] = self._extractions[attribute].union(extractions)
extraction_provenances = dict()
for a_extraction in extractions:
# TODO: handle provenance of non literals
if isinstance(a_extraction.value, Number) or isinstance(a_extraction.value, str):
extraction_provenances[a_extraction.value] = a_extraction.prov_id
if a_extraction.value not in self._value[attribute]:
self._value[attribute].append(a_extraction.value)
new_id = self._document.provenance_id_index # for the purpose of provenance hierarchy tracking
storage_provenance_record: StorageProvenanceRecord = StorageProvenanceRecord(new_id, self.json_path, attribute,
extraction_provenances,
self.document)
self._document.provenance_id_index_incrementer()
self._document.provenances[new_id] = storage_provenance_record
self.create_storage_provenance(storage_provenance_record) | 0.004945 |
def bind(self, handler, argspec):
"""
:param handler: handler function to register under ``argspec.key``
:param argspec:
:return:
"""
self.handlers[argspec.key].append((handler, argspec)) | 0.01 |
def add_leaf(self, value, do_hash=False):
"""
Add a leaf to the tree.
:param value: hash value (as a Buffer) or hex string
:param do_hash: whether to hash value
"""
self.tree['is_ready'] = False
self._add_leaf(value, do_hash) | 0.007117 |
def approximate_density(
dist,
xloc,
parameters=None,
cache=None,
eps=1.e-7
):
"""
Approximate the probability density function.
Args:
dist : Dist
Distribution in question. May not be an advanced variable.
xloc : numpy.ndarray
Location coordinates. Requires that xloc.shape=(len(dist), K).
eps : float
Acceptable error level for the approximations
retall : bool
If True return Graph with the next calculation state with the
approximation.
Returns:
numpy.ndarray: Local probability density function with
``out.shape == xloc.shape``. To calculate actual density function,
evaluate ``numpy.prod(out, 0)``.
Example:
>>> distribution = chaospy.Normal(1000, 10)
>>> xloc = numpy.array([[990, 1000, 1010]])
>>> print(numpy.around(approximate_density(distribution, xloc), 4))
[[0.0242 0.0399 0.0242]]
>>> print(numpy.around(distribution.pdf(xloc), 4))
[[0.0242 0.0399 0.0242]]
"""
if parameters is None:
parameters = dist.prm.copy()
if cache is None:
cache = {}
xloc = numpy.asfarray(xloc)
lo, up = numpy.min(xloc), numpy.max(xloc)
mu = .5*(lo+up)
eps = numpy.where(xloc < mu, eps, -eps)*xloc
floc = evaluation.evaluate_forward(
dist, xloc, parameters=parameters.copy(), cache=cache.copy())
for d in range(len(dist)):
xloc[d] += eps[d]
tmp = evaluation.evaluate_forward(
dist, xloc, parameters=parameters.copy(), cache=cache.copy())
floc[d] -= tmp[d]
xloc[d] -= eps[d]
floc = numpy.abs(floc / eps)
return floc | 0.000571 |
def load_config_from_files(filenames=None):
"""Load D-Wave Cloud Client configuration from a list of files.
.. note:: This method is not typically used to set up D-Wave Cloud Client configuration.
It is recommended you use :meth:`.Client.from_config` or
:meth:`.config.load_config` instead.
Configuration files comply with standard Windows INI-like format,
parsable with Python's :mod:`configparser`. A section called
``defaults`` contains default values inherited by other sections.
Each filename in the list (each configuration file loaded) progressively upgrades
the final configuration, on a key by key basis, per each section.
Args:
filenames (list[str], default=None):
D-Wave Cloud Client configuration files (paths and names).
If ``None``, searches for a configuration file named ``dwave.conf``
in all system-wide configuration directories, in the user-local
configuration directory, and in the current working directory,
following the user/system configuration paths of :func:`get_configfile_paths`.
Returns:
:obj:`~configparser.ConfigParser`:
:class:`dict`-like mapping of configuration sections (profiles) to
mapping of per-profile keys holding values.
Raises:
:exc:`~dwave.cloud.exceptions.ConfigFileReadError`:
Config file specified or detected could not be opened or read.
:exc:`~dwave.cloud.exceptions.ConfigFileParseError`:
Config file parse failed.
Examples:
This example loads configurations from two files. One contains a default
section with key/values that are overwritten by any profile section that
contains that key/value; for example, profile dw2000b in file dwave_b.conf
overwrites the default URL and client type, which profile dw2000a inherits
from the defaults section, while profile dw2000a overwrites the API token that
profile dw2000b inherits.
The files, which are located in the current working directory, are
(1) dwave_a.conf::
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
and (2) dwave_b.conf::
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
The following example code loads configuration from both these files, with
the defined overrides and inheritance.
.. code:: python
>>> import dwave.cloud as dc
>>> import sys
>>> configuration = dc.config.load_config_from_files(["./dwave_a.conf", "./dwave_b.conf"]) # doctest: +SKIP
>>> configuration.write(sys.stdout) # doctest: +SKIP
[defaults]
endpoint = https://url.of.some.dwavesystem.com/sapi
client = qpu
token = ABC-123456789123456789123456789
[dw2000a]
solver = EXAMPLE_2000Q_SYSTEM
token = DEF-987654321987654321987654321
[dw2000b]
endpoint = https://url.of.some.other.dwavesystem.com/sapi
client = sw
solver = EXAMPLE_2000Q_SYSTEM
"""
if filenames is None:
filenames = get_configfile_paths()
config = configparser.ConfigParser(default_section="defaults")
for filename in filenames:
try:
with open(filename, 'r') as f:
config.read_file(f, filename)
except (IOError, OSError):
raise ConfigFileReadError("Failed to read {!r}".format(filename))
except configparser.Error:
raise ConfigFileParseError("Failed to parse {!r}".format(filename))
return config | 0.002757 |
def del_stmt(self, stmt_loc, exprs):
# Python uses exprlist here, but does *not* obey the usual
# tuple-wrapping semantics, so we embed the rule directly.
"""del_stmt: 'del' exprlist"""
return ast.Delete(targets=[self._assignable(expr, is_delete=True) for expr in exprs],
loc=stmt_loc.join(exprs[-1].loc), keyword_loc=stmt_loc) | 0.015584 |
def _fixIndex(self, index, truncate=False):
"""
@param truncate: If true, negative indices which go past the
beginning of the list will be evaluated as zero.
For example::
>>> L = List([1,2,3,4,5])
>>> len(L)
5
>>> L._fixIndex(-9, truncate=True)
0
"""
assert not isinstance(index, slice), 'slices are not supported (yet)'
if index < 0:
index += self.length
if index < 0:
if not truncate:
raise IndexError('stored List index out of range')
else:
index = 0
return index | 0.002608 |
def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl fill paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
snippet = {
'fill-opacity': VectorStyle.get_style_value(self.opacity),
'fill-color': VectorStyle.get_style_value(self.color),
'fill-outline-color': VectorStyle.get_style_value(self.outline_color)
}
if self.translate:
snippet['fill-translate'] = self.translate
return snippet | 0.008606 |
def paramiko_tunnel(lport, rport, server, remoteip='127.0.0.1', keyfile=None, password=None, timeout=60):
"""launch a tunner with paramiko in a subprocess. This should only be used
when shell ssh is unavailable (e.g. Windows).
This creates a tunnel redirecting `localhost:lport` to `remoteip:rport`,
as seen from `server`.
If you are familiar with ssh tunnels, this creates the tunnel:
ssh server -L localhost:lport:remoteip:rport
keyfile and password may be specified, but ssh config is checked for defaults.
Parameters
----------
lport : int
local port for connecting to the tunnel from this machine.
rport : int
port on the remote machine to connect to.
server : str
The ssh server to connect to. The full ssh server string will be parsed.
user@server:port
remoteip : str [Default: 127.0.0.1]
The remote ip, specifying the destination of the tunnel.
Default is localhost, which means that the tunnel would redirect
localhost:lport on this machine to localhost:rport on the *server*.
keyfile : str; path to public key file
This specifies a key to be used in ssh login, default None.
Regular default ssh keys will be used without specifying this argument.
password : str;
Your ssh password to the ssh server. Note that if this is left None,
you will be prompted for it if passwordless key based login is unavailable.
timeout : int [default: 60]
The time (in seconds) after which no activity will result in the tunnel
closing. This prevents orphaned tunnels from running forever.
"""
if paramiko is None:
raise ImportError("Paramiko not available")
if password is None:
if not _try_passwordless_paramiko(server, keyfile):
password = getpass("%s's password: "%(server))
p = Process(target=_paramiko_tunnel,
args=(lport, rport, server, remoteip),
kwargs=dict(keyfile=keyfile, password=password))
p.daemon=False
p.start()
atexit.register(_shutdown_process, p)
return p | 0.005432 |
def stringify(element, newlines=True):
"""
Return the raw text version of an element (and its child elements).
Example:
>>> from panflute import *
>>> e1 = Emph(Str('Hello'), Space, Str('world!'))
>>> e2 = Strong(Str('Bye!'))
>>> para = Para(e1, Space, e2)
>>> stringify(para)
'Hello world! Bye!\n\n'
:param newlines: add a new line after a paragraph (default True)
:type newlines: :class:`bool`
:rtype: :class:`str`
"""
def attach_str(e, doc, answer):
if hasattr(e, 'text'):
ans = e.text
elif isinstance(e, HorizontalSpaces):
ans = ' '
elif isinstance(e, VerticalSpaces) and newlines:
ans = '\n\n'
elif type(e) == Citation:
ans = ''
else:
ans = ''
answer.append(ans)
answer = []
f = partial(attach_str, answer=answer)
element.walk(f)
return ''.join(answer) | 0.001032 |
def which(program, environ=None):
"""
Find out if an executable exists in the supplied PATH.
If so, the absolute path to the executable is returned.
If not, an exception is raised.
:type program: string
:param program: Executable to be checked for
:type environ: dict
:param environ: Any additional ENV variables required, specifically PATH
:return: string -- the location if found; otherwise raises :class:`command.CommandException`
"""
def is_exe(path):
"""
Helper method to check if a file exists and is executable
"""
return isfile(path) and os.access(path, os.X_OK)
if program is None:
raise CommandException("Invalid program name passed")
fpath, fname = split(program)
if fpath:
if is_exe(program):
return program
else:
if environ is None:
environ = os.environ
for path in environ['PATH'].split(os.pathsep):
exe_file = join(path, program)
if is_exe(exe_file):
return exe_file
raise CommandException("Could not find %s" % program) | 0.001768 |
def delete(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
post = get_post(id)
db.session.delete(post)
db.session.commit()
return redirect(url_for("blog.index")) | 0.003891 |
def accel_ES(q: np.ndarray):
"""
Compute the gravitational accelerations in the earth-sun system.
q is a row vector of 6 elements: sun (x, y, z), earth (x, y, z)
"""
# Number of celestial bodies
num_bodies: int = 2
# Number of dimensions in arrays; 3 spatial dimensions times the number of bodies
dims = 3 * num_bodies
# Body 0 is the sun; Body 1 is the earth
m0 = mass[0]
m1 = mass[1]
# Extract position of the sun and earth as 3-vectors
pos_0 = q[slices[0]]
pos_1 = q[slices[1]]
# Displacement vector from sun to earth
dv_01: np.ndarray = pos_1 - pos_0
# Distance from sun to earth
r_01: float = np.linalg.norm(dv_01)
# Unit vector pointing from sun to earth
udv_01 = dv_01 / r_01
# The force between these has magnitude G*m0*m1 / r^2
f_01: float = (G * m0 * m1) / (r_01 ** 2)
# Initialize acceleration as 6x1 array
a: np.ndarray = np.zeros(dims)
# The force vectors are attractive
a[slices[0]] += f_01 * udv_01 / m0
a[slices[1]] -= f_01 * udv_01 / m1
# Return the acceleration vector
return a | 0.004421 |
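# A minimal usage sketch for accel_ES(); `G`, `mass` and `slices` are module-level
# names the function relies on, so illustrative SI values are assumed here.
import numpy as np

G = 6.674e-11                              # gravitational constant, m^3 kg^-1 s^-2
mass = np.array([1.989e30, 5.972e24])      # sun and earth masses, kg
slices = [slice(0, 3), slice(3, 6)]        # index range of each body within q

q = np.array([0.0, 0.0, 0.0,               # sun at the origin
              1.496e11, 0.0, 0.0])         # earth ~1 AU away on the x axis
print(accel_ES(q))                         # earth's x-acceleration is about -5.9e-3 m/s^2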
def buffer(self, item):
"""
Receive an item and write it.
"""
key = self.get_key_from_item(item)
if not self.grouping_info.is_first_file_item(key):
self.items_group_files.add_item_separator_to_file(key)
self.grouping_info.ensure_group_info(key)
self.items_group_files.add_item_to_file(item, key) | 0.00551 |
def find_bright_peaks(self, data, threshold=None, sigma=5, radius=5):
"""
Find bright peak candidates in (data). (threshold) specifies a
threshold value below which an object is not considered a candidate.
If threshold is None, a default is calculated using (sigma).
(radius) defines a pixel radius for determining local maxima--if the
desired objects are larger in size, specify a larger radius.
The routine returns a list of candidate object coordinate tuples
(x, y) in data.
"""
if threshold is None:
# set threshold to default if none provided
threshold = self.get_threshold(data, sigma=sigma)
self.logger.debug("threshold defaults to %f (sigma=%f)" % (
threshold, sigma))
#self.logger.debug("filtering")
data_max = filters.maximum_filter(data, radius)
maxima = (data == data_max)
diff = data_max > threshold
maxima[diff == 0] = 0
#self.logger.debug("finding")
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
peaks = []
for dy, dx in slices:
xc = (dx.start + dx.stop - 1) / 2.0
yc = (dy.start + dy.stop - 1) / 2.0
# This is only an approximate center; use FWHM or centroid
# calculation to refine further
peaks.append((xc, yc))
self.logger.debug("peaks=%s" % (str(peaks)))
return peaks | 0.002632 |
def run_jar(self, mem=None):
"""
Special case of run() when the executable is a JAR file.
"""
cmd = config.get_command('java')
if mem:
cmd.append('-Xmx%s' % mem)
cmd.append('-jar')
cmd += self.cmd
self.run(cmd) | 0.006993 |
def cli(ctx, email, first_name, last_name, password, role="user", metadata={}):
"""Create a new user
Output:
an empty dictionary
"""
return ctx.gi.users.create_user(email, first_name, last_name, password, role=role, metadata=metadata) | 0.007937 |
def add_resource_context(router: web.AbstractRouter,
url_prefix: str = None,
name_prefix: str = None) -> Iterator[Any]:
"""Context manager for adding resources for given router.
The main goal of the context manager is to simplify the process of adding
resources with routes to the router. It also reduces repetition when
supplying new resources, by reusing URL & name prefixes for all routes
inside the context manager.
Behind the scene, context manager returns a function which calls::
resource = router.add_resource(url, name)
resource.add_route(method, handler)
**Usage**::
with add_resource_context(app.router, '/api', 'api') as add_resource:
add_resource('/', get=views.index)
add_resource('/news', get=views.list_news, post=views.create_news)
:param router: Route to add resources to.
:param url_prefix: If supplied prepend this prefix to each resource URL.
:param name_prefix: If supplied prepend this prefix to each resource name.
"""
def add_resource(url: str,
get: View = None,
*,
name: str = None,
**kwargs: Any) -> web.Resource:
"""Inner function to create resource and add necessary routes to it.
Support adding routes of all methods, supported by aiohttp, as
GET/POST/PUT/PATCH/DELETE/HEAD/OPTIONS/*, e.g.,
::
with add_resource_context(app.router) as add_resource:
add_resource('/', get=views.get, post=views.post)
add_resource('/wildcard', **{'*': views.wildcard})
:param url:
Resource URL. If ``url_prefix`` setup in context it will be
prepended to URL with ``/``.
:param get:
GET handler. Only handler to be setup without explicit call.
:param name: Resource name.
:type name: str
:rtype: aiohttp.web.Resource
"""
kwargs['get'] = get
if url_prefix:
url = '/'.join((url_prefix.rstrip('/'), url.lstrip('/')))
if not name and get:
name = get.__name__
if name_prefix and name:
name = '.'.join((name_prefix.rstrip('.'), name.lstrip('.')))
resource = router.add_resource(url, name=name)
for method, handler in kwargs.items():
if handler is None:
continue
resource.add_route(method.upper(), handler)
return resource
yield add_resource | 0.000387 |
def top_comments(self):
"""Return a markdown representation of the top comments."""
num = min(10, len(self.comments))
if num <= 0:
return ''
top_comments = sorted(
self.comments, key=lambda x: (-x.score, str(x.author)))[:num]
retval = self.post_header.format('Top Comments')
for comment in top_comments:
title = self._safe_title(comment.submission)
retval += tt('1. {}: {}\'s [comment]({}) in {}\n').format(
self._points(comment.score), self._user(comment.author),
self._permalink(comment), title)
return tt('{}\n').format(retval) | 0.002999 |
def slug(self, language=None, fallback=True):
"""
Return the slug of the page depending on the given language.
:param language: wanted language, if not defined default is used.
:param fallback: if ``True``, the slug will also be searched in other \
languages.
"""
slug = self.get_content(language, 'slug', language_fallback=fallback)
return slug | 0.004854 |
def max_posterior(lnps_per_walker, dim):
"""Burn in based on samples being within dim/2 of maximum posterior.
Parameters
----------
lnps_per_walker : 2D array
Array of values that are proportional to the log posterior values. Must
have shape ``nwalkers x niterations``.
dim : int
The dimension of the parameter space.
Returns
-------
burn_in_idx : array of int
The burn in indices of each walker. If a walker is not burned in, its
index will be equal to the length of the chain.
is_burned_in : array of bool
Whether or not a walker is burned in.
"""
if len(lnps_per_walker.shape) != 2:
raise ValueError("lnps_per_walker must have shape "
"nwalkers x niterations")
# find the value to compare against
max_p = lnps_per_walker.max()
criteria = max_p - dim/2.
nwalkers, _ = lnps_per_walker.shape
burn_in_idx = numpy.empty(nwalkers, dtype=int)
is_burned_in = numpy.empty(nwalkers, dtype=bool)
# find the first iteration in each chain where the logpost has exceeded
# max_p - dim/2
for ii in range(nwalkers):
chain = lnps_per_walker[ii, :]
passedidx = numpy.where(chain >= criteria)[0]
is_burned_in[ii] = passedidx.size > 0
if is_burned_in[ii]:
burn_in_idx[ii] = passedidx[0]
else:
burn_in_idx[ii] = NOT_BURNED_IN_ITER
return burn_in_idx, is_burned_in | 0.000676 |
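# A quick sketch of max_posterior() on a toy two-walker chain (assumes `import numpy`);
# with dim=2 the burn-in criterion is max(lnp) - 1, and both walkers burn in here,
# so the module-level NOT_BURNED_IN_ITER constant is never reached.
import numpy

lnps = numpy.array([[0.0, 1.0, 5.0],
                    [5.0, 5.0, 5.0]])
idx, burned = max_posterior(lnps, dim=2)
print(idx, burned)   # [2 0] [ True  True]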
def _hab_s(s):
"""Define the boundary between Region 2a and 2b, h=f(s)
Parameters
----------
s : float
Specific entropy, [kJ/kgK]
Returns
-------
h : float
Specific enthalpy, [kJ/kg]
References
----------
IAPWS, Revised Supplementary Release on Backward Equations for Pressure
as a Function of Enthalpy and Entropy p(h,s) for Regions 1 and 2 of the
IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of
Water and Steam, http://www.iapws.org/relguide/Supp-PHS12-2014.pdf, Eq 2
Examples
--------
>>> _hab_s(7)
3376.437884
"""
smin = _Region2(_TSat_P(4), 4)["s"]
smax = _Region2(1073.15, 4)["s"]
if s < smin:
h = 0
elif s > smax:
h = 5000
else:
h = -0.349898083432139e4 + 0.257560716905876e4*s - \
0.421073558227969e3*s**2+0.276349063799944e2*s**3
return h | 0.001086 |
def generate_hash(data: dict, token: str) -> str:
"""
Generate secret hash
:param data:
:param token:
:return:
"""
secret = hashlib.sha256()
secret.update(token.encode('utf-8'))
sorted_params = collections.OrderedDict(sorted(data.items()))
msg = '\n'.join("{}={}".format(k, v) for k, v in sorted_params.items() if k != 'hash')
return hmac.new(secret.digest(), msg.encode('utf-8'), digestmod=hashlib.sha256).hexdigest() | 0.006479 |
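# A usage sketch for generate_hash(): the HMAC key is sha256(token) and the message is
# the sorted "k=v" lines, excluding any existing 'hash' field. Values are illustrative.
import collections, hashlib, hmac

payload = {'amount': '100', 'currency': 'USD', 'hash': 'ignored'}
digest = generate_hash(payload, token='my-secret-token')
print(len(digest))   # 64 hex characters from HMAC-SHA256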
def get_plugins() -> Dict[str, pkg_resources.EntryPoint]:
"""
Get all available plugins for unidown.
:return: mapping of plugin names to entry points
:rtype: Dict[str, ~pkg_resources.EntryPoint]
"""
return {entry.name: entry for entry in pkg_resources.iter_entry_points('unidown.plugin')} | 0.009554 |
def get_cache_path(profile_name):
'''Get the cache directory for this account; create it if it does not exist.'''
path = os.path.join(CACHE_DIR, profile_name, 'cache')
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
return path | 0.00463 |
def create_access_token(self, request, credentials):
"""Create and save a new access token.
Similar to OAuth 2, indication of granted scopes will be included as a
space separated list in ``oauth_authorized_realms``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:returns: The token as an urlencoded string.
"""
request.realms = self.request_validator.get_realms(
request.resource_owner_key, request)
token = {
'oauth_token': self.token_generator(),
'oauth_token_secret': self.token_generator(),
# Backport the authorized scopes indication used in OAuth2
'oauth_authorized_realms': ' '.join(request.realms)
}
token.update(credentials)
self.request_validator.save_access_token(token, request)
return urlencode(token.items()) | 0.002186 |
def translate(env, func, *args, **kwargs):
"""
Given a shellcode environment, a function and its parameters, translate
the function to a list of shellcode operations ready to be compiled or
assembled using :meth:`~pwnypack.shellcode.base.BaseEnvironment.compile`
or :meth:`~pwnypack.shellcode.base.BaseEnvironment.assemble`.
Arguments:
env(~pwnypack.shellcode.base.Base): An instance of a shellcode
environment.
func(callable): The function to translate to shellcode.
args(...): The positional arguments for the function.
kwargs(...): The keyword arguments for the function.
Returns:
list: The high-level shellcode operations.
"""
func_code = six.get_function_code(func)
func_globals = dict(__builtins__)
func_globals.update(six.get_function_globals(func))
ops = bc.disassemble(func_code.co_code)
program = []
f_args = inspect.getcallargs(func, *args, **kwargs)
variables = dict(
(func_code.co_varnames.index(arg_name), arg_value)
for arg_name, arg_value in six.iteritems(f_args)
)
stack = []
for op in ops:
if op.name == 'LOAD_CONST':
stack.append(func_code.co_consts[op.arg])
elif op.name == 'LOAD_GLOBAL':
global_name = func_code.co_names[op.arg]
stack.append(getattr(env, global_name, func_globals.get(global_name)))
elif op.name == 'LOAD_FAST':
var_name = func_code.co_varnames[op.arg]
stack.append(getattr(env, var_name, variables.get(op.arg)))
elif op.name == 'BUILD_LIST':
items = stack[-op.arg:]
del stack[-op.arg:]
stack.append(items)
elif op.name == 'LOAD_ATTR':
obj = stack.pop()
stack.append(getattr(obj, func_code.co_names[op.arg]))
elif op.name == 'CALL_FUNCTION':
nargs = op.arg & 0xff
nkwargs = op.arg >> 8
if nkwargs:
f_kwargs = dict(zip(stack[-nkwargs * 2::2], stack[-nkwargs * 2 + 1::2]))
del stack[-nkwargs * 2:]
else:
f_kwargs = {}
if nargs:
f_args = stack[-nargs:]
del stack[-nargs:]
else:
f_args = []
f = stack.pop()
if isinstance(f, Fragment):
stack.append(f(env, *f_args, **f_kwargs))
else:
stack.append(f(*f_args, **f_kwargs))
elif op.name == 'STORE_FAST':
value = stack.pop()
var_name = func_code.co_varnames[op.arg]
var = getattr(env, var_name, variables.get(op.arg, None))
if isinstance(var, Register):
program.append(LoadRegister(var, value))
else:
variables[op.arg] = value
elif op.name == 'POP_TOP':
value = stack.pop()
if isinstance(value, SyscallInvoke):
program.append(value)
elif isinstance(value, list):
program.extend(value)
else:
raise ValueError('No idea how to compile %s' % (value,))
elif op.name == 'RETURN_VALUE':
stack.pop()
elif op.name == 'DUP_TOP':
value = stack[-1]
if isinstance(value, SyscallInvoke):
stack.insert(-1, env.SYSCALL_RET_REG)
else:
stack.append(value)
elif op.name == 'BINARY_SUBSCR':
index = stack.pop()
value = stack.pop()
stack.append(value[index])
elif op.name == 'STORE_SUBSCR':
index = stack.pop()
value = stack.pop()
new_value = stack.pop()
var = value[index]
if isinstance(var, Register):
program.append(LoadRegister(var, new_value))
else:
value[index] = new_value
elif op.name == 'INPLACE_ADD':
value = stack.pop()
reg = stack.pop()
if not isinstance(reg, Register):
raise TypeError('In-place addition is only supported on registers')
program.extend(env.reg_add(reg, value))
stack.append(reg)
elif op.name == 'INPLACE_SUBTRACT':
value = stack.pop()
reg = stack.pop()
if not isinstance(reg, Register):
raise TypeError('In-place subtraction is only supported on registers')
program.extend(env.reg_sub(reg, value))
stack.append(reg)
else:
raise RuntimeError('Unsupported opcode: %s' % op.name)
return program | 0.001065 |
def create_index_table(environ, envdir):
''' create an html table
Parameters:
environ (dict):
A tree environment dictionary
envdir (str):
The filepath for the env directory
Returns:
An html table definition string
'''
table_header = """<table id="list" cellpadding="0.1em" cellspacing="0">
<colgroup><col width="55%"/><col width="20%"/><col width="25%"/></colgroup>
<thead>
<tr><th><a href="?C=N&O=A">File Name</a> <a href="?C=N&O=D"> ↓ </a></th><th><a href="?C=S&O=A">File Size</a> <a href="?C=S&O=D"> ↓ </a></th><th><a href="?C=M&O=A">Date</a> <a href="?C=M&O=D"> ↓ </a></th></tr>
</thead><tbody>
<tr><td><a href="../">Parent directory/</a></td><td>-</td><td>-</td></tr>"""
table_footer = """</tbody></table>"""
# create table
table = table_header
# loop over the environment
for section, values in environ.items():
if section == 'default':
continue
for tree_name, tree_path in values.items():
skipmsg = 'Skipping {0} for {1}'.format(tree_name, section)
if '_root' in tree_name:
continue
# create the src and target links
src = tree_path
link = os.path.join(envdir, tree_name.upper())
# get the local time of the symlink
try:
stattime = time.strftime('%d-%b-%Y %H:%M', time.localtime(os.stat(src).st_mtime))
except OSError:
print("{0} does not appear to exist, skipping...".format(src))
_remove_link(link)
continue
# skip the sas_base_dir
if section == 'general' and 'sas_base_dir' in tree_name:
print(skipmsg)
continue
# only create symlinks
if section == 'general' and tree_name in ['cas_load', 'staging_data']:
# only create links here if the target exist
if os.path.exists(src):
make_symlink(src, link)
else:
print(skipmsg)
else:
print('Processing {0} for {1}'.format(tree_name, section))
make_symlink(src, link)
# create the table entry
if os.path.exists(link):
table += ' <tr><td><a href="{0}/">{0}/</a></td><td>-</td><td>{1}</td></tr>\n'.format(tree_name.upper(), stattime)
table += table_footer
return table | 0.003116 |
def temporarily_enabled(self):
"""
Temporarily enable the cache (useful for testing)
"""
old_setting = self.options.enabled
self.enable()
try:
yield
finally:
self.options.enabled = old_setting | 0.007353 |
def send_message(self, message):
"""Send chat message to this steam user
:param message: message to send
:type message: str
"""
self._steam.send(MsgProto(EMsg.ClientFriendMsg), {
'steamid': self.steam_id,
'chat_entry_type': EChatEntryType.ChatMsg,
'message': message.encode('utf8'),
}) | 0.005348 |
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url) | 0.005797 |
def plot(result_pickle_file_path, show, plot_save_file):
"""
[sys_analyser] draw result DataFrame
"""
import pandas as pd
from .plot import plot_result
result_dict = pd.read_pickle(result_pickle_file_path)
plot_result(result_dict, show, plot_save_file) | 0.003559 |
def _to_ned(self):
"""
Switches the reference frame to NED
"""
if self.ref_frame == 'USE':
# Rotate
return utils.use_to_ned(self.tensor), \
utils.use_to_ned(self.tensor_sigma)
elif self.ref_frame == 'NED':
# Already NED
return self.tensor, self.tensor_sigma
else:
raise ValueError('Reference frame %s not recognised - cannot '
'transform to NED!' % self.ref_frame) | 0.003868 |
def _set_fill_word(self, v, load=False):
"""
Setter method for fill_word, mapped from YANG variable /interface/fc_port/fill_word (fc-fillword-cfg-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_fill_word is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fill_word() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'idle-idle': {'value': 0}, u'arbff-arbff': {'value': 1}, u'idle-arbff': {'value': 2}, u'aa-then-ia': {'value': 3}},), default=unicode("idle-idle"), is_leaf=True, yang_name="fill-word", rest_name="fill-word", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Fill Word', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='fc-fillword-cfg-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fill_word must be of a type compatible with fc-fillword-cfg-type""",
'defined-type': "brocade-interface:fc-fillword-cfg-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'idle-idle': {'value': 0}, u'arbff-arbff': {'value': 1}, u'idle-arbff': {'value': 2}, u'aa-then-ia': {'value': 3}},), default=unicode("idle-idle"), is_leaf=True, yang_name="fill-word", rest_name="fill-word", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Fill Word', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='fc-fillword-cfg-type', is_config=True)""",
})
self.__fill_word = t
if hasattr(self, '_set'):
self._set() | 0.004373 |
def define_from_fits(cls, fitsobj, extnum=0):
"""Define class object from header information in FITS file.
Parameters
----------
fitsobj: file object
FITS file whose header contains the DTU information
needed to initialise the members of this class.
extnum : int
Extension number (first extension is 0)
"""
# read input FITS file
with fits.open(fitsobj) as hdulist:
image_header = hdulist[extnum].header
return cls.define_from_header(image_header) | 0.003484 |
def merged_args_dicts(global_args, subcommand_args):
'''We deal with docopt args from the toplevel peru parse and the subcommand
parse. We don't want False values for a flag in the subcommand to override
True values if that flag was given at the top level. This function
specifically handles that case.'''
merged = global_args.copy()
for key, val in subcommand_args.items():
if key not in merged:
merged[key] = val
elif type(merged[key]) is type(val) is bool:
merged[key] = merged[key] or val
else:
raise RuntimeError("Unmergable args.")
return merged | 0.001565 |
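# A small sketch of merged_args_dicts(): boolean flags are OR-ed rather than
# overwritten, while keys unique to the subcommand are simply added.
global_args = {'--verbose': True, '--quiet': False}
sub_args = {'--verbose': False, '<target>': 'origin'}
print(merged_args_dicts(global_args, sub_args))
# {'--verbose': True, '--quiet': False, '<target>': 'origin'}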
def travis_build_package():
"""Assumed called on Travis, to prepare a package to be deployed
This method prints on stdout for Travis.
Return is obj to pass to sys.exit() directly
"""
travis_tag = os.environ.get('TRAVIS_TAG')
if not travis_tag:
print("TRAVIS_TAG environment variable is not present")
return "TRAVIS_TAG environment variable is not present"
try:
name, version = travis_tag.split("_")
except ValueError:
print("TRAVIS_TAG is not '<package_name>_<version>' (tag is: {})".format(travis_tag))
return "TRAVIS_TAG is not '<package_name>_<version>' (tag is: {})".format(travis_tag)
try:
version = Version(version)
except InvalidVersion:
print("Version must be a valid PEP440 version (version is: {})".format(version))
return "Version must be a valid PEP440 version (version is: {})".format(version)
if name.lower() in OMITTED_RELEASE_PACKAGES:
print("The input package {} has been disabled for release from Travis.CI.".format(name))
return
abs_dist_path = Path(os.environ['TRAVIS_BUILD_DIR'], 'dist')
create_package(name, str(abs_dist_path))
print("Produced:\n{}".format(list(abs_dist_path.glob('*'))))
pattern = "*{}*".format(version)
packages = list(abs_dist_path.glob(pattern))
if not packages:
return "Package version does not match tag {}, abort".format(version)
pypi_server = os.environ.get("PYPI_SERVER", "default PyPI server")
print("Package created as expected and will be pushed to {}".format(pypi_server)) | 0.004386 |
def _main_loop(self):
'''
The internal while true main loop for the redis monitor
'''
self.logger.debug("Running main loop")
old_time = 0
while True:
for plugin_key in self.plugins_dict:
obj = self.plugins_dict[plugin_key]
self._process_plugin(obj)
if self.settings['STATS_DUMP'] != 0:
new_time = int(old_div(time.time(), self.settings['STATS_DUMP']))
# only log every X seconds
if new_time != old_time:
self._dump_stats()
if self.settings['STATS_DUMP_CRAWL']:
self._dump_crawl_stats()
if self.settings['STATS_DUMP_QUEUE']:
self._dump_queue_stats()
old_time = new_time
self._report_self()
time.sleep(self.settings['SLEEP_TIME']) | 0.003205 |
def parse_datetime_range(time_filter):
"""
Parse the url param to python objects.
From what time range to divide by a.time.gap into intervals.
Defaults to q.time and otherwise 90 days.
Validate in API: re.search("\\[(.*) TO (.*)\\]", value)
:param time_filter: [2013-03-01 TO 2013-05-01T00:00:00]
:return: datetime.datetime(2013, 3, 1, 0, 0), datetime.datetime(2013, 5, 1, 0, 0)
"""
if not time_filter:
time_filter = "[* TO *]"
start, end = parse_solr_time_range_as_pair(time_filter)
start, end = parse_datetime(start), parse_datetime(end)
return start, end | 0.003252 |
async def service_status(self, name):
"""Pull the current status of a service by name.
Returns:
dict: A dictionary of service status
"""
return await self.send_command(OPERATIONS.CMD_QUERY_STATUS, {'name': name},
MESSAGES.QueryStatusResponse, timeout=5.0) | 0.011765 |
def _safe_squeeze(arr, *args, **kwargs):
"""
numpy.squeeze will reduce a 1-item array down to a zero-dimensional "array",
which is not necessarily desirable.
This function does the squeeze operation, but ensures that there is at least
1 dimension in the output.
"""
out = np.squeeze(arr, *args, **kwargs)
if np.ndim(out) == 0:
out = out.reshape((1,))
return out | 0.007407 |
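# A short illustration of _safe_squeeze() vs numpy.squeeze (assumes `import numpy as np`):
# squeezing a single-element array keeps at least one dimension.
import numpy as np

arr = np.array([[3.0]])
print(np.squeeze(arr).shape)      # () -- zero-dimensional
print(_safe_squeeze(arr).shape)   # (1,)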
def setFontStrikeOut(self, strikeOut):
"""
Sets whether or not this editor is currently striking out the text.
:param strikeOut | <bool>
"""
font = self.currentFont()
font.setStrikeOut(strikeOut)
self.setCurrentFont(font) | 0.010033 |
def make_setup_state(
self,
app: 'Quart',
first_registration: bool,
*,
url_prefix: Optional[str]=None,
) -> 'BlueprintSetupState':
"""Return a blueprint setup state instance.
Arguments:
first_registration: True if this is the first registration
of this blueprint on the app.
url_prefix: An optional prefix to all rules
"""
return BlueprintSetupState(self, app, first_registration, url_prefix=url_prefix) | 0.011111 |
def handle_aggregated_quotas(sender, instance, **kwargs):
""" Call aggregated quotas fields update methods """
quota = instance
# aggregation is not supported for global quotas.
if quota.scope is None:
return
quota_field = quota.get_field()
# usage aggregation should not count another usage aggregator field to avoid calls duplication.
if isinstance(quota_field, fields.UsageAggregatorQuotaField) or quota_field is None:
return
signal = kwargs['signal']
for aggregator_quota in quota_field.get_aggregator_quotas(quota):
field = aggregator_quota.get_field()
if signal == signals.post_save:
field.post_child_quota_save(aggregator_quota.scope, child_quota=quota, created=kwargs.get('created'))
elif signal == signals.pre_delete:
field.pre_child_quota_delete(aggregator_quota.scope, child_quota=quota) | 0.005568 |
def get_grammar(self):
"""
Returns the grammar of the UAI file.
"""
network_name = Word(alphas).setResultsName('network_name')
no_variables = Word(nums).setResultsName('no_variables')
grammar = network_name + no_variables
self.no_variables = int(grammar.parseString(self.network)['no_variables'])
domain_variables = (Word(nums)*self.no_variables).setResultsName('domain_variables')
grammar += domain_variables
no_functions = Word(nums).setResultsName('no_functions')
grammar += no_functions
self.no_functions = int(grammar.parseString(self.network)['no_functions'])
integer = Word(nums).setParseAction(lambda t: int(t[0]))
for function in range(0, self.no_functions):
scope_grammar = Word(nums).setResultsName('fun_scope_' + str(function))
grammar += scope_grammar
function_scope = grammar.parseString(self.network)['fun_scope_' + str(function)]
function_grammar = ((integer)*int(function_scope)).setResultsName('fun_' + str(function))
grammar += function_grammar
floatnumber = Combine(Word(nums) + Optional(Literal(".") + Optional(Word(nums))))
for function in range(0, self.no_functions):
no_values_grammar = Word(nums).setResultsName('fun_no_values_' + str(function))
grammar += no_values_grammar
no_values = grammar.parseString(self.network)['fun_no_values_' + str(function)]
values_grammar = ((floatnumber)*int(no_values)).setResultsName('fun_values_' + str(function))
grammar += values_grammar
return grammar | 0.007181 |
def delete_pipeline(app='', pipeline_name=''):
"""Delete _pipeline_name_ from _app_."""
safe_pipeline_name = normalize_pipeline_name(name=pipeline_name)
LOG.warning('Deleting Pipeline: %s', safe_pipeline_name)
url = '{host}/pipelines/{app}/{pipeline}'.format(host=API_URL, app=app, pipeline=safe_pipeline_name)
response = requests.delete(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if not response.ok:
LOG.debug('Delete response code: %d', response.status_code)
if response.status_code == requests.status_codes.codes['method_not_allowed']:
raise SpinnakerPipelineDeletionFailed('Failed to delete "{0}" from "{1}", '
'possibly invalid Pipeline name.'.format(safe_pipeline_name, app))
else:
LOG.debug('Pipeline missing, no delete required.')
LOG.debug('Deleted "%s" Pipeline response:\n%s', safe_pipeline_name, response.text)
return response.text | 0.007099 |
def main_btn_clicked(self, widget, data=None):
"""
Button switches to Dev Assistant GUI main window
"""
self.remove_link_button()
data = dict()
data['debugging'] = self.debugging
self.run_window.hide()
self.parent.open_window(widget, data) | 0.006601 |
def create_cloud(self):
"""
Create instances for the cloud providers
"""
instances = []
for i in range(self.settings['NUMBER_NODES']):
new_instance = Instance.new(settings=self.settings, cluster=self)
instances.append(new_instance)
create_nodes = [instance.create(suffix=i) for i, instance in enumerate(instances)]
fetch_nodes = [instance.node for instance in instances]
self.driver.wait_until_running(fetch_nodes)
node_ids = [node.id for node in fetch_nodes]
all_nodes = self.driver.list_nodes()
new_nodes = [node for node in all_nodes if node.id in node_ids]
for instance, node in zip(instances, new_nodes):
instance.node = node
self.instances = instances | 0.00375 |
def _print_figures(figures, arguments='', file_format='pdf', target_width=8.5, target_height=11.0, target_pad=0.5):
"""
figure printing loop designed to be launched in a separate thread.
"""
for fig in figures:
# get the temp path
temp_path = _os.path.join(_settings.path_home, "temp")
# make the temp folder
_settings.MakeDir(temp_path)
# output the figure to postscript
path = _os.path.join(temp_path, "graph."+file_format)
# get the dimensions of the figure in inches
w=fig.get_figwidth()
h=fig.get_figheight()
# we're printing to 8.5 x 11, so aim for 7.5 x 10
target_height = target_height-2*target_pad
target_width = target_width -2*target_pad
# depending on the aspect we scale by the vertical or horizontal value
if 1.0*h/w > target_height/target_width:
# scale down according to the vertical dimension
new_h = target_height
new_w = w*target_height/h
else:
# scale down according to the horizontal dimension
new_w = target_width
new_h = h*target_width/w
fig.set_figwidth(new_w)
fig.set_figheight(new_h)
# save it
fig.savefig(path, bbox_inches=_pylab.matplotlib.transforms.Bbox(
[[-target_pad, new_h-target_height-target_pad],
[target_width-target_pad, target_height-target_pad]]))
# set it back
fig.set_figheight(h)
fig.set_figwidth(w)
if not arguments == '':
c = _settings['instaprint'] + ' ' + arguments + ' "' + path + '"'
else:
c = _settings['instaprint'] + ' "' + path + '"'
print(c)
_os.system(c) | 0.003973 |
def update_redirect_to_from_json(page, redirect_to_complete_slugs):
"""
The second pass of create_and_update_from_json_data
used to update the redirect_to field.
Returns a messages list to be appended to the messages from the
first pass.
"""
messages = []
s = ''
for lang, s in list(redirect_to_complete_slugs.items()):
r = Page.objects.from_path(s, lang, exclude_drafts=False)
if r:
page.redirect_to = r
page.save()
break
else:
messages.append(_("Could not find page for redirect-to field"
" '%s'") % (s,))
return messages | 0.003125 |
def ahrs2_send(self, roll, pitch, yaw, altitude, lat, lng, force_mavlink1=False):
'''
Status of secondary AHRS filter if available
roll : Roll angle (rad) (float)
pitch : Pitch angle (rad) (float)
yaw : Yaw angle (rad) (float)
altitude : Altitude (MSL) (float)
lat : Latitude in degrees * 1E7 (int32_t)
lng : Longitude in degrees * 1E7 (int32_t)
'''
return self.send(self.ahrs2_encode(roll, pitch, yaw, altitude, lat, lng), force_mavlink1=force_mavlink1) | 0.006757 |
def parse_type_comment(type_comment):
"""Parse a type comment string into AST nodes."""
try:
result = ast3.parse(type_comment, '<type_comment>', 'eval')
except SyntaxError:
raise ValueError(f"invalid type comment: {type_comment!r}") from None
assert isinstance(result, ast3.Expression)
return result.body | 0.002933 |
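# A minimal usage sketch, assuming `ast3` comes from the typed_ast package
# (e.g. `from typed_ast import ast3`), as the function above expects.
expr = parse_type_comment("List[int]")
print(ast3.dump(expr))   # AST of the parsed type expression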
def remove_bounding_box(self, loc=None):
"""
Removes bounding box from the active renderer.
Parameters
----------
loc : int, tuple, or list
Index of the renderer to add the actor to. For example,
``loc=2`` or ``loc=(1, 1)``. If None, selects the last
active Renderer.
"""
self._active_renderer_index = self.loc_to_index(loc)
renderer = self.renderers[self._active_renderer_index]
renderer.remove_bounding_box() | 0.003839 |
def excel_key(index):
"""create a key for index by converting index into a base-26 number, using A-Z as the characters."""
X = lambda n: ~n and X((n // 26)-1) + chr(65 + (n % 26)) or ''
return X(int(index)) | 0.013761 |
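# excel_key() maps a zero-based column index to spreadsheet-style letters;
# a few illustrative calls:
print(excel_key(0), excel_key(25), excel_key(26), excel_key(701))   # A Z AA ZZ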
def initialize_gdt_x86(self,state,concrete_target):
"""
Create a GDT in the state memory and populate the segment registers.
Rehook the vsyscall address using the real value in the concrete process memory
:param state: state which will be modified
:param concrete_target: concrete target that will be used to read the fs register
:return:
"""
_l.debug("Creating fake Global Descriptor Table and synchronizing gs segment register")
gs = self._read_gs_register_x86(concrete_target)
gdt = self.generate_gdt(0x0, gs)
self.setup_gdt(state, gdt)
# Synchronize the address of vsyscall in simprocedures dictionary with the concrete value
_vsyscall_address = concrete_target.read_memory(gs + 0x10, state.project.arch.bits / 8)
_vsyscall_address = struct.unpack(state.project.arch.struct_fmt(), _vsyscall_address)[0]
state.project.rehook_symbol(_vsyscall_address, '_vsyscall')
return gdt | 0.009747 |
def write_java_message(key,val,text_file):
"""
Loop through all java messages that are not associated with a unit test and
write them into a log file.
Parameters
----------
    key : str
        message category, e.g. '9.general_bad_java_messages'
    val : list of list of str
        contains the bad java messages and the message types.
    text_file : file object
        open log file that the messages are written to.
    :return: none
"""
text_file.write(key)
text_file.write('\n')
if (len(val[0]) > 0) and (len(val) >= 3):
for index in range(len(val[0])):
text_file.write("Java Message Type: ")
text_file.write(val[1][index])
text_file.write('\n')
text_file.write("Java Message: ")
for jmess in val[2][index]:
text_file.write(jmess)
text_file.write('\n')
text_file.write('\n \n') | 0.00361 |
def inverse_distance_to_points(points, values, xi, r, gamma=None, kappa=None, min_neighbors=3,
kind='cressman'):
r"""Generate an inverse distance weighting interpolation to the given points.
Values are assigned to the given interpolation points based on either [Cressman1959]_ or
    [Barnes1964]_. The Barnes implementation used here is based on [Koch1983]_.
Parameters
----------
points: array_like, shape (n, 2)
Coordinates of the data points.
values: array_like, shape (n,)
Values of the data points.
xi: array_like, shape (M, 2)
Points to interpolate the data onto.
r: float
Radius from grid center, within which observations
are considered and weighted.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default None.
kappa: float
Response parameter for barnes interpolation. Default None.
min_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation
for a point. Default is 3.
kind: str
Specify what inverse distance weighting interpolation to use.
Options: 'cressman' or 'barnes'. Default 'cressman'
Returns
-------
img: (M,) ndarray
Array representing the interpolated values for each input point in `xi`
See Also
--------
inverse_distance_to_grid
"""
obs_tree = cKDTree(points)
indices = obs_tree.query_ball_point(xi, r=r)
img = np.empty(shape=(xi.shape[0]), dtype=values.dtype)
img.fill(np.nan)
for idx, (matches, grid) in enumerate(zip(indices, xi)):
if len(matches) >= min_neighbors:
x1, y1 = obs_tree.data[matches].T
values_subset = values[matches]
dists = geometry.dist_2(grid[0], grid[1], x1, y1)
if kind == 'cressman':
img[idx] = cressman_point(dists, values_subset, r)
elif kind == 'barnes':
img[idx] = barnes_point(dists, values_subset, kappa, gamma)
else:
raise ValueError(str(kind) + ' interpolation not supported.')
return img | 0.002761 |
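A hedged usage sketch, assuming this is MetPy's metpy.interpolate.inverse_distance_to_points; the points and values below are made up for illustration:

import numpy as np
from metpy.interpolate import inverse_distance_to_points

points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
values = np.array([1.0, 2.0, 3.0, 4.0])
xi = np.array([[0.5, 0.5]])
# Cressman weighting within a radius of 2 units; each target point needs
# at least min_neighbors observations inside the radius
print(inverse_distance_to_points(points, values, xi, r=2.0, kind='cressman'))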
def _clean_isbn(isbn):
"""
Remove all non-digit and non "x" characters from given string.
Args:
isbn (str): isbn string, which will be cleaned.
Returns:
list: array of numbers (if "x" is found, it is converted to 10).
"""
if isinstance(isbn, basestring):
isbn = list(isbn.lower())
# filter digits and "x"
isbn = filter(lambda x: x.isdigit() or x == "x", isbn)
# convert ISBN to numbers
return map(lambda x: 10 if x == "x" else int(x), isbn) | 0.001938 |
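The snippet above is Python 2 code (basestring, list-returning filter/map). A rough Python 3 equivalent of the same cleaning step, for illustration only:

def clean_isbn_py3(isbn: str) -> list:
    """Keep digits and 'x', mapping 'x' to 10."""
    kept = [c for c in isbn.lower() if c.isdigit() or c == "x"]
    return [10 if c == "x" else int(c) for c in kept]

print(clean_isbn_py3("80-7169-860-X"))  # [8, 0, 7, 1, 6, 9, 8, 6, 0, 10]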
def _traverse_nodes(self):
""" Debugging function (exposes cython nodes as dummy nodes) """
node = self.root
stack = []
while stack or node is not None:
if node is not None:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node
node = node.right | 0.005013 |
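The generator above is a standard iterative in-order traversal; a self-contained sketch using a hypothetical minimal Node class:

class Node:
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

def inorder(root):
    node, stack = root, []
    while stack or node is not None:
        if node is not None:
            stack.append(node)   # descend left, remembering the path
            node = node.left
        else:
            node = stack.pop()   # visit, then descend right
            yield node.value
            node = node.right

print(list(inorder(Node(2, Node(1), Node(3)))))  # [1, 2, 3]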
def get_attribute_classes() -> Dict[str, Attribute]:
"""
Lookup all builtin Attribute subclasses, load them, and return a dict
"""
attribute_children = pkgutil.iter_modules(
importlib.import_module('jawa.attributes').__path__,
prefix='jawa.attributes.'
)
result = {}
for _, name, _ in attribute_children:
classes = inspect.getmembers(
importlib.import_module(name),
lambda c: (
inspect.isclass(c) and issubclass(c, Attribute) and
c is not Attribute
)
)
for class_name, class_ in classes:
attribute_name = getattr(class_, 'ATTRIBUTE_NAME', class_name[:-9])
result[attribute_name] = class_
return result | 0.001302 |
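The same pkgutil + inspect discovery pattern generalises to any plugin package; a generic sketch (the package and base-class names are whatever the caller supplies):

import importlib
import inspect
import pkgutil

def discover_subclasses(package_name, base_class):
    """Map class name -> class for every subclass of base_class found in a package."""
    package = importlib.import_module(package_name)
    found = {}
    prefix = package_name + '.'
    for _, mod_name, _ in pkgutil.iter_modules(package.__path__, prefix=prefix):
        module = importlib.import_module(mod_name)
        for name, cls in inspect.getmembers(module, inspect.isclass):
            if issubclass(cls, base_class) and cls is not base_class:
                found[name] = cls
    return found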
def get_stats(self):
"""Retrieves the bus statistics.
Use like so:
>>> stats = bus.get_stats()
>>> print(stats)
std_data: 0, std_remote: 0, ext_data: 0, ext_remote: 0, err_frame: 0, bus_load: 0.0%, overruns: 0
:returns: bus statistics.
:rtype: can.interfaces.kvaser.structures.BusStatistics
"""
canRequestBusStatistics(self._write_handle)
stats = structures.BusStatistics()
canGetBusStatistics(self._write_handle,
ctypes.pointer(stats),
ctypes.sizeof(stats))
return stats | 0.004785 |
def _get_config(self, host, port, unix_socket, auth, config_key):
"""Return config string from specified Redis instance and config key
:param str host: redis host
:param int port: redis port
        :param str config_key: redis config key to look up
:rtype: str
"""
client = self._client(host, port, unix_socket, auth)
if client is None:
return None
config_value = client.config_get(config_key)
del client
return config_value | 0.004237 |
def verified(self, institute_id):
"""Return all verified variants for a given institute
Args:
institute_id(str): institute id
Returns:
res(list): a list with validated variants
"""
query = {
'verb' : 'validate',
'institute' : institute_id,
}
res = []
validate_events = self.event_collection.find(query)
for validated in list(validate_events):
case_id = validated['case']
var_obj = self.variant(case_id=case_id, document_id=validated['variant_id'])
case_obj = self.case(case_id=case_id)
if not case_obj or not var_obj:
continue # Take into account that stuff might have been removed from database
var_obj['case_obj'] = {
'display_name' : case_obj['display_name'],
'individuals' : case_obj['individuals']
}
res.append(var_obj)
return res | 0.009018 |
def _set_client(self, v, load=False):
"""
Setter method for client, mapped from YANG variable /rbridge_id/ssh/client (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_client is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_client() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=client.client, is_container='container', presence=False, yang_name="client", rest_name="client", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure SSH Client', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """client must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=client.client, is_container='container', presence=False, yang_name="client", rest_name="client", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure SSH Client', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-sec-services', defining_module='brocade-sec-services', yang_type='container', is_config=True)""",
})
self.__client = t
if hasattr(self, '_set'):
self._set() | 0.006135 |
def openStream(self, source):
"""Produces a file object from source.
source can be either a file object, local filename or a string.
"""
# Already a file object
if hasattr(source, 'read'):
stream = source
else:
stream = BytesIO(source)
try:
stream.seek(stream.tell())
except: # pylint:disable=bare-except
stream = BufferedStream(stream)
return stream | 0.006316 |
def _get_columns(self, blueprint):
"""
Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list
"""
columns = []
for column in blueprint.get_added_columns():
sql = self.wrap(column) + ' ' + self._get_type(column)
columns.append(self._add_modifiers(sql, blueprint, column))
return columns | 0.004505 |
def stop_serving(self):
"""Stop the serving container.
The serving container runs in async mode to allow the SDK to do other tasks.
"""
if self.container:
self.container.down()
self.container.join()
self._cleanup()
# for serving we can delete everything in the container root.
_delete_tree(self.container_root) | 0.007595 |
def auto_correlation(sequence):
"""
    Test for the autocorrelation of a sequence between t and t - 1.
    The higher the 'auto_correlation', the less likely it is that the
    sequence was generated randomly.
:param sequence: any iterable with at most 2 values that can be turned
into a float via np.float . e.g.
'1001001'
[1, 0, 1, 0, 1]
[1.2,.1,.5,1]
:rtype: returns a dict of the linear regression stats of sequence[1:] vs.
sequence[:-1]
>>> result = auto_correlation('00000001111111111100000000')
>>> result['p'] < 0.05
True
>>> result['auto_correlation']
0.83766233766233755
"""
if isinstance(sequence, basestring):
sequence = map(int, sequence)
seq = np.array(list(sequence), dtype=np.float)
dseq = np.column_stack((seq[1:], seq[:-1]))
slope, intercept, r, ttp, see = linregress(seq[1:], seq[:-1])
cc = np.corrcoef(dseq, rowvar=0)[0][1]
return {'slope': slope, 'intercept': intercept, 'r-squared': r ** 2,
'p': ttp, 'see': see, 'auto_correlation': cc} | 0.000891 |
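The 'auto_correlation' value is simply the lag-1 correlation of the sequence with a shifted copy of itself; an equivalent standalone check:

import numpy as np

seq = np.array([int(c) for c in '00000001111111111100000000'], dtype=float)
lag1 = np.corrcoef(seq[1:], seq[:-1])[0][1]
print(round(lag1, 4))  # ~0.8377, matching the doctest above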
def _parse(value, strict=True):
"""
Preliminary duration value parser
strict=True (by default) raises StrictnessError if either hours,
minutes or seconds in duration value exceed allowed values
"""
pattern = r'(?:(?P<hours>\d+):)?(?P<minutes>\d+):(?P<seconds>\d+)'
match = re.match(pattern, value)
if not match:
raise ValueError('Invalid duration value: %s' % value)
hours = safe_int(match.group('hours'))
minutes = safe_int(match.group('minutes'))
seconds = safe_int(match.group('seconds'))
check_tuple((hours, minutes, seconds,), strict)
return (hours, minutes, seconds,) | 0.001572 |
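safe_int and check_tuple are helpers defined elsewhere in the original module; the regex itself behaves as below (the hours group is optional):

import re

pattern = r'(?:(?P<hours>\d+):)?(?P<minutes>\d+):(?P<seconds>\d+)'
m = re.match(pattern, "1:23:45")
print(m.group('hours'), m.group('minutes'), m.group('seconds'))  # 1 23 45
m = re.match(pattern, "23:45")
print(m.group('hours'), m.group('minutes'), m.group('seconds'))  # None 23 45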
def _call_marginalizevlos(self,o,integrate_method='dopr54_c',**kwargs):
"""Call the DF, marginalizing over line-of-sight velocity"""
#Get d, l, vperp
l= o.ll(obs=[1.,0.,0.],ro=1.)*_DEGTORAD
vperp= o.vll(ro=1.,vo=1.,obs=[1.,0.,0.,0.,0.,0.])
R= o.R(use_physical=False)
phi= o.phi(use_physical=False)
#Get local circular velocity, projected onto the perpendicular
#direction
if isinstance(self._pot,list):
vcirc= calcRotcurve([p for p in self._pot if not p.isNonAxi],R)[0]
else:
vcirc= calcRotcurve(self._pot,R)[0]
vcircperp= vcirc*math.cos(phi+l)
#Marginalize
alphaperp= math.pi/2.+phi+l
if not 'nsigma' in kwargs or ('nsigma' in kwargs and \
kwargs['nsigma'] is None):
nsigma= _NSIGMA
else:
nsigma= kwargs['nsigma']
kwargs.pop('nsigma',None)
if math.fabs(math.sin(alphaperp)) < math.sqrt(1./2.):
sigmaR1= nu.sqrt(self._initdf.sigmaT2(R,phi=phi,
use_physical=False)) #slight abuse
va= vcirc-self._initdf.meanvT(R,phi=phi,use_physical=False)
cosalphaperp= math.cos(alphaperp)
tanalphaperp= math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaSmall,
-va/sigmaR1-nsigma,
-va/sigmaR1+nsigma,
args=(self,R,cosalphaperp,tanalphaperp,
vperp-vcircperp,vcirc,
sigmaR1,phi),
**kwargs)[0]/math.fabs(cosalphaperp)*sigmaR1
else:
sigmaR1= nu.sqrt(self._initdf.sigmaR2(R,phi=phi,
use_physical=False))
sinalphaperp= math.sin(alphaperp)
cotalphaperp= 1./math.tan(alphaperp)
#we can reuse the VperpIntegrand, since it is just another angle
return integrate.quad(_marginalizeVperpIntegrandSinAlphaLarge,
-nsigma,nsigma,
args=(self,R,sinalphaperp,cotalphaperp,
vperp-vcircperp,vcirc,sigmaR1,phi),
**kwargs)[0]/math.fabs(sinalphaperp)*sigmaR1 | 0.02644 |
def list_uncollated_submission_versions(self, course_id, ascending=None, assignment_id=None, user_id=None):
"""
List uncollated submission versions.
Gives a paginated, uncollated list of submission versions for all matching
        submissions in the context. These SubmissionVersion objects will not include
the +new_grade+ or +previous_grade+ keys, only the +grade+; same for
+graded_at+ and +grader+.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""The id of the contextual course for this API call"""
path["course_id"] = course_id
# OPTIONAL - assignment_id
"""The ID of the assignment for which you want to see submissions. If
absent, versions of submissions from any assignment in the course are
included."""
if assignment_id is not None:
params["assignment_id"] = assignment_id
# OPTIONAL - user_id
"""The ID of the user for which you want to see submissions. If absent,
versions of submissions from any user in the course are included."""
if user_id is not None:
params["user_id"] = user_id
# OPTIONAL - ascending
"""Returns submission versions in ascending date order (oldest first). If
absent, returns submission versions in descending date order (newest
first)."""
if ascending is not None:
params["ascending"] = ascending
self.logger.debug("GET /api/v1/courses/{course_id}/gradebook_history/feed with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/gradebook_history/feed".format(**path), data=data, params=params, all_pages=True) | 0.004278 |
def abort(self, exception=exc.ConnectError):
"""
Aborts a connection and puts all pending futures into an error state.
If ``sys.exc_info()`` is set (i.e. this is being called in an exception
handler) then pending futures will have that exc info set. Otherwise
the given ``exception`` parameter is used (defaults to
``ConnectError``).
"""
log.warn("Aborting connection to %s:%s", self.host, self.port)
def abort_pending(f):
exc_info = sys.exc_info()
if any(exc_info):
f.set_exc_info(exc_info)
else:
f.set_exception(exception(self.host, self.port))
for pending in self.drain_all_pending():
abort_pending(pending) | 0.002584 |
def get_agent_queues(self, project=None, queue_name=None, action_filter=None):
"""GetAgentQueues.
[Preview API] Get a list of agent queues.
:param str project: Project ID or project name
:param str queue_name: Filter on the agent queue name
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: [TaskAgentQueue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if queue_name is not None:
query_parameters['queueName'] = self._serialize.query('queue_name', queue_name, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='5.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response)) | 0.006324 |
def list_settings(self):
"""
Get list of all appropriate settings and their default values.
"""
result = super().list_settings()
result.append((self.SETTING_TEXT_HIGHLIGHT, None))
return result | 0.008299 |
def _search_ldap(self, ldap, con, username):
"""
Searches LDAP for user, assumes ldap_search is set.
:param ldap: The ldap module reference
:param con: The ldap connection
:param username: username to match with auth_ldap_uid_field
:return: ldap object array
"""
if self.auth_ldap_append_domain:
username = username + "@" + self.auth_ldap_append_domain
if self.auth_ldap_search_filter:
filter_str = "(&%s(%s=%s))" % (
self.auth_ldap_search_filter,
self.auth_ldap_uid_field,
username,
)
else:
filter_str = "(%s=%s)" % (self.auth_ldap_uid_field, username)
user = con.search_s(
self.auth_ldap_search,
ldap.SCOPE_SUBTREE,
filter_str,
[
self.auth_ldap_firstname_field,
self.auth_ldap_lastname_field,
self.auth_ldap_email_field,
],
)
if user:
if not user[0][0]:
return None
return user | 0.001748 |
def main(_):
"""Convert a file to examples."""
if FLAGS.subword_text_encoder_filename:
encoder = text_encoder.SubwordTextEncoder(
FLAGS.subword_text_encoder_filename)
elif FLAGS.token_text_encoder_filename:
encoder = text_encoder.TokenTextEncoder(FLAGS.token_text_encoder_filename)
elif FLAGS.byte_text_encoder:
encoder = text_encoder.ByteTextEncoder()
else:
encoder = None
reader = tf.python_io.tf_record_iterator(FLAGS.input_filename)
total_sequences = 0
total_input_tokens = 0
total_target_tokens = 0
nonpadding_input_tokens = 0
nonpadding_target_tokens = 0
max_input_length = 0
max_target_length = 0
for record in reader:
x = tf.train.Example()
x.ParseFromString(record)
inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value]
targets = [int(i) for i in x.features.feature["targets"].int64_list.value]
if FLAGS.print_inputs:
print("INPUTS:\n" + encoder.decode(inputs) if encoder else inputs)
if FLAGS.print_targets:
print("TARGETS:\n" + encoder.decode(targets) if encoder else targets)
nonpadding_input_tokens += len(inputs) - inputs.count(0)
nonpadding_target_tokens += len(targets) - targets.count(0)
total_input_tokens += len(inputs)
total_target_tokens += len(targets)
total_sequences += 1
max_input_length = max(max_input_length, len(inputs))
max_target_length = max(max_target_length, len(targets))
if FLAGS.print_all:
for k, v in six.iteritems(x.features.feature):
print("%s: %s" % (k, v.int64_list.value))
print("total_sequences: %d" % total_sequences)
print("total_input_tokens: %d" % total_input_tokens)
print("total_target_tokens: %d" % total_target_tokens)
print("nonpadding_input_tokens: %d" % nonpadding_input_tokens)
print("nonpadding_target_tokens: %d" % nonpadding_target_tokens)
print("max_input_length: %d" % max_input_length)
print("max_target_length: %d" % max_target_length) | 0.012716 |
def setAccelerometerSensitivity(self, value):
"""
Sets the accelerometer sensitivity to 2, 4, 8 or 16 according to the given value. Throws an ArgumentError if
the value provided is not valid.
:param value: the target sensitivity.
"""
# note that this implicitly disables the self tests on each axis
# i.e. the full byte is actually 000[accel]000 where the 1st 3 are the accelerometer self tests, the next two
# values are the actual sensitivity and the last 3 are unused
# the 2 [accel] bits are translated by the device as follows; 00 = 2g, 01 = 4g, 10 = 8g, 11 = 16g
# in binary we get 2 = 0, 4 = 1000, 8 = 10000, 16 = 11000
# so the 1st 3 bits are always 0
try:
self.i2c_io.write(self.MPU6050_ADDRESS,
self.MPU6050_RA_ACCEL_CONFIG,
{2: 0, 4: 8, 8: 16, 16: 24}[value])
self._accelerationFactor = value / 32768.0
self.accelerometerSensitivity = value
logger.debug("Set accelerometer sensitivity = %d", value)
except KeyError:
            raise ArgumentError(str(value) + " is not a valid sensitivity (2,4,8,16)") | 0.004918
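The lookup table {2: 0, 4: 8, 8: 16, 16: 24} is just the 2-bit full-scale selector shifted into bits 3-4 of ACCEL_CONFIG; a quick arithmetic check:

for sel, g in enumerate([2, 4, 8, 16]):
    print(g, sel << 3)  # 2 -> 0, 4 -> 8, 8 -> 16, 16 -> 24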
def load_ui_wrapper(uifile, base_instance=None):
"""Load a Qt Designer .ui file and returns an instance of the user interface
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Returns:
        QWidget: instance of the loaded user interface
"""
if 'PySide' in __binding__:
return pyside_load_ui(uifile, base_instance)
elif 'PyQt' in __binding__:
uic = __import__(__binding__ + ".uic").uic
return uic.loadUi(uifile, base_instance) | 0.003623 |
def parse_cookies(self, req, name, field):
"""Pull the value from the cookiejar."""
return core.get_value(req.cookies, name, field) | 0.013605 |
def connect(self):
'initialize ldap connection and set options'
log.debug("Connecting to ldap server %s" % self.config['URI'])
self.conn = ldap.initialize(self.config['URI'])
# There are some settings that can't be changed at runtime without a context restart.
# It's possible to refresh the context and apply the settings by setting OPT_X_TLS_NEWCTX
# to 0, but this needs to be the last option set, and since the config dictionary is not
# sorted, this is not necessarily true. Sort the list of options so that if OPT_X_TLS_NEWCTX
# is present, it is applied last.
options = self.config.get('OPTIONS', {}).items()
options.sort(key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX')
for opt, value in options:
if isinstance(opt, str):
opt = getattr(ldap, opt)
try:
if isinstance(value, str):
value = getattr(ldap, value)
except AttributeError:
pass
self.conn.set_option(opt, value)
if self.config.get('START_TLS'):
log.debug("Starting TLS")
self.conn.start_tls_s() | 0.005034 |
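The sort key relies on False sorting before True, so OPT_X_TLS_NEWCTX always ends up last and is applied after the other options (items().sort() is Python 2; on Python 3 use sorted() as below):

opts = {'OPT_X_TLS_NEWCTX': 0, 'OPT_REFERRALS': 0, 'OPT_PROTOCOL_VERSION': 3}
ordered = sorted(opts.items(), key=lambda x: x[0] == 'OPT_X_TLS_NEWCTX')
print([name for name, _ in ordered])  # 'OPT_X_TLS_NEWCTX' is last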
def print_projects(self, projects):
"""Print method for projects.
"""
for project in projects:
print('{}: {}'.format(project.name, project.id)) | 0.011173 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'score') and self.score is not None:
_dict['score'] = self.score
if hasattr(self, 'sentence') and self.sentence is not None:
_dict['sentence'] = self.sentence
if hasattr(self, 'type') and self.type is not None:
_dict['type'] = self.type
if hasattr(self, 'arguments') and self.arguments is not None:
_dict['arguments'] = [x._to_dict() for x in self.arguments]
return _dict | 0.003454 |
def preview_filter_from_query(query, id_field="id", field_map={}):
"""This filter includes the "excluded_ids" so they still show up in the editor."""
f = groups_filter_from_query(query, field_map=field_map)
# NOTE: we don't exclude the excluded ids here so they show up in the editor
# include these, please
included_ids = query.get("included_ids")
if included_ids:
if f:
f |= Terms(pk=included_ids)
else:
f = Terms(pk=included_ids)
return f | 0.005894 |
def from_p12_keyfile(cls, service_account_email, filename,
private_key_password=None, scopes='',
token_uri=oauth2client.GOOGLE_TOKEN_URI,
revoke_uri=oauth2client.GOOGLE_REVOKE_URI):
"""Factory constructor from JSON keyfile.
Args:
service_account_email: string, The email associated with the
service account.
filename: string, The location of the PKCS#12 keyfile.
private_key_password: string, (Optional) Password for PKCS#12
private key. Defaults to ``notasecret``.
scopes: List or string, (Optional) Scopes to use when acquiring an
access token.
token_uri: string, URI for token endpoint. For convenience defaults
to Google's endpoints but any OAuth 2.0 provider can be
used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0
provider can be used.
Returns:
ServiceAccountCredentials, a credentials object created from
the keyfile.
Raises:
NotImplementedError if pyOpenSSL is not installed / not the
active crypto library.
"""
with open(filename, 'rb') as file_obj:
private_key_pkcs12 = file_obj.read()
return cls._from_p12_keyfile_contents(
service_account_email, private_key_pkcs12,
private_key_password=private_key_password, scopes=scopes,
token_uri=token_uri, revoke_uri=revoke_uri) | 0.002887 |
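A hedged usage sketch, assuming this is oauth2client's ServiceAccountCredentials.from_p12_keyfile (the account email and key path below are hypothetical; pyOpenSSL must be installed for PKCS#12 support):

from oauth2client.service_account import ServiceAccountCredentials

credentials = ServiceAccountCredentials.from_p12_keyfile(
    'my-service-account@my-project.iam.gserviceaccount.com',
    '/path/to/key.p12',
    scopes=['https://www.googleapis.com/auth/devstorage.read_only'])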
def create(ctx):
""" Create default config file
"""
import shutil
this_dir, this_filename = os.path.split(__file__)
default_config_file = os.path.join(this_dir, "apis/example-config.yaml")
config_file = ctx.obj["configfile"]
shutil.copyfile(default_config_file, config_file)
print_message("Config file created: {}".format(config_file)) | 0.002717 |
def clean_email(self):
""" Validate that the e-mail address is unique. """
if get_user_model().objects.filter(email__iexact=self.cleaned_data['email']):
if userena_settings.USERENA_ACTIVATION_REQUIRED and UserenaSignup.objects.filter(user__email__iexact=self.cleaned_data['email']).exclude(activation_key=userena_settings.USERENA_ACTIVATED):
raise forms.ValidationError(_('This email is already in use but not confirmed. Please check your email for verification steps.'))
raise forms.ValidationError(_('This email is already in use. Please supply a different email.'))
return self.cleaned_data['email'] | 0.009009 |
def oneup(self, window_name, object_name, iterations):
"""
Press scrollbar up with number of iterations
@param window_name: Window name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to type in, either full name,
LDTP's name convention, or a Unix glob.
@type object_name: string
        @param iterations: number of iterations to scroll the scrollbar up
        @type iterations: integer
@return: 1 on success.
@rtype: integer
"""
if not self.verifyscrollbarvertical(window_name, object_name):
raise LdtpServerException('Object not vertical scrollbar')
object_handle = self._get_object_handle(window_name, object_name)
i = 0
minValue = 1.0 / 8
flag = False
while i < iterations:
if object_handle.AXValue <= 0:
raise LdtpServerException('Minimum limit reached')
object_handle.AXValue -= minValue
time.sleep(1.0 / 100)
flag = True
i += 1
if flag:
return 1
else:
raise LdtpServerException('Unable to decrease scrollbar') | 0.00159 |