def _check_remote_option(self, option):
"""Test the status of remote negotiated Telnet options."""
if option not in self.telnet_opt_dict:
self.telnet_opt_dict[option] = TelnetOption()
return self.telnet_opt_dict[option].remote_option
def update(m, k, f, *args):
"""Updates the value for key k in associative data structure m with the return value from
calling f(old_v, *args). If m is None, use an empty map. If k is not in m, old_v will be
None."""
if m is None:
return lmap.Map.empty().assoc(k, f(None, *args))
if isinstance(m, IAssociative):
old_v = m.entry(k)
new_v = f(old_v, *args)
return m.assoc(k, new_v)
raise TypeError(
f"Object of type {type(m)} does not implement Associative interface"
)
def delaunay3D(dataset, alpha=0, tol=None, boundary=True):
"""Create 3D Delaunay triangulation of input points."""
deln = vtk.vtkDelaunay3D()
deln.SetInputData(dataset)
deln.SetAlpha(alpha)
if tol:
deln.SetTolerance(tol)
deln.SetBoundingTriangulation(boundary)
deln.Update()
return deln.GetOutput()
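# Example usage of delaunay3D above -- a minimal sketch, assuming `vtk` and `numpy`
# are installed; the random point cloud is purely illustrative.
import numpy as np
import vtk

points = vtk.vtkPoints()
for xyz in np.random.default_rng(0).random((50, 3)):
    points.InsertNextPoint(*xyz)
cloud = vtk.vtkPolyData()
cloud.SetPoints(points)

tetra_grid = delaunay3D(cloud, alpha=0.0)  # vtkUnstructuredGrid of tetrahedra
print(tetra_grid.GetNumberOfCells())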
def hdf5_storable(type_or_storable, *args, **kwargs):
'''Registers a `Storable` instance in the global service.'''
if not isinstance(type_or_storable, Storable):
type_or_storable = default_storable(type_or_storable)
hdf5_service.registerStorable(type_or_storable, *args, **kwargs)
def find_path(self, node_source, node_target, type='nodes'):
"""Determines shortest path
Determines the shortest path from `node_source` to
`node_target` in _graph using networkx' shortest path
algorithm.
Args
----
node_source: GridDing0
source node, member of _graph
node_target: GridDing0
target node, member of _graph
type : str
Specify if nodes or edges should be returned. Default
is `nodes`
Returns
-------
:any:`list` of :obj:`GridDing0`
path: shortest path from `node_source` to `node_target` (list of nodes in _graph)
Notes
-----
WARNING: The shortest path is calculated using the count of hops, not the actual line lengths!
As long as the circuit breakers are open, this works fine since there's only one path. But if
they are closed, there are 2 possible paths. The result is a path which has the minimum count of hops
but might have a longer total path length than the other one.
See networkx's shortest_path() function for details on how the path is calculated.
"""
if (node_source in self._graph.nodes()) and (node_target in self._graph.nodes()):
path = nx.shortest_path(self._graph, node_source, node_target)
else:
raise Exception('At least one of the nodes is not a member of graph.')
if type == 'nodes':
return path
elif type == 'edges':
return [_ for _ in self._graph.edges(nbunch=path, data=True)
if (_[0] in path and _[1] in path)]
else:
raise ValueError('Please specify type as nodes or edges')
def publishApp(self, app_info, map_info=None, fsInfo=None):
"""Publishes apps to AGOL/Portal
Args:
app_info (list): A list of JSON configuration apps to publish.
map_info (list): Defaults to ``None``.
fsInfo (list): Defaults to ``None``.
Returns:
dict: A dictionary of results objects.
"""
if self.securityhandler is None:
print ("Security handler required")
return
appDet = None
try:
app_results = []
if isinstance(app_info, list):
for appDet in app_info:
app_results.append(self._publishAppLogic(appDet=appDet,map_info=map_info,fsInfo=fsInfo))
else:
app_results.append(self._publishAppLogic(appDet=app_info,map_info=map_info,fsInfo=fsInfo))
return app_results
except (common.ArcRestHelperError) as e:
raise e
except Exception as e:
line, filename, synerror = trace()
raise common.ArcRestHelperError({
"function": "publishApp",
"line": line,
"filename": filename,
"synerror": synerror,
})
finally:
appDet = None
del appDet
gc.collect()
def modifyDatastream(self, pid, dsID, dsLabel=None, mimeType=None, logMessage=None, dsLocation=None,
altIDs=None, versionable=None, dsState=None, formatURI=None, checksumType=None,
checksum=None, content=None, force=False):
'''Modify an existing datastream, similar to :meth:`addDatastream`.
Content can be specified by either a URI location or as
string content or file-like object; if content is not specified,
datastream metadata will be updated without modifying the content.
On success, the returned response should have a status code 200;
on failure, the response body may include an error message.
:param pid: object pid
:param dsID: id for the new datastream
:param dsLabel: label for the datastream (optional)
:param mimeType: mimetype for the new datastream (optional)
:param logMessage: log message for the object history (optional)
:param dsLocation: URL where the content should be ingested from (optional)
:param altIDs: alternate ids (optional)
:param versionable: configure datastream versioning (optional)
:param dsState: datastream state (optional)
:param formatURI: datastream format (optional)
:param checksumType: checksum type (optional)
:param checksum: checksum (optional)
:param content: datastream content, as a file-like object or
characterdata (optional)
:param force: force the update (default: False)
:rtype: :class:`requests.models.Response`
'''
# /objects/{pid}/datastreams/{dsID} ? [dsLocation] [altIDs] [dsLabel]
# [versionable] [dsState] [formatURI] [checksumType] [checksum]
# [mimeType] [logMessage] [force] [ignoreContent]
# NOTE: not implementing ignoreContent (unneeded)
# Unlike addDatastream, if checksum is sent without checksum
# type, Fedora honors it (*does* error on invalid checksum
# with no checksum type) - it seems to use the existing
# checksum type if a new type is not specified.
http_args = {}
if dsLabel:
http_args['dsLabel'] = dsLabel
if mimeType:
http_args['mimeType'] = mimeType
if logMessage:
http_args['logMessage'] = logMessage
if dsLocation:
http_args['dsLocation'] = dsLocation
if altIDs:
http_args['altIDs'] = altIDs
if versionable is not None:
http_args['versionable'] = versionable
if dsState:
http_args['dsState'] = dsState
if formatURI:
http_args['formatURI'] = formatURI
if checksumType:
http_args['checksumType'] = checksumType
if checksum:
http_args['checksum'] = checksum
if force:
http_args['force'] = force
content_args = {}
if content:
# content can be either a string or a file-like object
if hasattr(content, 'read'): # allow content to be a file
# warn about missing checksums for files
if not checksum:
logger.warning("Updating datastream %s/%s with a file, but no checksum passed",
pid, dsID)
# either way (string or file-like object), set content as request data
# (file-like objects supported in requests as of 0.13.1)
content_args['data'] = content
url = 'objects/%s/datastreams/%s' % (pid, dsID)
return self.put(url, params=http_args, **content_args)
def _parse_team_abbreviation(self, stats):
"""
Parse the team abbreviation.
The team abbreviation is embedded within the team name tag and should
be special-parsed to extract it.
Parameters
----------
stats : PyQuery object
A PyQuery object containing the HTML from the player's stats page.
Returns
-------
string
Returns a string of the team's abbreviation, such as 'PURDUE' for
the Purdue Boilermakers.
"""
team_tag = stats(PLAYER_SCHEME['team_abbreviation'])
team = re.sub(r'.*/cbb/schools/', '', str(team_tag('a')))
team = re.sub(r'/.*', '', team)
return team
def refresh_config(self):
'''
__NB__ This *must* be called from a *different* thread than the GUI/Gtk thread.
'''
from gi.repository import Clutter, Gst, GstVideo, ClutterGst
from path_helpers import path
from .warp import bounding_box_from_allocation
if self.config_requested is not None:
sink = ClutterGst.VideoSink.new(self.pipeline_actor.texture)
sink.set_property('sync', True)
sink.set_property('qos', True)
if self.record_path is not None:
record_path = path(self.record_path)
warp_path = record_path.parent.joinpath(record_path.namebase +
'.h5')
# Parent allocation
parent_bbox = \
bounding_box_from_allocation(self.warp_actor
.get_allocation_geometry())
# Child allocation
child_bbox = \
bounding_box_from_allocation(self.warp_actor.actor
.get_allocation_geometry())
common_settings = dict(format='table', data_columns=True,
complib='zlib', complevel=6)
parent_bbox.to_hdf(str(warp_path), '/shape/parent',
**common_settings)
child_bbox.to_hdf(str(warp_path), '/shape/child',
**common_settings)
self.warp_actor.parent_corners.to_hdf(str(warp_path),
'/corners/parent',
**common_settings)
self.warp_actor.child_corners.to_hdf(str(warp_path),
'/corners/child',
**common_settings)
self.pipeline_manager.set_config(self.config_requested,
record_path=self.record_path,
sink=sink)
self.config_requested = None
return True
def add_positional_embedding_nd(x, max_length, name=None):
"""Adds n-dimensional positional embedding.
The embeddings add to all positional dimensions of the tensor.
Args:
x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional
dimensions, i.e., 1 for text, 2 for images, 3 for video, etc.
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
Returns:
Tensor of same shape as x.
"""
with tf.name_scope("add_positional_embedding_nd"):
x_shape = common_layers.shape_list(x)
num_dims = len(x_shape) - 2
depth = x_shape[-1]
base_shape = [1] * (num_dims + 1) + [depth]
base_start = [0] * (num_dims + 2)
base_size = [-1] + [1] * num_dims + [depth]
for i in range(num_dims):
shape = base_shape[:]
start = base_start[:]
size = base_size[:]
shape[i + 1] = max_length
size[i + 1] = x_shape[i + 1]
var = tf.get_variable(
name + "_%d" % i,
shape,
initializer=tf.random_normal_initializer(0, depth**-0.5))
var = var * depth**0.5
x += tf.slice(var, start, size)
return x
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
If the task fails, persist a record of the task.
"""
if not FailedTask.objects.filter(task_id=task_id, datetime_resolved=None).exists():
FailedTask.objects.create(
task_name=_truncate_to_field(FailedTask, 'task_name', self.name),
task_id=task_id, # Fixed length UUID: No need to truncate
args=args,
kwargs=kwargs,
exc=_truncate_to_field(FailedTask, 'exc', repr(exc)),
)
super(PersistOnFailureTask, self).on_failure(exc, task_id, args, kwargs, einfo)
def translate(self, address):
"""
Translates the given address to another address specific to network or service.
:param address: (:class:`~hazelcast.core.Address`), private address to be translated
:return: (:class:`~hazelcast.core.Address`), new address if given address is known, otherwise returns null
"""
if address is None:
return None
public_address = self._private_to_public.get(address)
if public_address:
return public_address
self.refresh()
return self._private_to_public.get(address)
def send(self, jlink):
"""Starts the SWD transaction.
Steps for a Write Transaction:
1. First phase in which the request is sent.
2. Second phase in which an ACK is received. This phase consists of
three bits. An OK response has the value ``1``.
3. Everytime the SWD IO may change directions, a turnaround phase is
inserted. For reads, this happens after the data phase, while
for writes this happens after between the acknowledge and data
phase, so we have to do the turnaround before writing data. This
phase consists of two bits.
4. Write the data and parity bits.
Args:
self (WriteRequest): the ``WriteRequest`` instance
jlink (JLink): the ``JLink`` instance to use for write/read
Returns:
An ``Response`` instance.
"""
ack = super(WriteRequest, self).send(jlink)
# Turnaround phase for write.
jlink.swd_write(0x0, 0x0, 2)
# Write the data and the parity bits.
jlink.swd_write32(0xFFFFFFFF, self.data)
jlink.swd_write8(0xFF, util.calculate_parity(self.data))
return Response(jlink.swd_read8(ack) & 7)
def set_payload(self, payload):
"""Set stanza payload to a single item.
All current stanza content of will be dropped.
Marks the stanza dirty.
:Parameters:
- `payload`: XML element or stanza payload object to use
:Types:
- `payload`: :etree:`ElementTree.Element` or `StanzaPayload`
"""
if isinstance(payload, ElementClass):
self._payload = [ XMLPayload(payload) ]
elif isinstance(payload, StanzaPayload):
self._payload = [ payload ]
else:
raise TypeError("Bad payload type")
self._dirty = True
def from_node(index, value):
"""
>>> h = TimelineHistory.from_node(1, 2)
>>> h.lines
[]
"""
try:
lines = json.loads(value)
except (TypeError, ValueError):
lines = None
if not isinstance(lines, list):
lines = []
return TimelineHistory(index, value, lines)
def points(self):
""" returns a pointer to the points as a numpy object """
# Get grid dimensions
nx, ny, nz = self.dimensions
nx -= 1
ny -= 1
nz -= 1
# get the points and convert to spacings
dx, dy, dz = self.spacing
# Now make the cell arrays
ox, oy, oz = self.origin
x = np.insert(np.cumsum(np.full(nx, dx)), 0, 0.0) + ox
y = np.insert(np.cumsum(np.full(ny, dy)), 0, 0.0) + oy
z = np.insert(np.cumsum(np.full(nz, dz)), 0, 0.0) + oz
xx, yy, zz = np.meshgrid(x,y,z, indexing='ij')
return np.c_[xx.ravel(), yy.ravel(), zz.ravel()]
def set_custom(sld, tld, nameservers):
'''
Sets domain to use custom DNS servers.
returns True if the custom nameservers were set successfully
sld
SLD of the domain name
tld
TLD of the domain name
nameservers
array of strings List of nameservers to be associated with this domain
CLI Example:
.. code-block:: bash
salt 'my-minion' namecheap_domains_dns.set_custom sld tld nameserver
'''
opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setCustom')
opts['SLD'] = sld
opts['TLD'] = tld
opts['Nameservers'] = ','.join(nameservers)
response_xml = salt.utils.namecheap.post_request(opts)
if response_xml is None:
return False
dnsresult = response_xml.getElementsByTagName('DomainDNSSetCustomResult')[0]
return salt.utils.namecheap.string_to_value(dnsresult.getAttribute('Update'))
def run_(self):
""" DEPRECATED """
all_records = []
for k in range(self.num_classes):
simulated_records = self.alf_params[k + 1].run()
names = ['class{0}_{1:0>{2}}'.format(k + 1, i,
len(str(self.class_list[k]))) for i in range(1,
len(
simulated_records) + 1)]
for (rec, name) in zip(simulated_records, names):
rec.name = name
all_records.extend(simulated_records)
self.result = all_records
self.clean()
return all_records
def escape(s, fold_newlines=True):
"""Escapes a string to make it usable in LaTeX text mode. Will replace
special characters as well as newlines.
Some problematic characters like ``[`` and ``]`` are escaped into groups
(e.g. ``{[}``), because they tend to cause problems when mixed with ``\\``
newlines otherwise.
:param s: The string to escape.
:param fold_newlines: If true, multiple newlines will be reduced to just a
single ``\\``. Otherwise, whitespace is kept intact
by adding multiple ``[n\baselineskip]``.
"""
def sub(m):
c = m.group()
if c in CHAR_ESCAPE:
return CHAR_ESCAPE[c]
if c.isspace():
if fold_newlines:
return r'\\'
return r'\\[{}\baselineskip]'.format(len(c))
return ESCAPE_RE.sub(sub, s)
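# Illustrative calls to escape() above -- the exact output depends on the module-level
# CHAR_ESCAPE table and ESCAPE_RE pattern, so the results shown are approximate.
print(escape("50% of $10 & a_name"))     # e.g. -> 50\% of \$10 \& a\_name
print(escape("line one\n\n\nline two"))  # newlines folded to a single \\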
def insertAfter(self, child, afterChild):
'''
insertAfter - Inserts a child after #afterChild
@param child <AdvancedTag/str> - Child block to insert
@param afterChild <AdvancedTag/str> - Child block to insert after. if None, will be appended
@return - The added child. Note, if it is a text block (str), the return will NOT be linked by reference.
'''
# If after child is null/None, just append
if afterChild is None:
return self.appendBlock(child)
isChildTag = isTagNode(child)
myBlocks = self.blocks
myChildren = self.children
# Determine where we need to insert this both in "blocks" and, if a tag, "children"
try:
blocksIdx = myBlocks.index(afterChild)
if isChildTag:
childrenIdx = myChildren.index(afterChild)
except ValueError:
raise ValueError('Provided "afterChild" is not a child of element, cannot insert.')
# Append child to requested spot
self.blocks = myBlocks[:blocksIdx+1] + [child] + myBlocks[blocksIdx+1:]
if isChildTag:
self.children = myChildren[:childrenIdx+1] + [child] + myChildren[childrenIdx+1:]
return child
def render_text(self, request, instance, context):
"""
Custom rendering function for HTML output
"""
render_template = self.get_render_template(request, instance, email_format='text')
if not render_template:
# If there is no TEXT variation, create it by removing the HTML tags.
base_url = request.build_absolute_uri('/')
html = self.render_html(request, instance, context)
return html_to_text(html, base_url)
instance_context = self.get_context(request, instance, email_format='text', parent_context=context)
instance_context['email_format'] = 'text'
text = self.render_to_string(request, render_template, instance_context)
text = text + "" # Avoid being a safestring
if self.render_replace_context_fields:
text = replace_fields(text, instance_context, autoescape=False)
return text
def get_environment_config():
"""
Returns all the Turi Create configuration variables that can only
be set via environment variables.
- *TURI_FILEIO_WRITER_BUFFER_SIZE*: The file write buffer size.
- *TURI_FILEIO_READER_BUFFER_SIZE*: The file read buffer size.
- *OMP_NUM_THREADS*: The maximum number of threads to use for parallel processing.
Returns
-------
Returns a dictionary of {key:value,..}
"""
from .._connect import main as _glconnect
unity = _glconnect.get_unity()
return unity.list_globals(False)
def add_string(self, name, help, required=True, default=None):
"""
Add a new option with a type of String.
:param str name: The name of the option, how it will be referenced.
:param str help: The string returned as help to describe how the option is used.
:param bool required: Whether to require that this option be set or not.
:param str default: The default value for this option. If required is True and the user must specify it, set to anything but None.
"""
self._options[name] = Option(name, 'str', help, required, default=default)
def julian2sidereal(Jdate: float) -> float:
"""
Convert Julian time to sidereal time
D. Vallado Ed. 4
Parameters
----------
Jdate: float
Julian centuries from J2000.0
Returns
-------
tsr : float
Sidereal time
"""
jdate = np.atleast_1d(Jdate)
assert jdate.ndim == 1
tsr = np.empty(jdate.size)
for i, jd in enumerate(jdate):
# %% Vallado Eq. 3-42 p. 184, Seidelmann 3.311-1
tUT1 = (jd - 2451545.0) / 36525.
# Eqn. 3-47 p. 188
gmst_sec = (67310.54841 + (876600 * 3600 + 8640184.812866) *
tUT1 + 0.093104 * tUT1**2 - 6.2e-6 * tUT1**3)
# 1/86400 and %(2*pi) implied by units of radians
tsr[i] = gmst_sec * (2 * pi) / 86400. % (2 * pi)
return tsr.squeeze()
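# Quick check of julian2sidereal above: at the J2000.0 epoch (JD 2451545.0) the
# Greenwich mean sidereal time is about 18.697 h, i.e. roughly 4.895 rad.
import numpy as np
print(julian2sidereal(2451545.0))                         # ~4.8949
print(julian2sidereal(np.array([2451545.0, 2458849.5])))  # vectorised input works too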
def _get_tmp_account_id(cls, writer_spec):
"""Returns the account id to use with tmp bucket."""
# pick tmp id iff tmp bucket is set explicitly
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec.get(cls._TMP_ACCOUNT_ID_PARAM, None)
return cls._get_account_id(writer_spec)
def set_params(self, deep=False, force=False, **parameters):
"""
Sets an object's parameters
Parameters
----------
deep : boolean, default: False
when True, also sets non-user-facing parameters
force : boolean, default: False
when True, also sets parameters that the object does not already
have
**parameters : parameters to set
Returns
-------
self
"""
param_names = self.get_params(deep=deep).keys()
for parameter, value in parameters.items():
if (parameter in param_names
or force
or (hasattr(self, parameter) and parameter == parameter.strip('_'))):
setattr(self, parameter, value)
return self
def _freq_parser(self, freq):
"""Parse timedelta.
Valid keywords "days", "day", "d", "hours", "hour", "h",
"minutes", "minute", "min", "m", "seconds", "second", "sec", "s",
"weeks", "week", "w",
"""
freq = freq.lower().strip()
valid_keywords = [
"days", "day", "d",
"hours", "hour", "h",
"minutes", "minute", "min", "m",
"seconds", "second", "sec", "s",
"weeks", "week", "w",
]
error_message = "'%s' is invalid, use one of %s" % (
freq, valid_keywords)
try:
# day
for surfix in ["days", "day", "d"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(days=int(freq))
# hour
for surfix in ["hours", "hour", "h"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(hours=int(freq))
# minute
for surfix in ["minutes", "minute", "min", "m"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(minutes=int(freq))
# second
for surfix in ["seconds", "second", "sec", "s"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(seconds=int(freq))
# week
for surfix in ["weeks", "week", "w"]:
if freq.endswith(surfix):
freq = freq.replace(surfix, "")
return timedelta(days=int(freq) * 7)
except:
pass
raise ValueError(error_message)
def _prepare_controller(self, controller, template):
"""
Wraps the controller to either render a Jinja template or return a JSON response (if template is None)
Args:
controller (coroutine): the coroutine to be wrapped
template (str): the name of the template or None
Returns:
coroutine: a wrapped coroutine of the controller
"""
if template:
fn = aiohttp_jinja2.template(template_name=template)(controller)
else:
fn = self._parse_json_response(controller)
return fn
def hover_on_element(driver, selector, by=By.CSS_SELECTOR):
"""
Fires the hover event for the specified element by the given selector.
@Params
driver - the webdriver object (required)
selector - the locator (css selector) that is used (required)
by - the method to search for the locator (Default: By.CSS_SELECTOR)
"""
element = driver.find_element(by=by, value=selector)
hover = ActionChains(driver).move_to_element(element)
hover.perform()
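# Hypothetical usage of hover_on_element above (the URL and selector are placeholders,
# and a locally available chromedriver is assumed).
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://example.com")
hover_on_element(driver, "nav .menu-item")  # e.g. reveals a dropdown on hover
driver.quit()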
def report_message(message, level='error', request=None, extra_data=None, payload_data=None):
"""
Reports an arbitrary string message to Rollbar.
message: the string body of the message
level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug'
request: the request object for the context of the message
extra_data: dictionary of params to include with the message. 'body' is reserved.
payload_data: param names to pass in the 'data' level of the payload; overrides defaults.
"""
try:
return _report_message(message, level, request, extra_data, payload_data)
except Exception as e:
log.exception("Exception while reporting message to Rollbar. %r", e)
def export_to_dom(self):
"""
Exports this model to a DOM.
"""
namespaces = 'xmlns="http://www.neuroml.org/lems/%s" ' + \
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' + \
'xsi:schemaLocation="http://www.neuroml.org/lems/%s %s"'
namespaces = namespaces%(self.target_lems_version,self.target_lems_version,self.schema_location)
xmlstr = '<Lems %s>'%namespaces
for include in self.includes:
xmlstr += include.toxml()
for target in self.targets:
xmlstr += '<Target component="{0}"/>'.format(target)
for dimension in self.dimensions:
xmlstr += dimension.toxml()
for unit in self.units:
xmlstr += unit.toxml()
for constant in self.constants:
xmlstr += constant.toxml()
for component_type in self.component_types:
xmlstr += component_type.toxml()
for component in self.components:
xmlstr += component.toxml()
xmlstr += '</Lems>'
xmldom = minidom.parseString(xmlstr)
return xmldom
def _updated_cb(self, downtotal, downdone, uptotal, updone):
""" Emit update signal, including transfer status metadata. """
self.emit('updated', downtotal, downdone, uptotal, updone)
def get_messages(self, limit=25, *, query=None, order_by=None, batch=None,
download_attachments=False):
"""
Downloads messages from this folder
:param int limit: limits the result set. Over 999 uses batch.
:param query: applies a filter to the request such as
"displayName eq 'HelloFolder'"
:type query: Query or str
:param order_by: orders the result set based on this condition
:type order_by: Query or str
:param int batch: batch size, retrieves items in
batches allowing to retrieve more items than the limit.
:param bool download_attachments: whether or not to download attachments
:return: list of messages
:rtype: list[Message] or Pagination
"""
if self.root:
url = self.build_url(self._endpoints.get('root_messages'))
else:
url = self.build_url(self._endpoints.get('folder_messages').format(
id=self.folder_id))
if limit is None or limit > self.protocol.max_top_value:
batch = self.protocol.max_top_value
params = {'$top': batch if batch else limit}
if order_by:
params['$orderby'] = order_by
if query:
if isinstance(query, str):
params['$filter'] = query
else:
params.update(query.as_params())
response = self.con.get(url, params=params)
if not response:
return iter(())
data = response.json()
# Everything received from cloud must be passed as self._cloud_data_key
messages = (self.message_constructor(
parent=self,
download_attachments=download_attachments,
**{self._cloud_data_key: message})
for message in data.get('value', []))
next_link = data.get(NEXT_LINK_KEYWORD, None)
if batch and next_link:
return Pagination(parent=self, data=messages,
constructor=self.message_constructor,
next_link=next_link, limit=limit,
download_attachments=download_attachments)
else:
return messages
def uisetup(ui):
"""Setup pre-/post- hooks"""
for hooktype in PREPOST_HOOKS:
ui.setconfig("hooks", "pre-" + hooktype + ".autohooks", autohook)
ui.setconfig("hooks", "post-" + hooktype + ".autohooks", autohook) | 0.004292 |
def copy_traj_attributes(target, origin, start):
""" Inserts certain attributes of origin into target
:param target: target trajectory object
:param origin: origin trajectory object
:param start: :py:obj:`origin` attributes will be inserted in :py:obj:`target` starting at this index
:return: target: the md trajectory with the attributes of :py:obj:`origin` inserted
"""
# The list of copied attributes can be extended here with time
# Or perhaps ask the mdtraj guys to implement something similar?
stop = start+origin.n_frames
target.xyz[start:stop] = origin.xyz
target.unitcell_lengths[start:stop] = origin.unitcell_lengths
target.unitcell_angles[start:stop] = origin.unitcell_angles
target.time[start:stop] = origin.time
return target
def parse_select(cls, text: str) -> Set:
"""
get columns from select text
:param text: col1, col2
:return: ALL_COLUMNS or ['col1', 'col2']
"""
if text == '*':
return ALL_COLUMNS # None means ALL
selected_columns = set(filter(lambda x: x, map(str.strip, text.split(','))))
if not selected_columns:
raise InvalidParams("No column(s) selected")
return selected_columns
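# Standalone sketch of the selection rule implemented by parse_select above;
# ALL_COLUMNS and the raised exception type are stand-ins for the module's own objects.
ALL_COLUMNS = None

def _parse_select_demo(text):
    if text == '*':
        return ALL_COLUMNS
    cols = set(filter(None, map(str.strip, text.split(','))))
    if not cols:
        raise ValueError("No column(s) selected")
    return cols

assert _parse_select_demo("col1, col2, col1") == {"col1", "col2"}
assert _parse_select_demo("*") is ALL_COLUMNS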
def build_table_schema(data, index=True, primary_key=None, version=True):
"""
Create a Table schema from ``data``.
Parameters
----------
data : Series, DataFrame
index : bool, default True
Whether to include ``data.index`` in the schema.
primary_key : bool or None, default None
column names to designate as the primary key.
The default `None` will set `'primaryKey'` to the index
level or levels if the index is unique.
version : bool, default True
Whether to include a field `pandas_version` with the version
of pandas that generated the schema.
Returns
-------
schema : dict
Notes
-----
See `_as_json_table_type` for conversion types.
Timedeltas as converted to ISO8601 duration format with
9 decimal places after the seconds field for nanosecond precision.
Categoricals are converted to the `any` dtype, and use the `enum` field
constraint to list the allowed values. The `ordered` attribute is included
in an `ordered` field.
Examples
--------
>>> df = pd.DataFrame(
... {'A': [1, 2, 3],
... 'B': ['a', 'b', 'c'],
... 'C': pd.date_range('2016-01-01', freq='d', periods=3),
... }, index=pd.Index(range(3), name='idx'))
>>> build_table_schema(df)
{'fields': [{'name': 'idx', 'type': 'integer'},
{'name': 'A', 'type': 'integer'},
{'name': 'B', 'type': 'string'},
{'name': 'C', 'type': 'datetime'}],
'pandas_version': '0.20.0',
'primaryKey': ['idx']}
"""
if index is True:
data = set_default_names(data)
schema = {}
fields = []
if index:
if data.index.nlevels > 1:
for level in data.index.levels:
fields.append(convert_pandas_type_to_json_field(level))
else:
fields.append(convert_pandas_type_to_json_field(data.index))
if data.ndim > 1:
for column, s in data.iteritems():
fields.append(convert_pandas_type_to_json_field(s))
else:
fields.append(convert_pandas_type_to_json_field(data))
schema['fields'] = fields
if index and data.index.is_unique and primary_key is None:
if data.index.nlevels == 1:
schema['primaryKey'] = [data.index.name]
else:
schema['primaryKey'] = data.index.names
elif primary_key is not None:
schema['primaryKey'] = primary_key
if version:
schema['pandas_version'] = '0.20.0'
return schema
def aggregate_history(slugs, granularity="daily", since=None, with_data_table=False):
"""Template Tag to display history for multiple metrics.
* ``slug_list`` -- A list of slugs to display
* ``granularity`` -- the granularity: seconds, minutes, hourly,
daily, weekly, monthly, yearly
* ``since`` -- a datetime object or a string string matching one of the
following patterns: "YYYY-mm-dd" for a date or "YYYY-mm-dd HH:MM:SS" for
a date & time.
* ``with_data_table`` -- if True, prints the raw data in a table.
"""
r = get_r()
slugs = list(slugs)
try:
if since and len(since) == 10: # yyyy-mm-dd
since = datetime.strptime(since, "%Y-%m-%d")
elif since and len(since) == 19: # yyyy-mm-dd HH:MM:ss
since = datetime.strptime(since, "%Y-%m-%d %H:%M:%S")
except (TypeError, ValueError):
# assume we got a datetime object or leave since = None
pass
history = r.get_metric_history_chart_data(
slugs=slugs,
since=since,
granularity=granularity
)
return {
'chart_id': "metric-aggregate-history-{0}".format("-".join(slugs)),
'slugs': slugs,
'since': since,
'granularity': granularity,
'metric_history': history,
'with_data_table': with_data_table,
}
def clip_or_fit_solutions(self, pop, idx):
"""make sure that solutions fit to sample distribution, this interface will probably change.
In particular the frequency of long vectors appearing in pop[idx] - self.mean is limited.
"""
for k in idx:
self.repair_genotype(pop[k])
def render_to_string(self, env, **kwargs):
"""Render this template.
:param dict env: The WSGI environment associated with the request causing this template to be rendered
:param any kwargs: The keyword arguments to be supplied to the Jinja template render method
:return: The rendered template
:rtype: str
"""
template = None
template_path = env.get(self.jenv.env_template_dir_key)
if template_path:
# jinja paths are not os paths, always use '/' as separator
# https://github.com/pallets/jinja/issues/411
template_path = template_path + '/' + self.insert_file
try:
template = self.jenv.jinja_env.get_template(template_path)
except TemplateNotFound as te:
pass
if not template:
template = self.jenv.jinja_env.get_template(self.insert_file)
params = env.get(self.jenv.env_template_params_key)
if params:
kwargs.update(params)
kwargs['env'] = env
kwargs['static_prefix'] = env.get('pywb.host_prefix', '') + env.get('pywb.app_prefix', '') + '/static'
return template.render(**kwargs)
def cmd_layout(self, args):
'''handle layout command'''
from MAVProxy.modules.lib import win_layout
if len(args) < 1:
print("usage: layout <save|load>")
return
if args[0] == "load":
win_layout.load_layout(self.mpstate.settings.vehicle_name)
elif args[0] == "save":
win_layout.save_layout(self.mpstate.settings.vehicle_name)
def handle_new_events(self, events):
"""Add each new events to the event queue."""
for event in events:
self.events.append(
self.create_event_object(
event[0],
event[1],
int(event[2])))
def __proxy_password(self):
"""Returning the password and immediately destroying it"""
passwd = copy(self._inp_proxy_password.value)
self._inp_proxy_password.value = ''
return passwd
def ping():
'''
Is the chassis responding?
:return: Returns False if the chassis didn't respond, True otherwise.
'''
r = __salt__['dracr.system_info'](host=DETAILS['host'],
admin_username=DETAILS['admin_username'],
admin_password=DETAILS['admin_password'])
if r.get('retcode', 0) == 1:
return False
else:
return True
try:
return r['dict'].get('ret', False)
except Exception:
return False
def getRegularAnalyses(self):
"""
Return the analyses assigned to the current worksheet that are directly
associated to an Analysis Request but are not QC analyses. This is all
analyses that implement IRoutineAnalysis
:return: List of regular analyses
:rtype: List of ReferenceAnalysis/DuplicateAnalysis
"""
qc_types = ['ReferenceAnalysis', 'DuplicateAnalysis']
analyses = self.getAnalyses()
return [a for a in analyses if a.portal_type not in qc_types]
def _load_nnp(self, rel_name, rel_url):
'''
Args:
rel_name: relative path to where donwloaded nnp is saved.
rel_url: relative url path to where nnp is downloaded from.
'''
from nnabla.utils.download import download
path_nnp = os.path.join(
get_model_home(), 'imagenet/{}'.format(rel_name))
url = os.path.join(get_model_url_base(),
'imagenet/{}'.format(rel_url))
logger.info('Downloading {} from {}'.format(rel_name, url))
dir_nnp = os.path.dirname(path_nnp)
if not os.path.isdir(dir_nnp):
os.makedirs(dir_nnp)
download(url, path_nnp, open_file=False, allow_overwrite=False)
print('Loading {}.'.format(path_nnp))
self.nnp = NnpLoader(path_nnp)
def validate_arguments_type_of_function(param_type=None):
"""
Decorator to validate the <type> of arguments in
the calling function are of the `param_type` class.
if `param_type` is None, uses `param_type` as the class where it is used.
Note: Use this decorator on the functions of the class.
"""
def inner(function):
def wrapper(self, *args, **kwargs):
type_ = param_type or type(self)
for arg in args + tuple(kwargs.values()):
if not isinstance(arg, type_):
raise TypeError(
(
'Invalid Type: {}.{}() accepts only the '
'arguments of type "<{}>"'
).format(
type(self).__name__,
function.__name__,
type_.__name__,
)
)
return function(self, *args, **kwargs)
return wrapper
return inner
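# Example application of the decorator above; the Money class is purely illustrative.
class Money:
    def __init__(self, amount):
        self.amount = amount

    @validate_arguments_type_of_function()  # defaults to the class it is used in (Money)
    def add(self, other):
        return Money(self.amount + other.amount)

Money(3).add(Money(4))      # fine: the argument is a Money instance
try:
    Money(3).add(7)         # raises TypeError: accepts only arguments of type "<Money>"
except TypeError as err:
    print(err)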
def items(self, obj):
"""
Items are the published entries of the tag.
"""
return TaggedItem.objects.get_by_model(
Entry.published.all(), obj)[:self.limit]
def _request(self, lat_min, lon_min, lat_max, lon_max, start, end, picture_size=None, set_=None, map_filter=None):
"""
Internal method to send requests to the Panoramio data API.
:param lat_min:
Minimum latitude of the bounding box
:type lat_min: float
:param lon_min:
Minimum longitude of the bounding box
:type lon_min: float
:param lat_max:
Maximum latitude of the bounding box
:type lat_max: float
:param lon_max:
Maximum longitude of the bounding box
:type lon_max: float
:param start:
Start number of the number of photo's to retrieve, where 0 is the most popular picture
:type start: int
:param end:
Last number of the number of photo's to retrieve, where 0 is the most popular picture
:type end: int
:param picture_size:
This can be: original, medium (*default*), small, thumbnail, square, mini_square
:type picture_size: basestring
:param set_:
This can be: public, popular or user-id; where user-id is the specific id of a user (as integer)
:type set_: basestring/int
:param map_filter:
Whether to return photos that look better together; when True, tries to avoid returning photos of the same
location
:type map_filter: bool
:return: JSON response of the request formatted as a dictionary.
"""
if not isinstance(lat_min, float):
raise PynoramioException(
'{0}._request requires the lat_min parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lon_min, float):
raise PynoramioException(
'{0}._request requires the lon_min parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lat_max, float):
raise PynoramioException(
'{0}._request requires the lat_max parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(lon_max, float):
raise PynoramioException(
'{0}._request requires the lon_max parameter to be a float.'.format(self.__class__.__name__))
if not isinstance(start, int):
raise PynoramioException(
'{0}._request requires the start parameter to be an int.'.format(self.__class__.__name__))
if not isinstance(end, int):
raise PynoramioException(
'{0}._request requires the end parameter to be an int.'.format(self.__class__.__name__))
url = self.base_url + '&minx={0}&miny={1}&maxx={2}&maxy={3}&from={4}&to={5}'.format(lon_min, lat_min,
lon_max, lat_max,
start, end)
if picture_size is not None and isinstance(picture_size, basestring) \
and picture_size in ['original', 'medium', 'small', 'thumbnail', 'square', 'mini_square']:
url += '&size={0}'.format(picture_size)
if set_ is not None and (isinstance(set_, basestring) and set_ in ['public', 'full']) \
or (isinstance(set_, int)):
url += '&set={0}'.format(set_)
else:
url += '&set=public'
if map_filter is not None and isinstance(map_filter, bool) and not map_filter:
url += '&map_filter=false'
r = requests.get(url)
try:
return r.json()
except ValueError:
# add your debugging lines here, for example, print(r.url)
raise PynoramioException(
'An invalid or malformed url was passed to {0}._request'.format(self.__class__.__name__))
def market_snapshot(self, prefix=False):
"""return all market quotation snapshot
:param prefix: if prefix is True, return quotation dict's stock_code
key start with sh/sz market flag
"""
return self.get_stock_data(self.stock_list, prefix=prefix)
def micro_calc(TP, item):
"""
Calculate PPV_Micro and TPR_Micro.
:param TP: true positive
:type TP:dict
:param item: FN or FP
:type item : dict
:return: PPV_Micro or TPR_Micro as float
"""
try:
TP_sum = sum(TP.values())
item_sum = sum(item.values())
return TP_sum / (TP_sum + item_sum)
except Exception:
return "None" | 0.002564 |
def learn(self, spiro, iterations=1):
'''Train short term memory with given spirograph.
:param spiro:
:py:class:`SpiroArtifact` object
'''
for i in range(iterations):
self.stmem.train_cycle(spiro.obj.flatten())
def printer(self):
""" Returns a printer that was defined in the config, or throws an
exception.
This method loads the default config if one hasn't already been loaded.
"""
if not self._has_loaded:
self.load()
if not self._printer_name:
raise exceptions.ConfigSectionMissingError('printer')
if not self._printer:
# We could catch init errors and make them a ConfigSyntaxError,
# but I'll just let them pass
self._printer = getattr(printer, self._printer_name)(**self._printer_config)
return self._printer
def _find_files(self, entries, root, relative_path, file_name_regex):
"""
Return the elements in entries that
1. are in ``root/relative_path``, and
2. match ``file_name_regex``.
:param list entries: the list of entries (file paths) in the container
:param string root: the root directory of the container
:param string relative_path: the relative path in which we must search
:param regex file_name_regex: the regex matching the desired file names
:rtype: list of strings (path)
"""
self.log([u"Finding files within root: '%s'", root])
target = root
if relative_path is not None:
self.log([u"Joining relative path: '%s'", relative_path])
target = gf.norm_join(root, relative_path)
self.log([u"Finding files within target: '%s'", target])
files = []
target_len = len(target)
for entry in entries:
if entry.startswith(target):
self.log([u"Examining entry: '%s'", entry])
entry_suffix = entry[target_len + 1:]
self.log([u"Examining entry suffix: '%s'", entry_suffix])
if re.search(file_name_regex, entry_suffix) is not None:
self.log([u"Match: '%s'", entry])
files.append(entry)
else:
self.log([u"No match: '%s'", entry])
return sorted(files)
def quantiles(x,
num_quantiles,
axis=None,
interpolation=None,
keep_dims=False,
validate_args=False,
name=None):
"""Compute quantiles of `x` along `axis`.
The quantiles of a distribution are cut points dividing the range into
intervals with equal probabilities.
Given a vector `x` of samples, this function estimates the cut points by
returning `num_quantiles + 1` cut points, `(c0, ..., cn)`, such that, roughly
speaking, equal number of sample points lie in the `num_quantiles` intervals
`[c0, c1), [c1, c2), ..., [c_{n-1}, cn]`. That is,
* About `1 / n` fraction of the data lies in `[c_{k-1}, c_k)`, `k = 1, ..., n`
* About `k / n` fraction of the data lies below `c_k`.
* `c0` is the sample minimum and `cn` is the maximum.
The exact number of data points in each interval depends on the size of
`x` (e.g. whether the size is divisible by `n`) and the `interpolation` kwarg.
Args:
x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`,
`x` must have statically known number of dimensions.
num_quantiles: Scalar `integer` `Tensor`. The number of intervals the
returned `num_quantiles + 1` cut points divide the range into.
axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The
axis that index independent samples over which to return the desired
percentile. If `None` (the default), treat every dimension as a sample
dimension, returning a scalar.
interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}.
Default value: 'nearest'. This specifies the interpolation method to
use when the fractions `k / n` lie between two data points `i < j`:
* linear: i + (j - i) * fraction, where fraction is the fractional part
of the index surrounded by i and j.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j`, whichever is nearest.
* midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not
work with integer dtypes.
keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1
If `False`, the last dimension is removed from the output shape.
validate_args: Whether to add runtime checks of argument validity. If
False, and arguments are incorrect, correct behavior is not guaranteed.
name: A Python string name to give this `Op`. Default is 'percentile'
Returns:
cut_points: A `rank(x) + 1 - len(axis)` dimensional `Tensor` with same
`dtype` as `x` and shape `[num_quantiles + 1, ...]` where the trailing shape
is that of `x` without the dimensions in `axis` (unless `keep_dims is True`)
Raises:
ValueError: If argument 'interpolation' is not an allowed type.
ValueError: If interpolation type not compatible with `dtype`.
#### Examples
```python
# Get quartiles of x with various interpolation choices.
x = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='nearest')
==> [ 0., 2., 5., 8., 10.]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='linear')
==> [ 0. , 2.5, 5. , 7.5, 10. ]
tfp.stats.quantiles(x, num_quantiles=4, interpolation='lower')
==> [ 0., 2., 5., 7., 10.]
# Get deciles of columns of an R x C data set.
data = load_my_columnar_data(...)
tfp.stats.quantiles(data, num_quantiles=10)
==> Shape [11, C] Tensor
```
"""
with tf.compat.v1.name_scope(
name, 'quantiles', values=[x, num_quantiles, axis]):
x = tf.convert_to_tensor(value=x, name='x')
return percentile(
x,
q=tf.linspace(
# percentile casts q to float64 before using it...so may as well use
# float64 here. Note that using x.dtype won't work with linspace
# if x is integral type (which is another motivation for hard-coding
# float64).
tf.convert_to_tensor(value=0, dtype=tf.float64),
tf.convert_to_tensor(value=100, dtype=tf.float64),
num=num_quantiles + 1),
axis=axis,
interpolation=interpolation,
keep_dims=keep_dims,
validate_args=validate_args,
preserve_gradients=False)
def load_all(path, include_core=True, subfolders=None, path_in_arc=None):
""" Loads a full IO system with all extension in path
Parameters
----------
path : pathlib.Path or string
Path or path with para file name for the data to load.
This must either point to the directory containing the uncompressed
data or the location of a compressed zip file with the data. In the
later case and if there are several mrio's in the zip file the
parameter 'path_in_arc' need to be specifiec to further indicate the
location of the data in the compressed file.
include_core : boolean, optional
If False the load method does not include A, L and Z matrix. This
significantly reduces the required memory if the purpose is only
to analyse the results calculated beforehand.
subfolders: list of pathlib.Path or string, optional
By default (subfolders=None), all subfolders in path containing a json
parameter file (as defined in DEFAULT_FILE_NAMES['filepara']:
metadata.json) are parsed. If only a subset should be used, pass a list
of names of subfolders. These can either be strings specifying direct
subfolders of path, or absolute/relative path if the extensions are
stored at a different location. Both modes can be mixed. If the data
is read from a zip archive the path must be given as described below in
'path_in_arc', relative to the root defined in the paramter
'path_in_arc'. Extensions in a different zip archive must be read
separately by calling the function 'load' for this extension.
path_in_arc: string, optional
Path to the data in the zip file (where the fileparameters file is
located). path_in_arc must be given without leading dot and slash;
thus to point to the data in the root of the compressed file pass '',
for data in e.g. the folder 'emissions' pass 'emissions/'. Only used
if parameter 'path' points to an compressed zip file.
Can be None (default) if there is only one mrio database in the
zip archive (thus only one file_parameter file as the systemtype entry
'IOSystem'.
"""
def clean(varStr):
""" get valid python name from folder
"""
return re.sub(r'\W|^(?=\d)', '_', str(varStr))
path = Path(path)
if zipfile.is_zipfile(str(path)):
with zipfile.ZipFile(file=str(path), mode='r') as zz:
zipcontent = zz.namelist()
if path_in_arc:
path_in_arc = str(path_in_arc)
if path_in_arc not in zipcontent:
path_in_arc = os.path.join(path_in_arc,
DEFAULT_FILE_NAMES['filepara'])
if path_in_arc not in zipcontent:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
else:
with zipfile.ZipFile(file=str(path), mode='r') as zz:
fpfiles = [
f for f in zz.namelist()
if
os.path.basename(f) == DEFAULT_FILE_NAMES['filepara'] and
json.loads(zz.read(f).decode('utf-8')
)['systemtype'] == 'IOSystem']
if len(fpfiles) == 0:
raise ReadError('File parameter file {} not found in {}. '
'Tip: specify fileparameter filename '
'through "path_in_arc" if different '
'from default.'.format(
DEFAULT_FILE_NAMES['filepara'], path))
elif len(fpfiles) > 1:
raise ReadError('Mulitple mrio archives found in {}. '
'Specify one by the '
'parameter "path_in_arc"'.format(path))
else:
path_in_arc = os.path.dirname(fpfiles[0])
logging.debug("Expect file parameter-file at {} in {}".format(
path_in_arc, path))
io = load(path, include_core=include_core, path_in_arc=path_in_arc)
if zipfile.is_zipfile(str(path)):
root_in_zip = os.path.dirname(path_in_arc)
if subfolders is None:
subfolders = {
os.path.relpath(os.path.dirname(p), root_in_zip)
for p in zipcontent
if p.startswith(root_in_zip) and
os.path.dirname(p) != root_in_zip}
for subfolder_name in subfolders:
if subfolder_name not in zipcontent + list({
os.path.dirname(p) for p in zipcontent}):
subfolder_full = os.path.join(root_in_zip, subfolder_name)
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if subfolder_name not in zipcontent:
subfolder_full_meta = os.path.join(
subfolder_full, DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta in zipcontent:
ext = load(path,
include_core=include_core,
path_in_arc=subfolder_full_meta)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
else:
if subfolders is None:
subfolders = [d for d in path.iterdir() if d.is_dir()]
for subfolder_name in subfolders:
if not os.path.exists(str(subfolder_name)):
subfolder_full = path / subfolder_name
else:
subfolder_full = subfolder_name
subfolder_name = os.path.basename(os.path.normpath(subfolder_name))
if not os.path.isfile(str(subfolder_full)):
subfolder_full_meta = (subfolder_full /
DEFAULT_FILE_NAMES['filepara'])
else:
subfolder_full_meta = subfolder_full
if subfolder_full_meta.exists():
ext = load(subfolder_full, include_core=include_core)
setattr(io, clean(subfolder_name), ext)
io.meta._add_fileio("Added satellite account "
"from {}".format(subfolder_full))
else:
continue
return io
def save_function_effect(module):
""" Recursively save function effect for pythonic functions. """
for intr in module.values():
if isinstance(intr, dict): # Submodule case
save_function_effect(intr)
else:
fe = FunctionEffects(intr)
IntrinsicArgumentEffects[intr] = fe
if isinstance(intr, intrinsic.Class):
save_function_effect(intr.fields)
def getStateIndex(self,state):
"""
Returns the index of a state by calculating the state code and searching for this code a sorted list.
Can be called on multiple states at once.
"""
statecodes = self.getStateCode(state)
return np.searchsorted(self.codes, statecodes).astype(int)
def hz2cents(freq_hz, base_frequency=10.0):
"""Convert an array of frequency values in Hz to cents.
0 values are left in place.
Parameters
----------
freq_hz : np.ndarray
Array of frequencies in Hz.
base_frequency : float
Base frequency for conversion.
(Default value = 10.0)
Returns
-------
cent : np.ndarray
Array of frequencies in cents, relative to base_frequency
"""
freq_cent = np.zeros(freq_hz.shape[0])
freq_nonz_ind = np.flatnonzero(freq_hz)
normalized_frequency = np.abs(freq_hz[freq_nonz_ind])/base_frequency
freq_cent[freq_nonz_ind] = 1200*np.log2(normalized_frequency)
return freq_cent
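# Example of hz2cents above: zeros are preserved, and 10 Hz (the default base) maps to 0 cents.
import numpy as np
freqs = np.array([0.0, 10.0, 220.0, 440.0])
print(hz2cents(freqs))  # approximately [0., 0., 5351.3, 6551.3]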
def language_contents(instance):
"""Ensure keys in Language Content's 'contents' dictionary are valid
language codes, and that the keys in the sub-dictionaries match the rules
for object property names.
"""
if instance['type'] != 'language-content' or 'contents' not in instance:
return
for key, value in instance['contents'].items():
if key not in enums.LANG_CODES:
yield JSONError("Invalid key '%s' in 'contents' property must be"
" an RFC 5646 code" % key, instance['id'])
for subkey, subvalue in value.items():
if not PROPERTY_FORMAT_RE.match(subkey):
yield JSONError("'%s' in '%s' of the 'contents' property is "
"invalid and must match a valid property name"
% (subkey, key), instance['id'], 'observable-dictionary-keys')
def _read_triplets_dataframe(filename):
""" Reads the original dataset TSV as a pandas dataframe """
# delay importing this to avoid another dependency
import pandas
# read in triples of user/artist/playcount from the input dataset
# get a model based off the input params
start = time.time()
log.debug("reading data from %s", filename)
data = pandas.read_table(filename, names=['user', 'track', 'plays'])
# map each artist and user to a unique numeric value
data['user'] = data['user'].astype("category")
data['track'] = data['track'].astype("category")
# store as a CSR matrix
log.debug("read data file in %s", time.time() - start)
return data
def get_timeout(self, callback=None):
"""
Get the visibility timeout for the queue.
:rtype: int
:return: The number of seconds as an integer.
"""
def got_timeout(a):
if callable(callback):
callback(int(a['VisibilityTimeout']))
self.get_attributes('VisibilityTimeout', callback=got_timeout)
def build_response(headers: Headers, key: str) -> None:
"""
Build a handshake response to send to the client.
``key`` comes from :func:`check_request`.
"""
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
headers["Sec-WebSocket-Accept"] = accept(key) | 0.003356 |
def update_file(filename, result, content, indent):
"""Updates a Jekyll file to contain the counts form an object
This just converts the results to YAML and adds to the Jekyll frontmatter.
Args:
filename: the Jekyll file to update
result: the results object from `wc`
content: the contents of the original file
indent: the indentation level for dumping YAML
"""
# Split the file into frontmatter and content
parts = re.split('---+', content, 2)
# Load the frontmatter into an object
frontmatter = yaml.safe_load(parts[1])
# Add the counts entry in the results object to the frontmatter
frontmatter['counts'] = result['counts']
# Set the frontmatter part backed to the stringified version of the
# frontmatter object
parts[1] = '\n{}'.format(
yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent))
result = '---'.join(parts)
# Write everything back to the file
with open(filename, 'wb') as f:
f.write(result.encode('utf-8'))
print('{} updated.'.format(filename))
def sys_chdir(self, path):
"""
chdir - Change current working directory
:param int path: Pointer to path
"""
path_str = self.current.read_string(path)
logger.debug(f"chdir({path_str})")
try:
os.chdir(path_str)
return 0
except OSError as e:
return e.errno
def getChild(self, path, request):
"""
This is necessary because the parent class would call
proxy.ReverseProxyResource instead of CacheProxyResource
"""
return CacheProxyResource(
self.host, self.port, self.path + '/' + urlquote(path, safe=""),
self.reactor
)
def get_in(keys, coll, default=None, no_default=False):
"""
NB: This is a straight copy of the get_in implementation found in
the toolz library (https://github.com/pytoolz/toolz/). It works
with persistent data structures as well as the corresponding
datastructures from the stdlib.
Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
``no_default`` is specified, then it raises KeyError or IndexError.
``get_in`` is a generalization of ``operator.getitem`` for nested data
structures such as dictionaries and lists.
>>> from pyrsistent import freeze
>>> transaction = freeze({'name': 'Alice',
... 'purchase': {'items': ['Apple', 'Orange'],
... 'costs': [0.50, 1.25]},
... 'credit card': '5555-1234-1234-1234'})
>>> get_in(['purchase', 'items', 0], transaction)
'Apple'
>>> get_in(['name'], transaction)
'Alice'
>>> get_in(['purchase', 'total'], transaction)
>>> get_in(['purchase', 'items', 'apple'], transaction)
>>> get_in(['purchase', 'items', 10], transaction)
>>> get_in(['purchase', 'total'], transaction, 0)
0
>>> get_in(['y'], {}, no_default=True)
Traceback (most recent call last):
...
KeyError: 'y'
"""
try:
return reduce(operator.getitem, keys, coll)
except (KeyError, IndexError, TypeError):
if no_default:
raise
return default
def v1_tag_suggest(request, tags, prefix, parent=''):
'''Provide fast suggestions for tag components.
This yields suggestions for *components* of a tag and a given
prefix. For example, given the tags ``foo/bar/baz`` and
``fob/bob``, here are some example completions (ordering may be
different):
.. code-block:: text
/dossier/v1/tags/suggest/prefix/f => ['foo', 'fob']
/dossier/v1/tags/suggest/prefix/foo => ['foo']
/dossier/v1/tags/suggest/prefix/b/parent/foo => ['bar']
/dossier/v1/tags/suggest/prefix/b/parent/fob => ['bob']
/dossier/v1/tags/suggest/prefix/b/parent/foo/bar => ['baz']
N.B. Each of the lists above are wrapped in the following
JSON envelope for the response:
.. code-block:: text
{'suggestions': ['foo', 'fob']}
An optional query parameter, ``limit``, may be passed to control
the number of suggestions returned.
'''
prefix = prefix.decode('utf-8').strip()
parent = parent.decode('utf-8').strip()
limit = min(10000, int(request.params.get('limit', 100)))
return {'suggestions': tags.suggest(parent, prefix, limit=limit)}
def disk_xml(identifier, pool_xml, base_volume_xml, cow):
"""Clones volume_xml updating the required fields.
* name
* target path
* backingStore
"""
pool = etree.fromstring(pool_xml)
base_volume = etree.fromstring(base_volume_xml)
pool_path = pool.find('.//path').text
base_path = base_volume.find('.//target/path').text
target_path = os.path.join(pool_path, '%s.qcow2' % identifier)
volume_xml = VOLUME_DEFAULT_CONFIG.format(identifier, target_path)
volume = etree.fromstring(volume_xml)
base_volume_capacity = base_volume.find(".//capacity")
volume.append(base_volume_capacity)
if cow:
backing_xml = BACKING_STORE_DEFAULT_CONFIG.format(base_path)
backing_store = etree.fromstring(backing_xml)
volume.append(backing_store)
return etree.tostring(volume).decode('utf-8')
def collection(self, code):
"""
Retrieve the collection ids according to the given 3 letters acronym
"""
result = None
result = self.dispatcher(
'get_collection',
code=code
)
if not result:
logger.info('Collection not found for: %s', code)
return None
return result
def get_readonly_fields(self, request, obj=None):
"""
This is required a subclass of VersionedAdmin has readonly_fields
ours won't be undone
"""
if obj:
return list(self.readonly_fields) + ['id', 'identity',
'is_current']
return self.readonly_fields | 0.005587 |
def _robust_rmtree(path, logger=None, max_retries=5):
"""Try to delete paths robustly .
Retries several times (with increasing delays) if an OSError
occurs. If the final attempt fails, the Exception is propagated
to the caller. Taken from https://github.com/hashdist/hashdist/pull/116
"""
for i in range(max_retries):
try:
shutil.rmtree(path)
return
except OSError as e:
if logger:
                logger.info('Unable to remove path: %s' % path)
                logger.info('Retrying after %d seconds' % i)
time.sleep(i)
# Final attempt, pass any Exceptions up to caller.
shutil.rmtree(path) | 0.001479 |
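A short usage sketch with the standard logging module; the temporary directory exists only for illustration.

import logging
import tempfile

log = logging.getLogger(__name__)
tmp = tempfile.mkdtemp()
_robust_rmtree(tmp, logger=log)   # retries with growing delays before re-raising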
def add_menu(self, menu):
'''add a new menu'''
self.menu.add(menu)
self.mpstate.console.set_menu(self.menu, self.menu_callback) | 0.013245 |
def convolve_stack(data, kernel, rot_kernel=False, method='scipy'):
r"""Convolve stack of data with stack of kernels
This method convolves the input data with a given kernel using FFT and
is the default convolution used for all routines
Parameters
----------
data : np.ndarray
Input data array, normally a 2D image
kernel : np.ndarray
Input kernel array, normally a 2D kernel
rot_kernel : bool
Option to rotate kernels by 180 degrees
method : str {'astropy', 'scipy'}, optional
Convolution method (default is 'scipy')
Returns
-------
np.ndarray convolved data
Examples
--------
>>> from math.convolve import convolve
>>> import numpy as np
>>> a = np.arange(18).reshape(2, 3, 3)
>>> b = a + 10
>>> convolve_stack(a, b)
array([[[ 534., 525., 534.],
[ 453., 444., 453.],
[ 534., 525., 534.]],
<BLANKLINE>
[[ 2721., 2712., 2721.],
[ 2640., 2631., 2640.],
[ 2721., 2712., 2721.]]])
>>> convolve_stack(a, b, rot_kernel=True)
array([[[ 474., 483., 474.],
[ 555., 564., 555.],
[ 474., 483., 474.]],
<BLANKLINE>
[[ 2661., 2670., 2661.],
[ 2742., 2751., 2742.],
[ 2661., 2670., 2661.]]])
See Also
--------
convolve : The convolution function called by convolve_stack
"""
if rot_kernel:
kernel = rotate_stack(kernel)
return np.array([convolve(data_i, kernel_i, method=method) for data_i,
kernel_i in zip(data, kernel)]) | 0.000611 |
def _K(m):
""" matrix K_m from Wiktorsson2001 """
M = m*(m - 1)//2
K = np.zeros((M, m**2), dtype=np.int64)
row = 0
for j in range(1, m):
col = (j - 1)*m + j
s = m - j
K[row:(row+s), col:(col+s)] = np.eye(s)
row += s
return K | 0.003571 |
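A quick worked illustration (derived from the code above, not from the cited paper): for m = 3 the matrix picks the three entries above the diagonal out of a row-major flattened 3x3 matrix.

import numpy as np

K3 = _K(3)
A = np.arange(9).reshape(3, 3)   # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
print(K3 @ A.reshape(-1))        # [1 2 5], i.e. A[0, 1], A[0, 2], A[1, 2]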
def choice_voters_changed_update_cache(
sender, instance, action, reverse, model, pk_set, **kwargs):
"""Update cache when choice.voters changes."""
if action not in ('post_add', 'post_remove', 'post_clear'):
# post_clear is not handled, because clear is called in
# django.db.models.fields.related.ReverseManyRelatedObjects.__set__
# before setting the new order
return
if model == User:
assert type(instance) == Choice
choices = [instance]
if pk_set:
users = list(User.objects.filter(pk__in=pk_set))
else:
users = []
else:
if pk_set:
choices = list(Choice.objects.filter(pk__in=pk_set))
else:
choices = []
users = [instance]
from .tasks import update_cache_for_instance
for choice in choices:
update_cache_for_instance('Choice', choice.pk, choice)
for user in users:
update_cache_for_instance('User', user.pk, user) | 0.000994 |
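A hedged sketch of how such a handler is usually wired up through Django's m2m_changed signal; the assumption is that Choice.voters is a ManyToManyField to User, which matches the assertions above.

from django.db.models.signals import m2m_changed

# Fire the cache update whenever the voters membership of a Choice changes.
m2m_changed.connect(choice_voters_changed_update_cache,
                    sender=Choice.voters.through)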
def match(self, path_pattern):
"""
Return True if this path matches the given pattern.
"""
cf = self._flavour.casefold
path_pattern = cf(path_pattern)
drv, root, pat_parts = self._flavour.parse_parts((path_pattern,))
if not pat_parts:
raise ValueError("empty pattern")
if drv and drv != cf(self._drv):
return False
if root and root != cf(self._root):
return False
parts = self._cparts
if drv or root:
if len(pat_parts) != len(parts):
return False
pat_parts = pat_parts[1:]
elif len(pat_parts) > len(parts):
return False
for part, pat in zip(reversed(parts), reversed(pat_parts)):
if not fnmatch.fnmatchcase(part, pat):
return False
return True | 0.002296 |
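The logic appears to mirror pathlib's PurePath.match, so the stdlib version illustrates the same semantics:

from pathlib import PurePosixPath

print(PurePosixPath('/var/log/syslog.1').match('*.1'))     # True: relative patterns match from the right
print(PurePosixPath('/var/log/syslog.1').match('/var/*'))  # False: absolute patterns must cover the whole path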
def index_by_id(cls, target_id, resources):
"""Helper method to fetch the index of a resource by its id or address
Args:
resources (list of objects): The resources to be paginated
target_id (string): The address or header_signature of the resource
Returns:
integer: The index of the target resource
Raises:
AssertionError: Raised if the target is not found
"""
for index in range(len(resources)):
if cls.id_by_index(index, resources) == target_id:
return index
raise AssertionError | 0.003241 |
def remove_node(self, node, stop=False):
"""Removes a node from the cluster.
        By default, it doesn't also stop the node; it just removes it from
        the known hosts of this cluster.
:param node: node to remove
:type node: :py:class:`Node`
:param stop: Stop the node
:type stop: bool
"""
if node.kind not in self.nodes:
raise NodeNotFound("Unable to remove node %s: invalid node type `%s`.",
node.name, node.kind)
else:
try:
index = self.nodes[node.kind].index(node)
if self.nodes[node.kind][index]:
del self.nodes[node.kind][index]
if stop:
node.stop()
self._naming_policy.free(node.kind, node.name)
self.repository.save_or_update(self)
remaining_nodes = self.get_all_nodes()
self._gather_node_ip_addresses(
remaining_nodes, self.start_timeout, self.ssh_probe_timeout,
remake=True)
except ValueError:
raise NodeNotFound("Node %s not found in cluster" % node.name) | 0.003306 |
def metadata(func):
"""Add metadata to an item.
Decorator that adds metadata to a given item such as
the gelk revision used.
"""
@functools.wraps(func)
def decorator(self, *args, **kwargs):
eitem = func(self, *args, **kwargs)
metadata = {
'metadata__gelk_version': self.gelk_version,
'metadata__gelk_backend_name': self.__class__.__name__,
'metadata__enriched_on': datetime_utcnow().isoformat()
}
eitem.update(metadata)
return eitem
return decorator | 0.001802 |
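A minimal sketch of applying the decorator; the enricher class, its gelk_version value and the get_rich_item name are hypothetical stand-ins, not part of the original backend.

class DummyEnricher:
    gelk_version = '0.1.0'   # hypothetical version string

    @metadata
    def get_rich_item(self, item):
        return {'uuid': item['uuid']}

enriched = DummyEnricher().get_rich_item({'uuid': '42'})
# enriched now also carries metadata__gelk_version, metadata__gelk_backend_name
# and metadata__enriched_on next to the original fields.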
def toString(self, format_='fasta-ss', structureSuffix=':structure'):
"""
Convert the read to a string in PDB format (sequence & structure). This
consists of two FASTA records, one for the sequence then one for the
structure.
@param format_: Either 'fasta-ss' or 'fasta'. In the former case, the
structure information is returned. Otherwise, plain FASTA is
returned.
@param structureSuffix: The C{str} suffix to append to the read id
for the second FASTA record, containing the structure information.
        @raise ValueError: If C{format_} is not 'fasta' or 'fasta-ss'.
@return: A C{str} representing the read sequence and structure in PDB
FASTA format.
"""
if format_ == 'fasta-ss':
return '>%s\n%s\n>%s%s\n%s\n' % (
self.id, self.sequence, self.id, structureSuffix,
self.structure)
else:
if six.PY3:
return super().toString(format_=format_)
else:
return AARead.toString(self, format_=format_) | 0.0018 |
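For example, a read with id 'seq1', sequence 'REDD' and structure 'HHHH' would yield the following 'fasta-ss' output (derived from the format string above):

>seq1
REDD
>seq1:structure
HHHH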
def get_filepath_findex_from_user(self, wildcard, message, style,
filterindex=0):
"""Opens a file dialog and returns filepath and filterindex
Parameters
----------
wildcard: String
\tWildcard string for file dialog
message: String
\tMessage in the file dialog
style: Integer
\tDialog style, e. g. wx.OPEN | wx.CHANGE_DIR
filterindex: Integer, defaults to 0
\tDefault filterindex that is selected when the dialog is displayed
"""
dlg = wx.FileDialog(self.main_window, wildcard=wildcard,
message=message, style=style,
defaultDir=os.getcwd(), defaultFile="")
# Set the initial filterindex
dlg.SetFilterIndex(filterindex)
filepath = None
filter_index = None
if dlg.ShowModal() == wx.ID_OK:
filepath = dlg.GetPath()
filter_index = dlg.GetFilterIndex()
dlg.Destroy()
return filepath, filter_index | 0.002788 |
def has_job(self, job):
"""
Checks whether there exists another job which has the same job key.
:param job: The job to check.
:return: True if there exists another job with the same key, False otherwise.
"""
job_key = self._job_key(job)
return job_key in self._job_map | 0.009146 |
def init_module(path):
"""Attempt to import a Python module located at path. If
successful, and if the newly imported module has an init()
function, then set the global PATH in order to simplify the
add_bundle() interface and call init() on the module, passing the
current global namespace, conveniently converted into a DictObj so
that it can be accessed with normal module style dot notation
instead of as a dict.
http://stackoverflow.com/questions/990422/how-to-get-a-reference-to-current-modules-attributes-in-python"""
mod = import_by_path(path)
if mod is not None and hasattr(mod, 'init'):
logger.debug('calling init on {0}'.format(mod))
global PATH
PATH = path
mod.init(DictObj(globals())) | 0.002591 |
def _get_hashed_params(self, page_text):
"""Get hashed params which will be used when you login, see issue#10"""
soup = BeautifulSoup(page_text, 'html.parser')
return [tag['name'] for tag in soup.find_all('input', class_='sl')] | 0.007968 |
def parse_header_line(self, line):
"""docstring for parse_header_line"""
self.header = line[1:].rstrip().split('\t')
if len(self.header) < 9:
self.header = line[1:].rstrip().split()
self.individuals = self.header[9:] | 0.007692 |
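A worked example with a tab-separated header line; the surrounding parser class is not shown here, so the expected attribute values are given as comments.

line = ('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t'
        'sample_1\tsample_2\n')
# After parse_header_line(line):
#   self.header      == ['CHROM', 'POS', ..., 'FORMAT', 'sample_1', 'sample_2']
#   self.individuals == ['sample_1', 'sample_2']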
def __merge_clusters(self, cluster1, cluster2):
"""!
@brief Merges two clusters and returns new merged cluster. Representation points and mean points are calculated for the new cluster.
@param[in] cluster1 (cure_cluster): Cluster that should be merged.
@param[in] cluster2 (cure_cluster): Cluster that should be merged.
@return (cure_cluster) New merged CURE cluster.
"""
merged_cluster = cure_cluster(None, None)
merged_cluster.points = cluster1.points + cluster2.points
merged_cluster.indexes = cluster1.indexes + cluster2.indexes
# merged_cluster.mean = ( len(cluster1.points) * cluster1.mean + len(cluster2.points) * cluster2.mean ) / ( len(cluster1.points) + len(cluster2.points) );
dimension = len(cluster1.mean)
merged_cluster.mean = [0] * dimension
if merged_cluster.points[1:] == merged_cluster.points[:-1]:
merged_cluster.mean = merged_cluster.points[0]
else:
for index in range(dimension):
                merged_cluster.mean[index] = (len(cluster1.points) * cluster1.mean[index] + len(cluster2.points) * cluster2.mean[index]) / (len(cluster1.points) + len(cluster2.points))
temporary = list()
for index in range(self.__number_represent_points):
maximal_distance = 0
maximal_point = None
for point in merged_cluster.points:
minimal_distance = 0
if index == 0:
minimal_distance = euclidean_distance_square(point, merged_cluster.mean)
#minimal_distance = euclidean_distance_sqrt(point, merged_cluster.mean);
else:
minimal_distance = min([euclidean_distance_square(point, p) for p in temporary])
#minimal_distance = cluster_distance(cure_cluster(point), cure_cluster(temporary[0]));
if minimal_distance >= maximal_distance:
maximal_distance = minimal_distance
maximal_point = point
if maximal_point not in temporary:
temporary.append(maximal_point)
for point in temporary:
representative_point = [0] * dimension
for index in range(dimension):
representative_point[index] = point[index] + self.__compression * (merged_cluster.mean[index] - point[index])
merged_cluster.rep.append(representative_point)
return merged_cluster | 0.011418 |
def start_check (aggregate, out):
"""Start checking in background and write encoded output to out."""
# check in background
t = threading.Thread(target=director.check_urls, args=(aggregate,))
t.start()
# time to wait for new data
sleep_seconds = 2
# current running time
run_seconds = 0
while not aggregate.is_finished():
yield out.get_data()
time.sleep(sleep_seconds)
run_seconds += sleep_seconds
if run_seconds > MAX_REQUEST_SECONDS:
director.abort(aggregate)
break
yield out.get_data() | 0.003425 |
def _get_credentials(vcap_services, service_name=None):
"""Retrieves the credentials of the VCAP Service of the specified `service_name`. If
    `service_name` is not specified, it is taken from the STREAMING_ANALYTICS_SERVICE_NAME
    environment variable.
Args:
vcap_services (dict): A dict representation of the VCAP Services information.
        service_name (str): One of the service names stored in `vcap_services`
Returns:
dict: A dict representation of the credentials.
Raises:
ValueError: Cannot find `service_name` in `vcap_services`
"""
service_name = service_name or os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None)
# Get the service corresponding to the SERVICE_NAME
services = vcap_services['streaming-analytics']
creds = None
for service in services:
if service['name'] == service_name:
creds = service['credentials']
break
# If no corresponding service is found, error
if creds is None:
raise ValueError("Streaming Analytics service " + str(service_name) + " was not found in VCAP_SERVICES")
return creds | 0.005181 |
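An illustrative VCAP_SERVICES fragment (the service name and credential keys are made up) showing the structure the lookup expects:

vcap = {
    'streaming-analytics': [
        {'name': 'my-streams-service',
         'credentials': {'v2_rest_url': 'https://example.com/v2', 'apikey': 'xxxx'}}
    ]
}
creds = _get_credentials(vcap, service_name='my-streams-service')
# creds == {'v2_rest_url': 'https://example.com/v2', 'apikey': 'xxxx'}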
def from_path(cls, conn, path):
"""Create container from path."""
path = path.strip(SEP)
full_path = os.path.join(conn.abs_root, path)
return cls(conn, path, 0, os.path.getsize(full_path)) | 0.009091 |
def _GetArgs(self):
""" Parse plusargs. """
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--src', help='override database source directory')
parser.add_argument('-d', '--dst', help='override database destination directory')
parser.add_argument('-e', '--extract', help='enable extracting of rar files', action="store_true")
parser.add_argument('-c', '--copy', help='enable copying between file systems', action="store_true")
parser.add_argument('-i', '--inplace', help='rename files in place', action="store_true")
parser.add_argument('-u', '--update_db', help='provides option to update existing database fields', action="store_true")
parser.add_argument('-p', '--print_db', help='print contents of database', action="store_true")
parser.add_argument('-n', '--no_input', help='automatically accept or skip all user input', action="store_true")
parser.add_argument('-nr', '--no_input_rename', help='automatically accept or skip user input for guide lookup and rename', action="store_true")
parser.add_argument('-ne', '--no_input_extract', help='automatically accept or skip user input for extraction', action="store_true")
parser.add_argument('--debug', help='enable full logging', action="store_true")
parser.add_argument('--tags', help='enable tags on log info', action="store_true")
parser.add_argument('--test', help='run with test database', action="store_true")
parser.add_argument('--reset', help='resets database', action="store_true")
args = parser.parse_args()
if args.test:
self._databasePath = 'test.db'
if args.no_input or args.no_input_rename:
self._skipUserInputRename = True
if args.no_input or args.no_input_extract:
self._skipUserInputExtract = True
if args.reset:
goodlogging.Log.Info("CLEAR", "*WARNING* YOU ARE ABOUT TO DELETE DATABASE {0}".format(self._databasePath))
response = goodlogging.Log.Input("CLEAR", "Are you sure you want to proceed [y/n]? ")
if response.lower() == 'y':
if(os.path.isfile(self._databasePath)):
os.remove(self._databasePath)
else:
sys.exit(0)
if args.inplace:
self._inPlaceRename = True
if args.copy:
self._crossSystemCopyEnabled = True
if args.tags:
goodlogging.Log.tagsEnabled = 1
if args.debug:
goodlogging.Log.verbosityThreshold = goodlogging.Verbosity.MINIMAL
if args.update_db:
self._dbUpdate = True
if args.print_db:
self._dbPrint = True
if args.extract:
self._enableExtract = True
if args.src:
if os.path.isdir(args.src):
self._sourceDir = args.src
else:
goodlogging.Log.Fatal("CLEAR", 'Source directory argument is not recognised as a directory: {}'.format(args.src))
if args.dst:
if os.path.isdir(args.dst):
self._tvDir = args.dst
else:
goodlogging.Log.Fatal("CLEAR", 'Target directory argument is not recognised as a directory: {}'.format(args.dst)) | 0.012211 |
def from_mult_iters(cls, name=None, idx=None, **kwargs):
"""Load values from multiple iters
Parameters
----------
name : string, default None
Name of the data set. If None (default), the name will be set to
``'table'``.
idx: string, default None
Iterable to use for the data index
**kwargs : dict of iterables
The ``values`` field will contain dictionaries with keys for
each of the iterables provided. For example,
            d = Data.from_mult_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30))
        would result in ``d`` having a ``values`` field with
        [{'idx': 0, 'col': 'y', 'val': 10},
         {'idx': 1, 'col': 'y', 'val': 20},
         {'idx': 5, 'col': 'y', 'val': 30}]
If the iterables are not the same length, then ValueError is
raised.
"""
if not name:
name = 'table'
lengths = [len(v) for v in kwargs.values()]
if len(set(lengths)) != 1:
raise ValueError('Iterables must all be same length')
if not idx:
raise ValueError('Must provide iter name index reference')
index = kwargs.pop(idx)
vega_vals = []
for k, v in sorted(kwargs.items()):
for idx, val in zip(index, v):
value = {}
value['idx'] = idx
value['col'] = k
value['val'] = val
vega_vals.append(value)
return cls(name, values=vega_vals) | 0.00131 |
def make_key(table_name, objid):
"""Create an object key for storage."""
key = datastore.Key()
path = key.path_element.add()
path.kind = table_name
path.name = str(objid)
return key | 0.004878 |
def convert_timestamp(timestamp):
"""
Converts bokehJS timestamp to datetime64.
"""
datetime = dt.datetime.utcfromtimestamp(timestamp/1000.)
return np.datetime64(datetime.replace(tzinfo=None)) | 0.004717 |
def resolve_remove_symbols(self, r, target_group_ids, rgroups):
"""Resolve the resources security groups that need be modified.
Specifically handles symbolic names that match annotations from policy filters
for groups being removed.
"""
if 'matched' in target_group_ids:
return r.get('c7n:matched-security-groups', ())
elif 'network-location' in target_group_ids:
for reason in r.get('c7n:NetworkLocation', ()):
if reason['reason'] == 'SecurityGroupMismatch':
return list(reason['security-groups'])
elif 'all' in target_group_ids:
return rgroups
return target_group_ids | 0.004249 |
def remote(fn, name=None, types=None):
"""Decorator that adds a remote attribute to a function.
fn -- function being decorated
name -- aliased name of the function, used for remote proxies
types -- a argument type specifier, can be used to ensure
arguments are of the correct type
"""
if not name:
name = fn.__name__
fn.remote = {"name": name, "types": types}
return fn | 0.002358 |
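A short usage sketch; the types specifier is whatever the surrounding RPC framework expects, so the tuple below is only an assumption.

@remote
def ping():
    return 'pong'

print(ping.remote)   # {'name': 'ping', 'types': None}

def add(a, b):
    return a + b

# Aliasing or typing requires applying the decorator explicitly:
add = remote(add, name='sum', types=(int, int))
print(add.remote)    # {'name': 'sum', 'types': (int, int)}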
def generate_tuple_zip(self, token_list, n=2):
'''
Generate the N-gram.
Args:
token_list: The list of tokens.
            n:              The N of the N-gram (defaults to 2).
Returns:
zip of Tuple(N-gram)
'''
return zip(*[token_list[i:] for i in range(n)]) | 0.006452 |
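The core expression does not depend on self, so a standalone illustration of the bigram case:

tokens = ['natural', 'language', 'processing', 'rocks']
print(list(zip(*[tokens[i:] for i in range(2)])))
# [('natural', 'language'), ('language', 'processing'), ('processing', 'rocks')]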
async def geoadd(self, name, *values):
"""
Add the specified geospatial items to the specified key identified
by the ``name`` argument. The Geospatial items are given as ordered
members of the ``values`` argument, each item or place is formed by
the triad latitude, longitude and name.
"""
if len(values) % 3 != 0:
raise RedisError("GEOADD requires places with lon, lat and name"
" values")
return await self.execute_command('GEOADD', name, *values) | 0.00361 |
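A usage sketch, assuming client is an instance of the class this coroutine belongs to; coordinates go longitude first, matching the Redis GEOADD command.

async def add_cities(client):
    # Equivalent to: GEOADD Sicily 13.361389 38.115556 Palermo 15.087269 37.502669 Catania
    await client.geoadd('Sicily',
                        13.361389, 38.115556, 'Palermo',
                        15.087269, 37.502669, 'Catania')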
def stop(self):
"""Stop all of the connections."""
_logger.debug("Emitting quit signal for connections.")
self.__quit_ev.set()
_logger.info("Waiting for connection manager to stop.")
self.__manage_g.join() | 0.008097 |
def get_web_auth_url(self):
"""
        The user must open this page and authorize the application
        first, then call get_web_auth_session_key(url) after that.
"""
token = self._get_web_auth_token()
url = "{homepage}/api/auth/?api_key={api}&token={token}".format(
homepage=self.network.homepage, api=self.network.api_key, token=token
)
self.web_auth_tokens[url] = token
return url | 0.006865 |
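A hedged sketch of the intended desktop-authorization flow; generator stands in for an instance of this class, and get_web_auth_session_key is the companion call named in the docstring.

import webbrowser

url = generator.get_web_auth_url()
webbrowser.open(url)          # the user approves access in the browser
# ...once the user has authorized the application:
session_key = generator.get_web_auth_session_key(url)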