def tree_multiresolution(G, Nlevel, reduction_method='resistance_distance',
compute_full_eigen=False, root=None):
r"""Compute a multiresolution of trees
Parameters
----------
G : Graph
Graph structure of a tree.
Nlevel : int
Number of times to downsample and coarsen the tree
root : int
The index of the root of the tree. (default = 1)
reduction_method : str
The graph reduction method (default = 'resistance_distance')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues for every tree in the sequence
Returns
-------
Gs : ndarray
Ndarray, with each element containing a graph structure representing a reduced tree.
subsampled_vertex_indices : ndarray
Indices of the vertices of the previous tree that are kept for the subsequent tree.
"""
if not root:
if hasattr(G, 'root'):
root = G.root
else:
root = 1
Gs = [G]
if compute_full_eigen:
Gs[0].compute_fourier_basis()
subsampled_vertex_indices = []
depths, parents = _tree_depths(G.A, root)
old_W = G.W
for lev in range(Nlevel):
# Identify the vertices in the even depths of the current tree
down_odd = np.round(depths) % 2
down_even = np.ones((Gs[lev].N)) - down_odd
keep_inds = np.where(down_even == 1)[0]
subsampled_vertex_indices.append(keep_inds)
# There will be one undirected edge in the new graph connecting each
# non-root subsampled vertex to its new parent. Here, we find the new
# indices of the new parents
non_root_keep_inds = np.setdiff1d(keep_inds, [root])
new_non_root_inds = np.searchsorted(keep_inds, non_root_keep_inds)
old_parents_of_non_root_keep_inds = parents[non_root_keep_inds]
old_grandparents_of_non_root_keep_inds = parents[old_parents_of_non_root_keep_inds]
# TODO new_non_root_parents = dsearchn(keep_inds, old_grandparents_of_non_root_keep_inds)
old_W_i_inds, old_W_j_inds, old_W_weights = sparse.find(old_W)
i_inds = np.concatenate((new_non_root_inds, new_non_root_parents))
j_inds = np.concatenate((new_non_root_parents, new_non_root_inds))
new_N = int(np.sum(down_even))
if reduction_method == "unweighted":
new_weights = np.ones(np.shape(i_inds))
elif reduction_method == "sum":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds,old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds]);
old_weights_to_parents = old_W_weights[old_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = old_weights_to_parents + old_weights_parents_to_grandparents
new_weights = np.concatenate((new_weights, new_weights))
elif reduction_method == "resistance_distance":
# TODO old_weights_to_parents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [non_root_keep_inds, old_parents_of_non_root_keep_inds])
old_weights_to_parents = old_W_weights[old_weights_to_parents_inds]
# old_W(non_root_keep_inds,old_parents_of_non_root_keep_inds);
# TODO old_weights_parents_to_grandparents_inds = dsearchn([old_W_i_inds, old_W_j_inds], [old_parents_of_non_root_keep_inds, old_grandparents_of_non_root_keep_inds])
old_weights_parents_to_grandparents = old_W_weights[old_weights_parents_to_grandparents_inds]
# old_W(old_parents_of_non_root_keep_inds,old_grandparents_of_non_root_keep_inds);
new_weights = 1./(1./old_weights_to_parents + 1./old_weights_parents_to_grandparents)
new_weights = np.concatenate((new_weights, new_weights))
else:
raise ValueError('Unknown graph reduction method.')
new_W = sparse.csc_matrix((new_weights, (i_inds, j_inds)),
shape=(new_N, new_N))
# Update parents
new_root = np.where(keep_inds == root)[0]
parents = np.zeros(np.shape(keep_inds)[0])
parents[new_non_root_inds] = new_non_root_parents
# Update depths
depths = depths[keep_inds]
depths = depths/2.
# Store new tree
Gtemp = graphs.Graph(new_W, coords=Gs[lev].coords[keep_inds], limits=G.limits, root=new_root)
#Gs[lev].copy_graph_attributes(Gtemp, False)
if compute_full_eigen:
Gtemp.compute_fourier_basis()
# Replace current adjacency matrix and root
Gs.append(Gtemp)
old_W = new_W
root = new_root
return Gs, subsampled_vertex_indices | r"""Compute a multiresolution of trees
Parameters
----------
G : Graph
Graph structure of a tree.
Nlevel : int
Number of times to downsample and coarsen the tree
root : int
The index of the root of the tree. (default = 1)
reduction_method : str
The graph reduction method (default = 'resistance_distance')
compute_full_eigen : bool
To also compute the graph Laplacian eigenvalues for every tree in the sequence
Returns
-------
Gs : ndarray
Ndarray, with each element containing a graph structure representing a reduced tree.
subsampled_vertex_indices : ndarray
Indices of the vertices of the previous tree that are kept for the subsequent tree. |
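For reference, the 'sum' and 'resistance_distance' branches above differ only in how the two edges removed at each odd-depth vertex are merged; the latter combines them like resistors in series. A small standalone check of both rules, with made-up weights:

import numpy as np

# Hypothetical edge weights: child->parent and parent->grandparent.
w_child_to_parent = np.array([1.0, 2.0, 0.5])
w_parent_to_grandparent = np.array([1.0, 4.0, 0.5])

# "sum" keeps the total weight of the two-edge path; "resistance_distance"
# treats the two edges as resistances in series.
summed = w_child_to_parent + w_parent_to_grandparent
resistance = 1.0 / (1.0 / w_child_to_parent + 1.0 / w_parent_to_grandparent)

print(summed)      # series sums: 2.0, 6.0, 1.0
print(resistance)  # series resistances: 0.5, 1.333..., 0.25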
def _convert_or_shorten_month(cls, data):
"""
Convert a given month into our unified format.
:param data: The month to convert or shorten.
:type data: str
:return: The unified month name.
:rtype: str
"""
# We map the different month and their possible representation.
short_month = {
"jan": [str(1), "01", "Jan", "January"],
"feb": [str(2), "02", "Feb", "February"],
"mar": [str(3), "03", "Mar", "March"],
"apr": [str(4), "04", "Apr", "April"],
"may": [str(5), "05", "May"],
"jun": [str(6), "06", "Jun", "June"],
"jul": [str(7), "07", "Jul", "July"],
"aug": [str(8), "08", "Aug", "August"],
"sep": [str(9), "09", "Sep", "September"],
"oct": [str(10), "Oct", "October"],
"nov": [str(11), "Nov", "November"],
"dec": [str(12), "Dec", "December"],
}
for month in short_month:
# We loop through our map.
if data in short_month[month]:
# If the parsed data (or month if you prefer) is into our map.
# We return the element (or key if you prefer) assigned to
# the month.
return month
# The element is not into our map.
# We return the parsed element (or month if you prefer).
return data | Convert a given month into our unified format.
:param data: The month to convert or shorten.
:type data: str
:return: The unified month name.
:rtype: str |
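A standalone sketch of the same lookup, outside the class, showing how several spellings collapse onto the three-letter key (only a few entries of the map are reproduced):

short_month = {
    "jan": ["1", "01", "Jan", "January"],
    "may": ["5", "05", "May"],
    "oct": ["10", "Oct", "October"],
}

def shorten(data):
    # Return the key whose alias list contains the input, else the input unchanged.
    for month, aliases in short_month.items():
        if data in aliases:
            return month
    return data

print(shorten("January"))  # jan
print(shorten("05"))       # may
print(shorten("Foo"))      # Foo (not in the map, returned as-is)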
def rect(self):
"""rect(self) -> PyObject *"""
CheckParent(self)
val = _fitz.Link_rect(self)
val = Rect(val)
return val | rect(self) -> PyObject * |
def gausspars(fwhm, nsigma=1.5, ratio=1, theta=0.):
"""
height - the amplitude of the gaussian
x0, y0, - center of the gaussian
fwhm - full width at half maximum of the observation
nsigma - cut the gaussian at nsigma
ratio = ratio of xsigma/ysigma
theta - angle of position angle of the major axis measured
counter-clockwise from the x axis
Returns dimensions nx and ny of the elliptical kernel as well as the
ellipse parameters a, b, c, and f when defining an ellipse through the
quadratic form: a*(x-x0)^2+b(x-x0)*(y-y0)+c*(y-y0)^2 <= 2*f
"""
xsigma = fwhm / FWHM2SIG
ysigma = ratio * xsigma
f = nsigma**2/2.
theta = np.deg2rad(theta)
cost = np.cos(theta)
sint = np.sin(theta)
if ratio == 0: # 1D Gaussian
if theta == 0 or theta == 180:
a = 1/xsigma**2
b = 0.0
c = 0.0
elif theta == 90:
a = 0.0
b = 0.0
c = 1/xsigma**2
else:
raise ValueError('Unable to construct 1D Gaussian with these parameters')
nx = 2 * int(max(2, (xsigma*nsigma*np.abs(cost))))+1
ny = 2 * int(max(2, (xsigma*nsigma*np.abs(sint))))+1
else: #2D gaussian
xsigma2 = xsigma * xsigma
ysigma2 = ysigma * ysigma
a = cost**2/xsigma2 + sint**2/ysigma2
b = 2 * cost * sint *(1.0/xsigma2-1.0/ysigma2)
c = sint**2/xsigma2 + cost**2/ysigma2
d = b**2 - 4*a*c # discriminant
# nx = int(2*max(2, math.sqrt(-8*c*f/d)))+1
# ny = int(2*max(2, math.sqrt(-8*a*f/d)))+1
nx = 2 * int(2*max(1, nsigma*math.sqrt(-c/d)))+1
ny = 2 * int(2*max(1, nsigma*math.sqrt(-a/d)))+1
return nx, ny, a, b, c, f | height - the amplitude of the gaussian
x0, y0, - center of the gaussian
fwhm - full width at half maximum of the observation
nsigma - cut the gaussian at nsigma
ratio = ratio of xsigma/ysigma
theta - angle of position angle of the major axis measured
counter-clockwise from the x axis
Returns dimensions nx and ny of the elliptical kernel as well as the
ellipse parameters a, b, c, and f when defining an ellipse through the
quadratic form: a*(x-x0)^2+b(x-x0)*(y-y0)+c*(y-y0)^2 <= 2*f |
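A brief usage sketch, assuming FWHM2SIG is the usual FWHM-to-sigma factor 2*sqrt(2*ln 2) (the module-level constant is not shown here):

import math
import numpy as np

FWHM2SIG = 2 * math.sqrt(2 * math.log(2))  # assumed value of the module constant

nx, ny, a, b, c, f = gausspars(fwhm=3.0, nsigma=1.5, ratio=1.0, theta=0.0)
print(nx, ny)      # odd kernel dimensions, each at least 5
print(a, b, c, f)  # ellipse a*(x-x0)^2 + b*(x-x0)*(y-y0) + c*(y-y0)^2 <= 2*f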
def add_to_emails(self, *emails):
"""
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, (str, unicode)) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/emails",
input=post_parameters
) | :calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None |
def list_datasets(self, get_global_public):
"""
Lists datasets in resources. Setting 'get_global_public' to 'True'
will retrieve all public datasets in cloud. 'False' will get user's
public datasets.
Arguments:
get_global_public (bool): True if user wants all public datasets in
cloud. False if user wants only their
public datasets.
Returns:
dict: Returns datasets in JSON format
"""
appending = ""
if get_global_public:
appending = "public"
url = self.url() + "/resource/{}dataset/".format(appending)
req = self.remote_utils.get_url(url)
if req.status_code != 200:
raise RemoteDataNotFoundError('Could not find {}'.format(req.text))
else:
return req.json() | Lists datasets in resources. Setting 'get_global_public' to 'True'
will retrieve all public datasets in cloud. 'False' will get user's
public datasets.
Arguments:
get_global_public (bool): True if user wants all public datasets in
cloud. False if user wants only their
public datasets.
Returns:
dict: Returns datasets in JSON format |
def run(cl_args, compo_type):
""" run command """
cluster, role, env = cl_args['cluster'], cl_args['role'], cl_args['environ']
topology = cl_args['topology-name']
spouts_only, bolts_only = cl_args['spout'], cl_args['bolt']
try:
components = tracker_access.get_logical_plan(cluster, env, topology, role)
topo_info = tracker_access.get_topology_info(cluster, env, topology, role)
table, header = to_table(components, topo_info)
if spouts_only == bolts_only:
print(tabulate(table, headers=header))
elif spouts_only:
table, header = filter_spouts(table, header)
print(tabulate(table, headers=header))
else:
table, header = filter_bolts(table, header)
print(tabulate(table, headers=header))
return True
except:
Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"])
return False | run command |
def find_items(self, q, shape=ID_ONLY, depth=SHALLOW, additional_fields=None, order_fields=None,
calendar_view=None, page_size=None, max_items=None, offset=0):
"""
Private method to call the FindItem service
:param q: a Q instance containing any restrictions
:param shape: controls whether to return (id, changekey) tuples or Item objects. If additional_fields is
non-null, we always return Item objects.
:param depth: controls whether to return soft-deleted items or not.
:param additional_fields: the extra properties we want on the return objects. Default is no properties. Be
aware that complex fields can only be fetched with fetch() (i.e. the GetItem service).
:param order_fields: the SortOrder fields, if any
:param calendar_view: a CalendarView instance, if any
:param page_size: the requested number of items per page
:param max_items: the max number of items to return
:param offset: the offset relative to the first item in the item collection
:return: a generator for the returned item IDs or items
"""
if shape not in SHAPE_CHOICES:
raise ValueError("'shape' %s must be one of %s" % (shape, SHAPE_CHOICES))
if depth not in ITEM_TRAVERSAL_CHOICES:
raise ValueError("'depth' %s must be one of %s" % (depth, ITEM_TRAVERSAL_CHOICES))
if not self.folders:
log.debug('Folder list is empty')
return
if additional_fields:
for f in additional_fields:
self.validate_item_field(field=f)
for f in additional_fields:
if f.field.is_complex:
raise ValueError("find_items() does not support field '%s'. Use fetch() instead" % f.field.name)
if calendar_view is not None and not isinstance(calendar_view, CalendarView):
raise ValueError("'calendar_view' %s must be a CalendarView instance" % calendar_view)
# Build up any restrictions
if q.is_empty():
restriction = None
query_string = None
elif q.query_string:
restriction = None
query_string = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
else:
restriction = Restriction(q, folders=self.folders, applies_to=Restriction.ITEMS)
query_string = None
log.debug(
'Finding %s items in folders %s (shape: %s, depth: %s, additional_fields: %s, restriction: %s)',
self.account,
self.folders,
shape,
depth,
additional_fields,
restriction.q if restriction else None,
)
items = FindItem(account=self.account, folders=self.folders, chunk_size=page_size).call(
additional_fields=additional_fields,
restriction=restriction,
order_fields=order_fields,
shape=shape,
query_string=query_string,
depth=depth,
calendar_view=calendar_view,
max_items=calendar_view.max_items if calendar_view else max_items,
offset=offset,
)
if shape == ID_ONLY and additional_fields is None:
for i in items:
yield i if isinstance(i, Exception) else Item.id_from_xml(i)
else:
for i in items:
if isinstance(i, Exception):
yield i
else:
yield Folder.item_model_from_tag(i.tag).from_xml(elem=i, account=self.account) | Private method to call the FindItem service
:param q: a Q instance containing any restrictions
:param shape: controls whether to return (id, changekey) tuples or Item objects. If additional_fields is
non-null, we always return Item objects.
:param depth: controls whether to return soft-deleted items or not.
:param additional_fields: the extra properties we want on the return objects. Default is no properties. Be
aware that complex fields can only be fetched with fetch() (i.e. the GetItem service).
:param order_fields: the SortOrder fields, if any
:param calendar_view: a CalendarView instance, if any
:param page_size: the requested number of items per page
:param max_items: the max number of items to return
:param offset: the offset relative to the first item in the item collection
:return: a generator for the returned item IDs or items |
def _to_operator(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Operator representation."""
if rep == 'Operator':
return data
if rep == 'Stinespring':
return _stinespring_to_operator(data, input_dim, output_dim)
# Convert via Kraus representation
if rep != 'Kraus':
data = _to_kraus(rep, data, input_dim, output_dim)
return _kraus_to_operator(data, input_dim, output_dim) | Transform a QuantumChannel to the Operator representation. |
def get_redirect_url(self, **kwargs):
"""
Return the authorization/authentication URL signed with the request
token.
"""
params = {
'oauth_token': self.get_request_token().key,
}
return '%s?%s' % (self.auth_url, urllib.urlencode(params)) | Return the authorization/authentication URL signed with the request
token. |
def get_ref_free_exc_info():
"Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory"
type, val, tb = sys.exc_info()
traceback.clear_frames(tb)
return (type, val, tb) | Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory |
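A brief usage sketch: the helper is intended to run inside an except block, where sys.exc_info() is populated; after clear_frames the traceback can still be logged or re-raised but no longer pins every frame's locals.

try:
    1 / 0
except ZeroDivisionError:
    exc_type, exc_val, exc_tb = get_ref_free_exc_info()
    # Still usable for re-raising, without the locals kept alive:
    # raise exc_val.with_traceback(exc_tb)
    print(exc_type.__name__, exc_val)  # ZeroDivisionError division by zero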
def read(filename,**kwargs):
""" Read a generic input file into a recarray.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : input file name
kwargs : keyword arguments for the reader
Returns:
recarray : data array
"""
base,ext = os.path.splitext(filename)
if ext in ('.fits','.fz'):
# Abstract fits here...
return fitsio.read(filename,**kwargs)
elif ext in ('.npy',):
return np.load(filename,**kwargs)
elif ext in ('.csv',):
return np.recfromcsv(filename,**kwargs)
elif ext in ('.txt','.dat'):
return np.genfromtxt(filename,**kwargs)
msg = "Unrecognized file type: %s"%filename
raise ValueError(msg) | Read a generic input file into a recarray.
Accepted file formats: [.fits,.fz,.npy,.csv,.txt,.dat]
Parameters:
filename : input file name
kwargs : keyword arguments for the reader
Returns:
recarray : data array |
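The extension dispatch hinges on tuple membership; with a single extension the tuple needs a trailing comma, since `in` against a bare parenthesized string degenerates to a substring test. A tiny illustration of the difference:

ext = '.np'             # a typo'd extension
print(ext in ('.npy'))  # True  -- ('.npy') is just the string '.npy'; substring test
print(ext in ('.npy',)) # False -- one-element tuple; real membership test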
def update_editor ( self ):
""" Updates the editor when the object trait changes externally to the
editor.
"""
object = self.value
# Graph the new object...
canvas = self.factory.canvas
if canvas is not None:
for nodes_name in canvas.node_children:
node_children = getattr(object, nodes_name)
self._add_nodes(node_children)
for edges_name in canvas.edge_children:
edge_children = getattr(object, edges_name)
self._add_edges(edge_children)
# ...then listen for changes.
self._add_listeners() | Updates the editor when the object trait changes externally to the
editor. |
def __solve_overlaps(self, start_time, end_time):
"""finds facts that happen in given interval and shifts them to
make room for new fact
"""
if end_time is None or start_time is None:
return
# possible combinations and the OR clauses that catch them
# (the side of the number marks if it catches the end or start time)
# |----------------- NEW -----------------|
# |--- old --- 1| |2 --- old --- 1| |2 --- old ---|
# |3 ----------------------- big old ------------------------ 3|
query = """
SELECT a.*, b.name, c.name as category
FROM facts a
LEFT JOIN activities b on b.id = a.activity_id
LEFT JOIN categories c on b.category_id = c.id
WHERE (end_time > ? and end_time < ?)
OR (start_time > ? and start_time < ?)
OR (start_time < ? and end_time > ?)
ORDER BY start_time
"""
conflicts = self.fetchall(query, (start_time, end_time,
start_time, end_time,
start_time, end_time))
for fact in conflicts:
# won't eliminate as it is better to have overlapping entries than losing data
if start_time < fact["start_time"] and end_time > fact["end_time"]:
continue
# split - truncate until beginning of new entry and create new activity for end
if fact["start_time"] < start_time < fact["end_time"] and \
fact["start_time"] < end_time < fact["end_time"]:
logger.info("splitting %s" % fact["name"])
# truncate until beginning of the new entry
self.execute("""UPDATE facts
SET end_time = ?
WHERE id = ?""", (start_time, fact["id"]))
fact_name = fact["name"]
# create new fact for the end
new_fact = Fact(fact["name"],
category = fact["category"],
description = fact["description"])
new_fact_id = self.__add_fact(new_fact.serialized_name(), end_time, fact["end_time"])
# copy tags
tag_update = """INSERT INTO fact_tags(fact_id, tag_id)
SELECT ?, tag_id
FROM fact_tags
WHERE fact_id = ?"""
self.execute(tag_update, (new_fact_id, fact["id"])) #clone tags
# overlap start
elif start_time < fact["start_time"] < end_time:
logger.info("Overlapping start of %s" % fact["name"])
self.execute("UPDATE facts SET start_time=? WHERE id=?",
(end_time, fact["id"]))
# overlap end
elif start_time < fact["end_time"] < end_time:
logger.info("Overlapping end of %s" % fact["name"])
self.execute("UPDATE facts SET end_time=? WHERE id=?",
(start_time, fact["id"])) | finds facts that happen in given interval and shifts them to
make room for new fact |
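The three OR clauses in the query correspond to three ways an existing fact can overlap the new interval, plus the "new swallows old" case handled by the continue. A minimal plain-Python sketch of those predicates, with made-up numeric timestamps:

def overlap_case(fact_start, fact_end, new_start, new_end):
    # Mirrors the WHERE clause: which side(s) of the old fact fall inside the new interval.
    end_inside = new_start < fact_end < new_end      # old fact's end overlaps the new one
    start_inside = new_start < fact_start < new_end  # old fact's start overlaps the new one
    old_spans_new = fact_start < new_start and fact_end > new_end
    if end_inside and start_inside:
        return "old fact kept as-is (overlap preferred to losing data)"
    if old_spans_new:
        return "old fact must be split"
    if end_inside:
        return "truncate old fact's end"
    if start_inside:
        return "shift old fact's start"
    return "no overlap"

print(overlap_case(1, 5, 4, 8))   # truncate old fact's end
print(overlap_case(6, 9, 4, 8))   # shift old fact's start
print(overlap_case(2, 10, 4, 8))  # old fact must be split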
def _involuted_reverse(self):
"""
This method reverses the StridedInterval object for real. Do expect loss of precision for most cases!
:return: A new reversed StridedInterval instance
"""
def inv_is_top(si):
return (si.stride == 1 and
self._lower_bound == StridedInterval._modular_add(self._upper_bound, 1, self.bits)
)
o = self.copy()
# Clear the reversed flag
o._reversed = not o._reversed
if o.bits == 8:
# No need for reversing
return o.copy()
if inv_is_top(o):
# A TOP is still a TOP after reversing
si = o.copy()
return si
else:
lb = o._lower_bound
ub = o._upper_bound
rounded_bits = ((o.bits + 7) // 8) * 8
lb_r = []
ub_r = []
for i in xrange(0, rounded_bits, 8):
if i != 0:
lb = lb >> 8
ub = ub >> 8
lb_r.append(lb & 0xff)
ub_r.append(ub & 0xff)
si_lb = None
si_ub = None
for b in lb_r:
if si_lb is None:
si_lb = b
else:
si_lb <<= 8
si_lb |= b
for b in ub_r:
if si_ub is None:
si_ub = b
else:
si_ub <<= 8
si_ub |= b
si = StridedInterval(bits=o.bits,
lower_bound=si_lb,
upper_bound=si_ub,
stride=o._stride,
uninitialized=o.uninitialized)
si._reversed = o._reversed
if not o.is_integer:
# We really don't want to do that... but well, sometimes it just happens...
logger.warning('Reversing a real strided-interval %s is bad', self)
return si | This method reverses the StridedInterval object for real. Do expect loss of precision for most cases!
:return: A new reversed StridedInterval instance |
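The loop above flips the byte order of both bounds after rounding the width up to whole bytes; the same transformation on a single integer can be sketched with int.to_bytes/from_bytes (values are made up):

def reverse_bytes(value, bits):
    # Round the width up to whole bytes, then flip the byte order.
    nbytes = (bits + 7) // 8
    return int.from_bytes(value.to_bytes(nbytes, 'little'), 'big')

print(hex(reverse_bytes(0x11223344, 32)))  # 0x44332211
print(hex(reverse_bytes(0xabcd, 16)))      # 0xcdab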
def get_link_page_text(link_page):
"""
Construct the dialog box to display a list of links to the user.
"""
text = ''
for i, link in enumerate(link_page):
capped_link_text = (link['text'] if len(link['text']) <= 20
else link['text'][:19] + '…')
text += '[{}] [{}]({})\n'.format(i, capped_link_text, link['href'])
return text | Construct the dialog box to display a list of links to the user. |
def delete(self):
"""
Deletes the object. Returns without doing anything if the object is
new.
"""
if not self.id:
return
if not self._loaded:
self.reload()
return self.http_delete(self.id, etag=self.etag) | Deletes the object. Returns without doing anything if the object is
new. |
def pretty_format(message):
"""
Convert a message dictionary into a human-readable string.
@param message: Message to parse, as dictionary.
@return: Unicode string.
"""
skip = {
TIMESTAMP_FIELD, TASK_UUID_FIELD, TASK_LEVEL_FIELD, MESSAGE_TYPE_FIELD,
ACTION_TYPE_FIELD, ACTION_STATUS_FIELD}
def add_field(previous, key, value):
value = unicode(pprint.pformat(value, width=40)).replace(
"\\n", "\n ").replace("\\t", "\t")
# Reindent second line and later to match up with first line's
# indentation:
lines = value.split("\n")
# indent lines are " <key length>| <value>"
indent = "{}| ".format(" " * (2 + len(key)))
value = "\n".join([lines[0]] + [indent + l for l in lines[1:]])
return " %s: %s\n" % (key, value)
remaining = ""
for field in [ACTION_TYPE_FIELD, MESSAGE_TYPE_FIELD, ACTION_STATUS_FIELD]:
if field in message:
remaining += add_field(remaining, field, message[field])
for (key, value) in sorted(message.items()):
if key not in skip:
remaining += add_field(remaining, key, value)
level = "/" + "/".join(map(unicode, message[TASK_LEVEL_FIELD]))
return "%s -> %s\n%sZ\n%s" % (
message[TASK_UUID_FIELD],
level,
# If we were returning or storing the datetime we'd want to use an
# explicit timezone instead of a naive datetime, but since we're
# just using it for formatting we needn't bother.
datetime.utcfromtimestamp(message[TIMESTAMP_FIELD]).isoformat(
sep=str(" ")),
remaining, ) | Convert a message dictionary into a human-readable string.
@param message: Message to parse, as dictionary.
@return: Unicode string. |
def delitem_via_sibseqs(ol,*sibseqs):
'''
from elist.elist import *
y = ['a',['b',["bb"]],'c']
y[1][1]
delitem_via_sibseqs(y,1,1)
y
'''
pathlist = list(sibseqs)
this = ol
for i in range(0,pathlist.__len__()-1):
key = pathlist[i]
this = this.__getitem__(key)
this.__delitem__(pathlist[-1])
return(ol) | from elist.elist import *
y = ['a',['b',["bb"]],'c']
y[1][1]
delitem_via_sibseqs(y,1,1)
y |
def set_task_object(self,
task_id,
task_progress_object):
"""
Defines a new progress bar with the given information using a TaskProgress object.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param task_progress_object: TaskProgress object holding the progress bar information.
"""
self.set_task(task_id=task_id,
total=task_progress_object.total,
prefix=task_progress_object.prefix,
suffix=task_progress_object.suffix,
decimals=task_progress_object.decimals,
bar_length=task_progress_object.bar_length,
keep_alive=task_progress_object.keep_alive,
display_time=task_progress_object.display_time) | Defines a new progress bar with the given information using a TaskProgress object.
:param task_id: Unique identifier for this progress bar. Will erase if already existing.
:param task_progress_object: TaskProgress object holding the progress bar information. |
def flush(self):
"""
Sends buffered data to the target
"""
# Flush buffer
content = self._buffer.getvalue()
self._buffer = StringIO()
if content:
# Send message
self._client.send_message(self._target, content, mtype="chat") | Sends buffered data to the target |
def TEST():
"""
Modules for testing happiness of 'persons' in 'worlds'
based on simplistic preferences. Just a toy - don't take seriously
----- WORLD SUMMARY for : Mars -----
population = 0
tax_rate = 0.0
tradition = 0.9
equity = 0.0
Preferences for Rover
tax_min = 0.0
equity = 0.0
tax_max = 0.9
tradition = 0.9
Rover is Indifferent in Mars (0)
DETAILS
tax: Economic = 0.1 -> 0.3
tradition: Personal = 0.3 -> 0.9
equity: Personal = 0.1 -> 0.9
growth: Economic = 0.01 -> 0.09
"""
w = World('Mars', [0, 0.0, 0.9, 0.0])
print(w)
p = Person('Rover', {'tax_min':0.0, 'tax_max':0.9,'tradition':0.9, 'equity':0.0})
print(p)
h = Happiness(p,w)
#h.add_factor(HappinessFactors(name, type, min, max))
h.add_factor(HappinessFactors('tax', 'Economic', 0.1, 0.3))
h.add_factor(HappinessFactors('tradition', 'Personal', 0.3, 0.9))
h.add_factor(HappinessFactors('equity', 'Personal', 0.1, 0.9))
h.add_factor(HappinessFactors('growth', 'Economic', 0.01, 0.09))
print(h.show_details()) | Modules for testing happiness of 'persons' in 'worlds'
based on simplistic preferences. Just a toy - don't take seriously
----- WORLD SUMMARY for : Mars -----
population = 0
tax_rate = 0.0
tradition = 0.9
equity = 0.0
Preferences for Rover
tax_min = 0.0
equity = 0.0
tax_max = 0.9
tradition = 0.9
Rover is Indifferent in Mars (0)
DETAILS
tax: Economic = 0.1 -> 0.3
tradition: Personal = 0.3 -> 0.9
equity: Personal = 0.1 -> 0.9
growth: Economic = 0.01 -> 0.09 |
def base26(x, _alphabet=string.ascii_uppercase):
"""Return positive ``int`` ``x`` as string in bijective base26 notation.
>>> [base26(i) for i in [0, 1, 2, 26, 27, 28, 702, 703, 704]]
['', 'A', 'B', 'Z', 'AA', 'AB', 'ZZ', 'AAA', 'AAB']
>>> base26(344799) # 19 * 26**3 + 16 * 26**2 + 1 * 26**1 + 13 * 26**0
'SPAM'
>>> base26(256)
'IV'
"""
result = []
while x:
x, digit = divmod(x, 26)
if not digit:
x -= 1
digit = 26
result.append(_alphabet[digit - 1])
return ''.join(result[::-1]) | Return positive ``int`` ``x`` as string in bijective base26 notation.
>>> [base26(i) for i in [0, 1, 2, 26, 27, 28, 702, 703, 704]]
['', 'A', 'B', 'Z', 'AA', 'AB', 'ZZ', 'AAA', 'AAB']
>>> base26(344799) # 19 * 26**3 + 16 * 26**2 + 1 * 26**1 + 13 * 26**0
'SPAM'
>>> base26(256)
'IV' |
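A sketch of the inverse mapping, assuming the same uppercase alphabet; each letter contributes its 1-based position, so decoding is a plain base-26 accumulation:

import string

def base26_decode(s, _alphabet=string.ascii_uppercase):
    """Inverse of base26: 'SPAM' -> 344799, '' -> 0."""
    x = 0
    for ch in s:
        x = x * 26 + _alphabet.index(ch) + 1
    return x

print(base26_decode('SPAM'))  # 344799
print(base26_decode('IV'))    # 256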
def get_consensus_hashes(block_heights, hostport=None, proxy=None):
"""
Get consensus hashes for a list of blocks
NOTE: returns {block_height (int): consensus_hash (str)}
(coerces the key to an int)
Returns {'error': ...} on error
"""
assert proxy or hostport, 'Need proxy or hostport'
if proxy is None:
proxy = connect_hostport(hostport)
consensus_hashes_schema = {
'type': 'object',
'properties': {
'consensus_hashes': {
'type': 'object',
'patternProperties': {
'^([0-9]+)$': {
'type': 'string',
'pattern': OP_CONSENSUS_HASH_PATTERN,
},
},
},
},
'required': [
'consensus_hashes',
],
}
resp_schema = json_response_schema( consensus_hashes_schema )
resp = {}
try:
resp = proxy.get_consensus_hashes(block_heights)
resp = json_validate(resp_schema, resp)
if json_is_error(resp):
log.error('Failed to get consensus hashes for {}: {}'.format(block_heights, resp['error']))
return resp
except ValidationError as e:
if BLOCKSTACK_DEBUG:
log.exception(e)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
return resp
consensus_hashes = resp['consensus_hashes']
# hard to express as a JSON schema, but the format is thus:
# { block_height (str): consensus_hash (str) }
# need to convert all block heights to ints
try:
ret = {int(k): v for k, v in consensus_hashes.items()}
log.debug('consensus hashes: {}'.format(ret))
return ret
except ValueError:
return {'error': 'Server returned invalid data: expected int', 'http_status': 503} | Get consensus hashes for a list of blocks
NOTE: returns {block_height (int): consensus_hash (str)}
(coerces the key to an int)
Returns {'error': ...} on error |
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) | Add `dist` if we ``can_add()`` it and it has not already been added |
def _CollectTypeChecks(function, parent_type_check_dict, stack_location,
self_name):
"""Collect all type checks for this function."""
type_check_dict = dict(parent_type_check_dict)
type_check_dict.update(_ParseDocstring(function))
# Convert any potential string based checks into python instances.
for key, value in type_check_dict.items():
if isinstance(value, str):
type_check_dict[key] = _ParseTypeCheckString(value, stack_location + 1,
self_name)
return type_check_dict | Collect all type checks for this function. |
def add_unique_template_variables(self, options):
"""Update map template variables specific to graduated circle visual"""
options.update(dict(
colorProperty=self.color_property,
colorStops=self.color_stops,
colorType=self.color_function_type,
radiusType=self.radius_function_type,
defaultColor=self.color_default,
defaultRadius=self.radius_default,
radiusProperty=self.radius_property,
radiusStops=self.radius_stops,
strokeWidth=self.stroke_width,
strokeColor=self.stroke_color,
highlightColor=self.highlight_color
))
if self.vector_source:
options.update(dict(
vectorColorStops=self.generate_vector_color_map(),
vectorRadiusStops=self.generate_vector_numeric_map('radius'))) | Update map template variables specific to graduated circle visual |
def answers(self, other):
"""DEV: true if self is an answer from other"""
if other.__class__ == self.__class__:
return (other.service + 0x40) == self.service or \
(self.service == 0x7f and
self.request_service_id == other.service)
return False | DEV: true if self is an answer from other |
def init_module(self, run_object):
"""Initializes profiler with a module."""
self.profile = self.profile_module
self._run_object, _, self._run_args = run_object.partition(' ')
self._object_name = '%s (module)' % self._run_object
self._globs = {
'__file__': self._run_object,
'__name__': '__main__',
'__package__': None,
}
program_path = os.path.dirname(self._run_object)
if sys.path[0] != program_path:
sys.path.insert(0, program_path)
self._replace_sysargs() | Initializes profiler with a module. |
def convert_types(cls, value):
"""
Takes a value from MSSQL, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery.
"""
if isinstance(value, decimal.Decimal):
return float(value)
else:
return value | Takes a value from MSSQL, and converts it to a value that's safe for
JSON/Google Cloud Storage/BigQuery. |
def query(querystr, connection=None, **connectkwargs):
"""Execute a query of the given SQL database
"""
if connection is None:
connection = connect(**connectkwargs)
cursor = connection.cursor()
cursor.execute(querystr)
return cursor.fetchall() | Execute a query of the given SQL database |
def _preprocess_values(self, Y):
"""
Check if the values of the observations correspond to the values
assumed by the likelihood function.
..Note:: Binary classification algorithm works better with classes {-1, 1}
"""
Y_prep = Y.copy()
Y1 = Y[Y.flatten()==1].size
Y2 = Y[Y.flatten()==0].size
assert Y1 + Y2 == Y.size, 'Bernoulli likelihood is meant to be used only with outputs in {0, 1}.'
Y_prep[Y.flatten() == 0] = -1
return Y_prep | Check if the values of the observations correspond to the values
assumed by the likelihood function.
..Note:: Binary classification algorithm works better with classes {-1, 1} |
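The same remapping on its own, as a small numpy check (values are made up):

import numpy as np

Y = np.array([[1], [0], [0], [1]])
Y_prep = Y.copy()
Y_prep[Y.flatten() == 0] = -1   # {0, 1} labels become {-1, 1}
print(Y_prep.ravel())           # [ 1 -1 -1  1]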
def segments(self):
"""List of :class:`ChatMessageSegment` in message (:class:`list`)."""
seg_list = self._event.chat_message.message_content.segment
return [ChatMessageSegment.deserialize(seg) for seg in seg_list] | List of :class:`ChatMessageSegment` in message (:class:`list`). |
def get_safe_return_to(request, return_to):
"""
Ensure the user-originating redirection url is safe, i.e. within same scheme://domain:port
"""
if return_to and is_safe_url(url=return_to, host=request.get_host()) and return_to != request.build_absolute_uri():
return return_to | Ensure the user-originating redirection url is safe, i.e. within same scheme://domain:port |
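A hedged sketch of typical use in a view (view and URLs are hypothetical); the helper returns None when the candidate is unsafe, so callers fall back to a default:

from django.shortcuts import redirect

def logout_view(request):
    # Only follow the user-supplied "next" URL if it stays on our host.
    return_to = get_safe_return_to(request, request.GET.get('next'))
    return redirect(return_to or '/')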
def reduce(fname, reduction_factor):
"""
Produce a submodel from `fname` by sampling the nodes randomly.
Supports source models, site models and exposure models. As a special
case, it is also able to reduce .csv files by sampling the lines.
This is a debugging utility to reduce large computations to small ones.
"""
if fname.endswith('.csv'):
with open(fname) as f:
line = f.readline() # read the first line
if csv.Sniffer().has_header(line):
header = line
all_lines = f.readlines()
else:
header = None
f.seek(0)
all_lines = f.readlines()
lines = general.random_filter(all_lines, reduction_factor)
shutil.copy(fname, fname + '.bak')
print('Copied the original file in %s.bak' % fname)
_save_csv(fname, lines, header)
print('Extracted %d lines out of %d' % (len(lines), len(all_lines)))
return
elif fname.endswith('.npy'):
array = numpy.load(fname)
shutil.copy(fname, fname + '.bak')
print('Copied the original file in %s.bak' % fname)
arr = numpy.array(general.random_filter(array, reduction_factor))
numpy.save(fname, arr)
print('Extracted %d rows out of %d' % (len(arr), len(array)))
return
node = nrml.read(fname)
model = node[0]
if model.tag.endswith('exposureModel'):
total = len(model.assets)
model.assets.nodes = general.random_filter(
model.assets, reduction_factor)
num_nodes = len(model.assets)
elif model.tag.endswith('siteModel'):
total = len(model)
model.nodes = general.random_filter(model, reduction_factor)
num_nodes = len(model)
elif model.tag.endswith('sourceModel'):
reduce_source_model(fname, reduction_factor)
return
elif model.tag.endswith('logicTree'):
for smpath in logictree.collect_info(fname).smpaths:
reduce_source_model(smpath, reduction_factor)
return
else:
raise RuntimeError('Unknown model tag: %s' % model.tag)
save_bak(fname, node, num_nodes, total) | Produce a submodel from `fname` by sampling the nodes randomly.
Supports source models, site models and exposure models. As a special
case, it is also able to reduce .csv files by sampling the lines.
This is a debugging utility to reduce large computations to small ones. |
def encode_sequence(content, error=None, version=None, mode=None,
mask=None, encoding=None, eci=False, boost_error=True,
symbol_count=None):
"""\
EXPERIMENTAL: Creates a sequence of QR Codes in Structured Append mode.
:return: Iterable of named tuples, see :py:func:`encode` for details.
"""
def one_item_segments(chunk, mode):
"""\
Creates a Segments sequence with one item.
"""
segs = Segments()
segs.add_segment(make_segment(chunk, mode=mode, encoding=encoding))
return segs
def divide_into_chunks(data, num):
k, m = divmod(len(data), num)
return [data[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(num)]
def calc_qrcode_bit_length(char_count, ver_range, mode, encoding=None,
is_eci=False, is_sa=False):
overhead = 4 # Mode indicator for QR Codes, only
# Number of bits in character count indicator
overhead += consts.CHAR_COUNT_INDICATOR_LENGTH[mode][ver_range]
if is_eci and mode == consts.MODE_BYTE and encoding != consts.DEFAULT_BYTE_ENCODING:
overhead += 4 # ECI indicator
overhead += 8 # ECI assignment no
if is_sa:
# 4 bit for mode, 4 bit for the position, 4 bit for total number of symbols
# 8 bit for parity data
overhead += 5 * 4
bits = 0
if mode == consts.MODE_NUMERIC:
num, remainder = divmod(char_count, 3)
bits += num * 10 + (0, 4, 7)[remainder]
elif mode == consts.MODE_ALPHANUMERIC:
num, remainder = divmod(char_count, 2)
bits += num * 11 + (6 if remainder else 0)
elif mode == consts.MODE_BYTE:
bits += char_count * 8
elif mode == consts.MODE_KANJI:
bits += char_count * 13
return overhead + bits
def number_of_symbols_by_version(content, version, error, mode):
"""\
Returns the number of symbols for the provided version.
"""
length = len(content)
ver_range = version_range(version)
bit_length = calc_qrcode_bit_length(length, ver_range, mode, encoding,
is_eci=eci, is_sa=True)
capacity = consts.SYMBOL_CAPACITY[version][error]
# Initial result does not contain the overhead of SA mode for all QR Codes
cnt = int(math.ceil(bit_length / capacity))
# Overhead of SA mode for all QR Codes
bit_length += 5 * 4 * (cnt - 1) + (12 * (cnt - 1) if eci else 0)
return int(math.ceil(bit_length / capacity))
version = normalize_version(version)
if version is not None:
if version < 1:
raise VersionError('This function does not accept Micro QR Code versions. '
'Provided: "{0}"'.format(get_version_name(version)))
elif symbol_count is None:
raise ValueError('Please provide a QR Code version or the symbol count')
if symbol_count is not None and not 1 <= symbol_count <= 16:
raise ValueError('The symbol count must be in range 1 .. 16')
error = normalize_errorlevel(error, accept_none=True)
if error is None:
error = consts.ERROR_LEVEL_L
mode = normalize_mode(mode)
mask = normalize_mask(mask, is_micro=False)
segments = prepare_data(content, mode, encoding, version)
guessed_version = None
if symbol_count is None:
try:
# Try to find a version which fits without using Structured Append
guessed_version = find_version(segments, error, eci=eci, micro=False)
except DataOverflowError:
# Data does fit into a usual QR Code but ignore the error silently,
# guessed_version is None
pass
if guessed_version and guessed_version <= (version or guessed_version):
# Return iterable of size 1
return [_encode(segments, error=error, version=(version or guessed_version),
mask=mask, eci=eci, boost_error=boost_error)]
if len(segments.modes) > 1:
raise ValueError('This function cannot handle more than one mode (yet). Sorry.')
mode = segments.modes[0] # CHANGE iff more than one mode is supported!
# Creating one QR code failed or max_no is not None
if mode == consts.MODE_NUMERIC:
content = str(content)
if symbol_count is not None and len(content) < symbol_count:
raise ValueError('The content is not long enough to be divided into {0} symbols'.format(symbol_count))
sa_parity_data = calc_structured_append_parity(content)
num_symbols = symbol_count or 16
if version is not None:
num_symbols = number_of_symbols_by_version(content, version, error, mode)
if num_symbols > 16:
raise DataOverflowError('The data does not fit into Structured Append version {0}'.format(version))
chunks = divide_into_chunks(content, num_symbols)
if symbol_count is not None:
segments = one_item_segments(max(chunks, key=len), mode)
version = find_version(segments, error, eci=eci, micro=False, is_sa=True)
sa_info = partial(_StructuredAppendInfo, total=len(chunks) - 1,
parity=sa_parity_data)
return [_encode(one_item_segments(chunk, mode), error=error, version=version,
mask=mask, eci=eci, boost_error=boost_error,
sa_info=sa_info(i)) for i, chunk in enumerate(chunks)] | \
EXPERIMENTAL: Creates a sequence of QR Codes in Structured Append mode.
:return: Iterable of named tuples, see :py:func:`encode` for details. |
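The nested divide_into_chunks helper splits the payload into num nearly equal slices, giving the first len(data) % num slices one extra element; a standalone check of that formula:

def divide_into_chunks(data, num):
    # First (len(data) % num) chunks get one extra element.
    k, m = divmod(len(data), num)
    return [data[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(num)]

print(divide_into_chunks('ABCDEFGHIJ', 3))  # ['ABCD', 'EFG', 'HIJ']
print(divide_into_chunks('ABCDEFGH', 4))    # ['AB', 'CD', 'EF', 'GH']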
def _do_api_call(self, method, data):
"""
Convenience method to carry out a standard API call against the
Petfinder API.
:param basestring method: The API method name to call.
:param dict data: Key/value parameters to send to the API method.
This varies based on the method.
:raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError``
sub-classes, depending on what went wrong.
:rtype: lxml.etree._Element
:returns: The parsed document.
"""
# Developer API keys, auth tokens, and other standard, required args.
data.update({
"key": self.api_key,
# No API methods currently use this, but we're ready for it,
# should that change.
"token": self.api_auth_token,
})
# Ends up being a full URL+path.
url = "%s%s" % (self.endpoint, method)
# Bombs away!
response = requests.get(url, params=data)
# Parse and return an ElementTree instance containing the document.
root = etree.fromstring(response.content)
# If this is anything but '100', it's an error.
status_code = root.find("header/status/code").text
# If this comes back as non-None, we know we've got problems.
exc_class = _get_exception_class_from_status_code(status_code)
if exc_class:
# Sheet, sheet, errar! Raise the appropriate error, and pass
# the accompanying error message as the exception message.
error_message = root.find("header/status/message").text
#noinspection PyCallingNonCallable
raise exc_class(error_message)
return root | Convenience method to carry out a standard API call against the
Petfinder API.
:param basestring method: The API method name to call.
:param dict data: Key/value parameters to send to the API method.
This varies based on the method.
:raises: A number of :py:exc:`petfinder.exceptions.PetfinderAPIError``
sub-classes, depending on what went wrong.
:rtype: lxml.etree._Element
:returns: The parsed document. |
def patch(module, external=(), internal=()):
"""
Temporarily monkey-patch dependencies which can be external to, or internal
to the supplied module.
:param module: Module object
:param external: External dependencies to patch (full paths as strings)
:param internal: Internal dependencies to patch (short names as strings)
:return:
"""
external = tuple(external)
internal = tuple(internal)
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
# The master mock is used to contain all of the sub-mocks. It is a
# useful container and can also be used to determine the order of
# calls to all sub-mocks.
master_mock = mock.MagicMock()
def get_mock(name):
return getattr(master_mock, __patch_name(name))
def patch_external(name):
return mock.patch(name, get_mock(name))
def patch_internal(name):
return mock.patch(module.__name__ + '.' + name, get_mock(name))
try:
with __nested(patch_external(n) for n in external):
if external:
# Reload the module to ensure that patched external
# dependencies are accounted for.
reload_module(module)
# Patch objects in the module itself.
with __nested(patch_internal(n) for n in internal):
return fn(master_mock, *args, **kwargs)
finally:
if external:
# When all patches have been discarded, reload the module
# to bring it back to its original state (except for all of
# the references which have been reassigned).
reload_module(module)
return wrapper
return decorator | Temporarily monkey-patch dependencies which can be external to, or internal
to the supplied module.
:param module: Module object
:param external: External dependencies to patch (full paths as strings)
:param internal: Internal dependencies to patch (short names as strings)
:return: |
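The reason for routing every sub-mock through one master MagicMock is that the relative call order across all patched dependencies ends up in a single mock_calls list. A runnable sketch of just that idea (attribute names are illustrative; the real ones come from the unshown __patch_name helper):

from unittest import mock

# The master mock owns every sub-mock, so one list records the interleaved call order.
master_mock = mock.MagicMock()
fake_open = master_mock.patched_open
fake_send = master_mock.patched_send

fake_open('report.txt')
fake_send('payload')
fake_open('log.txt')

print(master_mock.mock_calls)
# [call.patched_open('report.txt'), call.patched_send('payload'), call.patched_open('log.txt')]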
def activate_left(self, token):
"""Make a copy of the received token and call `_activate_left`."""
watchers.MATCHER.debug(
"Node <%s> activated left with token %r", self, token)
return self._activate_left(token.copy()) | Make a copy of the received token and call `_activate_left`. |
def str_rfind(x, sub, start=0, end=None):
"""Returns the highest indices in each string in a column, where the provided substring is fully contained between within a
sample. If the substring is not found, -1 is returned.
:param str sub: A substring to be found in the samples
:param int start:
:param int end:
:returns: an expression containing the highest indices specifying the start of the substring.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.rfind(sub="et")
Expression = str_rfind(text, sub='et')
Length: 5 dtype: int64 (expression)
-----------------------------------
0 3
1 7
2 -1
3 -1
4 -1
"""
return _to_string_sequence(x).find(sub, start, 0 if end is None else end, end is None, False) | Returns the highest indices in each string in a column, where the provided substring is fully contained within a
sample. If the substring is not found, -1 is returned.
:param str sub: A substring to be found in the samples
:param int start:
:param int end:
:returns: an expression containing the highest indices specifying the start of the substring.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.rfind(sub="et")
Expression = str_rfind(text, sub='et')
Length: 5 dtype: int64 (expression)
-----------------------------------
0 3
1 7
2 -1
3 -1
4 -1 |
def _generate_splits(self, m, r):
"""
When a rectangle is placed inside a maximal rectangle, it stops being one
and up to 4 new maximal rectangles may appear depending on the placement.
_generate_splits calculates them.
Arguments:
m (Rectangle): max_rect rectangle
r (Rectangle): rectangle placed
Returns:
list : list containing new maximal rectangles or an empty list
"""
new_rects = []
if r.left > m.left:
new_rects.append(Rectangle(m.left, m.bottom, r.left-m.left, m.height))
if r.right < m.right:
new_rects.append(Rectangle(r.right, m.bottom, m.right-r.right, m.height))
if r.top < m.top:
new_rects.append(Rectangle(m.left, r.top, m.width, m.top-r.top))
if r.bottom > m.bottom:
new_rects.append(Rectangle(m.left, m.bottom, m.width, r.bottom-m.bottom))
return new_rects | When a rectangle is placed inside a maximal rectangle, it stops being one
and up to 4 new maximal rectangles may appear depending on the placement.
_generate_splits calculates them.
Arguments:
m (Rectangle): max_rect rectangle
r (Rectangle): rectangle placed
Returns:
list : list containing new maximal rectangles or an empty list |
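A worked example with plain (left, bottom, width, height) tuples, mirroring the four candidate strips; placing a 2x2 rectangle at (3, 3) inside a 10x10 maximal rectangle at the origin yields four overlapping maximal rectangles:

def generate_splits(m, r):
    # m and r are (left, bottom, width, height); returns up to 4 maximal rectangles.
    ml, mb, mw, mh = m
    rl, rb, rw, rh = r
    m_right, m_top = ml + mw, mb + mh
    r_right, r_top = rl + rw, rb + rh
    new_rects = []
    if rl > ml:
        new_rects.append((ml, mb, rl - ml, mh))            # strip left of the placed rect
    if r_right < m_right:
        new_rects.append((r_right, mb, m_right - r_right, mh))  # strip right of it
    if r_top < m_top:
        new_rects.append((ml, r_top, mw, m_top - r_top))   # strip above it
    if rb > mb:
        new_rects.append((ml, mb, mw, rb - mb))            # strip below it
    return new_rects

print(generate_splits((0, 0, 10, 10), (3, 3, 2, 2)))
# [(0, 0, 3, 10), (5, 0, 5, 10), (0, 5, 10, 5), (0, 0, 10, 3)]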
def get_page_labels(self, page_id, prefix=None, start=None, limit=None):
"""
Returns the list of labels on a piece of Content.
:param page_id: A string containing the id of the labels content container.
:param prefix: OPTIONAL: The prefixes to filter the labels with {@see Label.Prefix}.
Default: None.
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of labels to return, this may be restricted by
fixed system limits. Default: 200.
:return: The JSON data returned from the content/{id}/label endpoint, or the results of the
callback. Will raise requests.HTTPError on bad input, potentially.
"""
url = 'rest/api/content/{id}/label'.format(id=page_id)
params = {}
if prefix:
params['prefix'] = prefix
if start is not None:
params['start'] = int(start)
if limit is not None:
params['limit'] = int(limit)
return self.get(url, params=params) | Returns the list of labels on a piece of Content.
:param page_id: A string containing the id of the labels content container.
:param prefix: OPTIONAL: The prefixes to filter the labels with {@see Label.Prefix}.
Default: None.
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of labels to return, this may be restricted by
fixed system limits. Default: 200.
:return: The JSON data returned from the content/{id}/label endpoint, or the results of the
callback. Will raise requests.HTTPError on bad input, potentially. |
def extractHolidayWeekendSchedules(self):
""" extract holiday and weekend :class:`~ekmmeters.Schedule` from meter object buffer.
Returns:
tuple: Holiday and weekend :class:`~ekmmeters.Schedule` values, as strings.
======= ======================================
Holiday :class:`~ekmmeters.Schedule` as string
Weekend :class:`~ekmmeters.Schedule` as string
======= ======================================
"""
result = namedtuple("result", ["Weekend", "Holiday"])
result.Weekend = self.m_hldy["Weekend_Schd"][MeterData.StringValue]
result.Holiday = self.m_hldy["Holiday_Schd"][MeterData.StringValue]
return result | extract holiday and weekend :class:`~ekmmeters.Schedule` from meter object buffer.
Returns:
tuple: Holiday and weekend :class:`~ekmmeters.Schedule` values, as strings.
======= ======================================
Holiday :class:`~ekmmeters.Schedule` as string
Weekend :class:`~ekmmeters.Schedule` as string
======= ====================================== |
def expose_ancestors_or_children(self, member, collection, lang=None):
""" Build an ancestor or descendant dict view based on selected information
:param member: Current Member to build for
:param collection: Collection from which we retrieved it
:param lang: Language to express data in
:return:
"""
x = {
"id": member.id,
"label": str(member.get_label(lang)),
"model": str(member.model),
"type": str(member.type),
"size": member.size,
"semantic": self.semantic(member, parent=collection)
}
if isinstance(member, ResourceCollection):
x["lang"] = str(member.lang)
return x | Build an ancestor or descendant dict view based on selected information
:param member: Current Member to build for
:param collection: Collection from which we retrieved it
:param lang: Language to express data in
:return: |
def create_service_from_endpoint(endpoint, service_type, title=None, abstract=None, catalog=None):
"""
Create a service from an endpoint if it does not already exists.
"""
from models import Service
if Service.objects.filter(url=endpoint, catalog=catalog).count() == 0:
# check if endpoint is valid
request = requests.get(endpoint)
if request.status_code == 200:
LOGGER.debug('Creating a %s service for endpoint=%s catalog=%s' % (service_type, endpoint, catalog))
service = Service(
type=service_type, url=endpoint, title=title, abstract=abstract,
csw_type='service', catalog=catalog
)
service.save()
return service
else:
LOGGER.warning('This endpoint is invalid, status code is %s' % request.status_code)
else:
LOGGER.warning('A service for this endpoint %s in catalog %s already exists' % (endpoint, catalog))
return None | Create a service from an endpoint if it does not already exist. |
def getKeyword(filename, keyword, default=None, handle=None):
"""
General, write-safe method for returning a keyword value from the header of
a IRAF recognized image.
Returns the value as a string.
"""
# Insure that there is at least 1 extension specified...
if filename.find('[') < 0:
filename += '[0]'
_fname, _extn = parseFilename(filename)
if not handle:
# Open image whether it is FITS or GEIS
_fimg = openImage(_fname)
else:
# Use what the user provides, after insuring
# that it is a proper PyFITS object.
if isinstance(handle, fits.HDUList):
_fimg = handle
else:
raise ValueError('Handle must be %r object!' % fits.HDUList)
# Address the correct header
_hdr = getExtn(_fimg, _extn).header
try:
value = _hdr[keyword]
except KeyError:
_nextn = findKeywordExtn(_fimg, keyword)
try:
value = _fimg[_nextn].header[keyword]
except KeyError:
value = ''
if not handle:
_fimg.close()
del _fimg
if value == '':
if default is None:
value = None
else:
value = default
# NOTE: Need to clean up the keyword.. Occasionally the keyword value
# goes right up to the "/" FITS delimiter, and iraf.keypar is incapable
# of realizing this, so it incorporates "/" along with the keyword value.
# For example, after running "pydrizzle" on the image "j8e601bkq_flt.fits",
# the CD keywords look like this:
#
# CD1_1 = 9.221627430999639E-06/ partial of first axis coordinate w.r.t. x
# CD1_2 = -1.0346992614799E-05 / partial of first axis coordinate w.r.t. y
#
# so for CD1_1, iraf.keypar returns:
# "9.221627430999639E-06/"
#
# So, the following piece of code CHECKS for this and FIXES the string,
# very simply by removing the last character if it is a "/".
# This fix courtesy of Anton Koekemoer, 2002.
elif isinstance(value, string_types):
if value[-1:] == '/':
value = value[:-1]
return value | General, write-safe method for returning a keyword value from the header of
a IRAF recognized image.
Returns the value as a string. |
def generate_join_docs_list(self, left_collection_list, right_collection_list):
"""
Helper function for merge_join_docs
:param left_collection_list: Left Collection to be joined
:type left_collection_list: MongoCollection
:param right_collection_list: Right Collection to be joined
:type right_collection_list: MongoCollection
:return joined_docs: List of docs post join
"""
joined_docs = []
if (len(left_collection_list) != 0) and (len(right_collection_list) != 0):
for left_doc in left_collection_list:
for right_doc in right_collection_list:
l_dict = self.change_dict_keys(left_doc, 'L_')
r_dict = self.change_dict_keys(right_doc, 'R_')
joined_docs.append(dict(l_dict, **r_dict))
elif left_collection_list:
for left_doc in left_collection_list:
joined_docs.append(self.change_dict_keys(left_doc, 'L_'))
else:
for right_doc in right_collection_list:
joined_docs.append(self.change_dict_keys(right_doc, 'R_'))
return joined_docs | Helper function for merge_join_docs
:param left_collection_list: Left Collection to be joined
:type left_collection_list: MongoCollection
:param right_collection_list: Right Collection to be joined
:type right_collection_list: MongoCollection
:return joined_docs: List of docs post join |
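A worked example of the intended output shape, assuming change_dict_keys simply prefixes every key (that helper is not shown here):

def change_dict_keys(doc, prefix):
    # Assumed behaviour: prepend the prefix to every key.
    return {prefix + k: v for k, v in doc.items()}

left = [{'id': 1, 'city': 'Oslo'}]
right = [{'id': 7, 'temp': 12}, {'id': 8, 'temp': 15}]

joined = [dict(change_dict_keys(l, 'L_'), **change_dict_keys(r, 'R_'))
          for l in left for r in right]
print(joined)
# [{'L_id': 1, 'L_city': 'Oslo', 'R_id': 7, 'R_temp': 12},
#  {'L_id': 1, 'L_city': 'Oslo', 'R_id': 8, 'R_temp': 15}]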
def _nextSequence(cls, name=None):
"""Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence)
"""
if not name:
name = cls._sqlSequence
if not name:
# Assume it's tablename_primarykey_seq
if len(cls._sqlPrimary) != 1:
raise ValueError("Could not guess sequence name for multi-primary-key")
primary = cls._sqlPrimary[0]
name = '%s_%s_seq' % (cls._sqlTable, primary.replace('.','_'))
# Don't have . as a tablename or column name! =)
curs = cls.cursor()
curs.execute("SELECT nextval('%s')" % name)
value = curs.fetchone()[0]
curs.close()
return value | Return a new sequence number for insertion in self._sqlTable.
Note that if your sequences are not named
tablename_primarykey_seq (ie. for table 'blapp' with primary
key 'john_id', sequence name blapp_john_id_seq) you must give
the full sequence name as an optional argument to _nextSequence) |
def get_project(self) -> str:
""" Get the ihc project and make sure controller is ready before"""
with IHCController._mutex:
if self._project is None:
if self.client.get_state() != IHCSTATE_READY:
ready = self.client.wait_for_state_change(IHCSTATE_READY,
10)
if ready != IHCSTATE_READY:
return None
self._project = self.client.get_project()
return self._project | Get the ihc project and make sure controller is ready before |
def new(cls, nsptagname, val):
"""
Return a new ``CT_String`` element with tagname *nsptagname* and
``val`` attribute set to *val*.
"""
elm = OxmlElement(nsptagname)
elm.val = val
return elm | Return a new ``CT_String`` element with tagname *nsptagname* and
``val`` attribute set to *val*. |
def send(self, smtp=None, **kw):
"""
Sends message.
:param smtp: When set, parameters from this dictionary overwrite
options from config. See `emails.Message.send` for more information.
:param kwargs: Parameters for `emails.Message.send`
:return: Response objects from emails backend.
For default `emails.backend.smtp.SMTPBackend` returns an `emails.backend.smtp.SMTPResponse` object.
"""
smtp_options = {}
smtp_options.update(self.config.smtp_options)
if smtp:
smtp_options.update(smtp)
return super(Message, self).send(smtp=smtp_options, **kw) | Sends message.
:param smtp: When set, parameters from this dictionary overwrite
options from config. See `emails.Message.send` for more information.
:param kwargs: Parameters for `emails.Message.send`
:return: Response objects from emails backend.
For default `emails.backend.smtp.SMTPBackend` returns an `emails.backend.smtp.SMTPResponse` object. |
def switch_delete_record_for_userid(self, userid):
"""Remove userid switch record from switch table."""
with get_network_conn() as conn:
conn.execute("DELETE FROM switch WHERE userid=?",
(userid,))
LOG.debug("Switch record for user %s is removed from "
"switch table" % userid) | Remove userid switch record from switch table. |
def repack(self, to_width, *, msb_first, start=0, start_bit=0,
length=None):
"""Extracts a part of a BinArray's data and converts it to a BinArray
of a different width.
For the purposes of this conversion, words in this BinArray are joined
side-by-side, starting from a given start index (defaulting to 0),
skipping ``start_bit`` first bits of the first word, then the resulting
stream is split into ``to_width``-sized words and ``length`` first
such words are returned as a new BinArray.
If ``msb_first`` is False, everything proceeds with little endian
ordering: the first word provides the least significant bits of the
combined stream, ``start_bit`` skips bits starting from the LSB,
and the first output word is made from the lowest bits of the combined
stream. Otherwise (``msb_first`` is True), everything proceeds
with big endian ordering: the first word provides the most
significant bits of the combined stream, ``start_bit`` skips bits
starting from the MSB, and the first output word is made from the
highest bits of the combined stream.
``start_bit`` must be smaller than the width of the input word.
It is an error to request a larger length than can be provided from
the input array. If ``length`` is not provided, this function
returns as many words as can be extracted.
For example, consider a 10-to-3 repack with start_bit=2, length=4
msb_first=True:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |X|X|a|b|c|d|e|f|g|h|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |i|j|k|l|X|X|X|X|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
is repacked to:
+-+-+-+-+
|0|a|b|c|
+-+-+-+-+
|1|d|e|f|
+-+-+-+-+
|2|g|h|i|
+-+-+-+-+
|3|j|k|l|
+-+-+-+-+
The same repack for msb_first=False is performed as follows:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |h|g|f|e|d|c|b|a|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |X|X|X|X|X|X|l|k|j|i|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
into:
+-+-+-+-+
|0|c|b|a|
+-+-+-+-+
|1|f|e|d|
+-+-+-+-+
|2|i|h|g|
+-+-+-+-+
|3|l|k|j|
+-+-+-+-+
"""
to_width = operator.index(to_width)
if not isinstance(msb_first, bool):
raise TypeError('msb_first must be a bool')
available = self.repack_data_available(
to_width, start=start, start_bit=start_bit)
if length is None:
length = available
else:
length = operator.index(length)
if length > available:
raise ValueError('not enough data available')
if length < 0:
raise ValueError('length cannot be negative')
start = operator.index(start)
start_bit = operator.index(start_bit)
pos = start
accum = BinWord(0, 0)
if start_bit:
accum = self[pos]
pos += 1
rest = accum.width - start_bit
if msb_first:
accum = accum.extract(0, rest)
else:
accum = accum.extract(start_bit, rest)
res = BinArray(width=to_width, length=length)
for idx in range(length):
while len(accum) < to_width:
cur = self[pos]
pos += 1
if msb_first:
accum = BinWord.concat(cur, accum)
else:
accum = BinWord.concat(accum, cur)
rest = accum.width - to_width
if msb_first:
cur = accum.extract(rest, to_width)
accum = accum.extract(0, rest)
else:
cur = accum.extract(0, to_width)
accum = accum.extract(to_width, rest)
res[idx] = cur
return res | Extracts a part of a BinArray's data and converts it to a BinArray
of a different width.
For the purposes of this conversion, words in this BinArray are joined
side-by-side, starting from a given start index (defaulting to 0),
skipping ``start_bit`` first bits of the first word, then the resulting
stream is split into ``to_width``-sized words and ``length`` first
such words are returned as a new BinArray.
If ``msb_first`` is False, everything proceeds with little endian
ordering: the first word provides the least significant bits of the
combined stream, ``start_bit`` skips bits starting from the LSB,
and the first output word is made from the lowest bits of the combined
stream. Otherwise (``msb_first`` is True), everything proceeds
with big endian ordering: the first word provides the most
significant bits of the combined stream, ``start_bit`` skips bits
starting from the MSB, and the first output word is made from the
highest bits of the combined stream.
``start_bit`` must be smaller than the width of the input word.
It is an error to request a larger length than can be provided from
the input array. If ``length`` is not provided, this function
returns as many words as can be extracted.
For example, consider a 10-to-3 repack with start_bit=2, length=4
msb_first=True:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |X|X|a|b|c|d|e|f|g|h|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |i|j|k|l|X|X|X|X|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
is repacked to:
+-+-+-+-+
|0|a|b|c|
+-+-+-+-+
|1|d|e|f|
+-+-+-+-+
|2|g|h|i|
+-+-+-+-+
|3|j|k|l|
+-+-+-+-+
The same repack for msb_first=False is performed as follows:
+---------+-+-+-+-+-+-+-+-+-+-+
| | MSB ... LSB |
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
| start |h|g|f|e|d|c|b|a|X|X|
+---------+-+-+-+-+-+-+-+-+-+-+
| start+1 |X|X|X|X|X|X|l|k|j|i|
+---------+-+-+-+-+-+-+-+-+-+-+
| | ... |
+---------+-+-+-+-+-+-+-+-+-+-+
into:
+-+-+-+-+
|0|c|b|a|
+-+-+-+-+
|1|f|e|d|
+-+-+-+-+
|2|i|h|g|
+-+-+-+-+
|3|l|k|j|
+-+-+-+-+ |
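A short sketch of the simplest case, repacking two 8-bit words into 4-bit nibbles with big-endian bit ordering; the `BinArray(values, width=...)` constructor form used here is an assumption about the library's API.

a = BinArray([0xAB, 0xCD], width=8)   # assumed constructor form
nibbles = a.repack(4, msb_first=True)
# The combined stream is 0xABCD; splitting from the most significant bits gives
# nibbles == [0xA, 0xB, 0xC, 0xD], each a 4-bit word.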
def _escape(self, msg):
"""
Escapes double quotes by adding another double quote as per the Scratch
protocol. Expects a string without its delimiting quotes. Returns a new
escaped string.
"""
escaped = ''
for c in msg:
escaped += c
if c == '"':
escaped += '"'
return escaped | Escapes double quotes by adding another double quote as per the Scratch
protocol. Expects a string without its delimiting quotes. Returns a new
escaped string. |
def timezone(self, tz):
"""
Set timezone on the audit records. Timezone can be in formats:
'US/Eastern', 'PST', 'Europe/Helsinki'
See SMC Log Viewer settings for more examples.
:param str tz: timezone, i.e. CST
"""
self.data['resolving'].update(
timezone=tz,
time_show_zone=True) | Set timezone on the audit records. Timezone can be in formats:
'US/Eastern', 'PST', 'Europe/Helsinki'
See SMC Log Viewer settings for more examples.
:param str tz: timezone, i.e. CST |
def send(self, to, from_, body, dm=False):
"""
Send BODY as an @message from FROM to TO
If we don't have the access tokens for FROM, raise AccountNotFoundError.
If the tweet resulting from '@{0} {1}'.format(TO, BODY) is > 140 chars
raise TweetTooLongError.
If we want to send this message as a DM, do so.
Arguments:
- `to`: str
- `from_`: str
- `body`: str
- `dm`: [optional] bool
Return: None
Exceptions: AccountNotFoundError
TweetTooLongError
"""
tweet = '@{0} {1}'.format(to, body)
if from_ not in self.accounts:
raise AccountNotFoundError()
if len(tweet) > 140:
raise TweetTooLongError()
self.auth.set_access_token(*self.accounts.get(from_))
api = tweepy.API(self.auth)
if dm:
api.send_direct_message(screen_name=to, text=body)
else:
api.update_status(tweet)
return | Send BODY as an @message from FROM to TO
If we don't have the access tokens for FROM, raise AccountNotFoundError.
If the tweet resulting from '@{0} {1}'.format(TO, BODY) is > 140 chars
raise TweetTooLongError.
If we want to send this message as a DM, do so.
Arguments:
- `to`: str
- `from_`: str
- `body`: str
- `dm`: [optional] bool
Return: None
Exceptions: AccountNotFoundError
TweetTooLongError |
def domain_block(self, domain=None):
"""
Add a block for all statuses originating from the specified domain for the logged-in user.
"""
params = self.__generate_params(locals())
self.__api_request('POST', '/api/v1/domain_blocks', params) | Add a block for all statuses originating from the specified domain for the logged-in user. |
def validate_text(value):
"""Validate a text formatoption
Parameters
----------
value: see :attr:`psyplot.plotter.labelplotter.text`
Raises
------
ValueError"""
possible_transform = ['axes', 'fig', 'data']
validate_transform = ValidateInStrings('transform', possible_transform,
True)
tests = [validate_float, validate_float, validate_str,
validate_transform, dict]
if isinstance(value, six.string_types):
xpos, ypos = rcParams['texts.default_position']
return [(xpos, ypos, value, 'axes', {'ha': 'right'})]
elif isinstance(value, tuple):
value = [value]
try:
value = list(value)[:]
except TypeError:
raise ValueError("Value must be string or list of tuples!")
for i, val in enumerate(value):
try:
val = tuple(val)
except TypeError:
raise ValueError(
"Text must be an iterable of the form "
"(x, y, s[, trans, params])!")
if len(val) < 3:
raise ValueError(
"Text tuple must at least be like [x, y, s], with floats x, "
"y and string s!")
elif len(val) == 3 or isinstance(val[3], dict):
val = list(val)
val.insert(3, 'data')
if len(val) == 4:
val += [{}]
val = tuple(val)
if len(val) > 5:
raise ValueError(
"Text tuple must not be longer then length 5. It can be "
"like (x, y, s[, trans, params])!")
value[i] = tuple(validate(x) for validate, x in zip(tests, val))
return value | Validate a text formatoption
Parameters
----------
value: see :attr:`psyplot.plotter.labelplotter.text`
Raises
------
ValueError |
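A rough sketch of the normalization this validator performs (the per-field validators may coerce values, e.g. x and y to floats):

validate_text('model name')
# -> [(x, y, 'model name', 'axes', {'ha': 'right'})], with x and y taken from
#    rcParams['texts.default_position']
validate_text([(0.5, 0.5, 'label')])
# -> [(0.5, 0.5, 'label', 'data', {})]; the missing transform defaults to 'data'
#    and an empty params dict is appended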
def interpolate_gridded_scalar(self, x, y, c, order=1, pad=1, offset=0):
"""Interpolate gridded scalar C to points x,y.
Parameters
----------
x, y : array-like
Points at which to interpolate
c : array-like
The scalar, assumed to be defined on the grid.
order : int
Order of interpolation
pad : int
Number of pad cells added
offset : int
???
Returns
-------
ci : array-like
The interpolated scalar
"""
## no longer necessary because we accept pre-padded arrays
# assert c.shape == (self.Ny, self.Nx), 'Shape of c needs to be (Ny,Nx)'
# first pad the array to deal with the boundaries
# (map_coordinates can't seem to deal with this by itself)
# pad twice so cubic interpolation can be used
if pad > 0:
cp = self._pad_field(c, pad=pad)
else:
cp = c
# now the shape is (Ny + 2*pad, Nx + 2*pad)
i = (x - self.xmin)/self.Lx*self.Nx + pad + offset - 0.5
j = (y - self.ymin)/self.Ly*self.Ny + pad + offset - 0.5
# for some reason this still does not work with high precision near the boundaries
return scipy.ndimage.map_coordinates(cp, [j,i],
mode='constant', order=order, cval=np.nan) | Interpolate gridded scalar C to points x,y.
Parameters
----------
x, y : array-like
Points at which to interpolate
c : array-like
The scalar, assumed to be defined on the grid.
order : int
Order of interpolation
pad : int
Number of pad cells added
offset : int
???
Returns
-------
ci : array-like
The interpolated scalar |
def add_auth_attribute(attr, value, actor=False):
"""
Helper function for login managers. Adds authorization attributes
to :obj:`current_auth` for the duration of the request.
:param str attr: Name of the attribute
:param value: Value of the attribute
:param bool actor: Whether this attribute is an actor
(user or client app accessing own data)
If the attribute is an actor and :obj:`current_auth` does not currently
have an actor, the attribute is also made available as
``current_auth.actor``, which in turn is used by
``current_auth.is_authenticated``.
The attribute name ``user`` is special-cased:
1. ``user`` is always treated as an actor
2. ``user`` is also made available as ``_request_ctx_stack.top.user`` for
compatibility with Flask-Login
"""
if attr in ('actor', 'anchors', 'is_anonymous', 'not_anonymous', 'is_authenticated', 'not_authenticated'):
raise AttributeError("Attribute name %s is reserved by current_auth" % attr)
# Invoking current_auth will also create it on the local stack. We can
# then proceed to set attributes on it.
ca = current_auth._get_current_object()
# Since :class:`CurrentAuth` overrides ``__setattr__``, we need to use :class:`object`'s.
object.__setattr__(ca, attr, value)
if attr == 'user':
# Special-case 'user' for compatibility with Flask-Login
_request_ctx_stack.top.user = value
# A user is always an actor
actor = True
if actor:
object.__setattr__(ca, 'actor', value) | Helper function for login managers. Adds authorization attributes
to :obj:`current_auth` for the duration of the request.
:param str attr: Name of the attribute
:param value: Value of the attribute
:param bool actor: Whether this attribute is an actor
(user or client app accessing own data)
If the attribute is an actor and :obj:`current_auth` does not currently
have an actor, the attribute is also made available as
``current_auth.actor``, which in turn is used by
``current_auth.is_authenticated``.
The attribute name ``user`` is special-cased:
1. ``user`` is always treated as an actor
2. ``user`` is also made available as ``_request_ctx_stack.top.user`` for
compatibility with Flask-Login |
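A sketch of how a login manager might call this while handling a request; `load_user_from_session` is a hypothetical helper and an active Flask request context is assumed.

user = load_user_from_session()      # hypothetical lookup
add_auth_attribute('user', user)     # 'user' is special-cased: also sets current_auth.actor
add_auth_attribute('client', None)   # a plain attribute; only an actor if actor=True is passed
# current_auth.is_authenticated now reflects whether an actor was set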
def compliance_report(self, validation_file=None, validation_source=None):
"""
Return a compliance report.
Verify that the device complies with the given validation file and writes a compliance
report file. See https://napalm.readthedocs.io/en/latest/validate/index.html.
:param validation_file: Path to the file containing compliance definition. Default is None.
:param validation_source: Dictionary containing compliance rules.
:raise ValidationException: File is not valid.
:raise NotImplementedError: Method not implemented.
"""
return validate.compliance_report(
self, validation_file=validation_file, validation_source=validation_source
) | Return a compliance report.
Verify that the device complies with the given validation file and writes a compliance
report file. See https://napalm.readthedocs.io/en/latest/validate/index.html.
:param validation_file: Path to the file containing compliance definition. Default is None.
:param validation_source: Dictionary containing compliance rules.
:raise ValidationException: File is not valid.
:raise NotImplementedError: Method not implemented. |
def mouseMoveEvent(self, e):
"""
Extends mouseMoveEvent to display a pointing hand cursor when the
mouse cursor is over a file location
"""
super(PyInteractiveConsole, self).mouseMoveEvent(e)
cursor = self.cursorForPosition(e.pos())
assert isinstance(cursor, QtGui.QTextCursor)
p = cursor.positionInBlock()
usd = cursor.block().userData()
if usd and usd.start_pos_in_block <= p <= usd.end_pos_in_block:
if QtWidgets.QApplication.overrideCursor() is None:
QtWidgets.QApplication.setOverrideCursor(
QtGui.QCursor(QtCore.Qt.PointingHandCursor))
else:
if QtWidgets.QApplication.overrideCursor() is not None:
QtWidgets.QApplication.restoreOverrideCursor() | Extends mouseMoveEvent to display a pointing hand cursor when the
mouse cursor is over a file location |
def apply(self, coordinates):
"""Generate, apply and return a random manipulation"""
transform = self.get_transformation(coordinates)
result = MolecularDistortion(self.affected_atoms, transform)
result.apply(coordinates)
return result | Generate, apply and return a random manipulation |
def set_monitor(module):
""" Defines the monitor method on the module. """
def monitor(name, tensor,
track_data=True,
track_grad=True):
"""
Register the tensor under the name given (now a string)
and track it based on the track_data and track_grad arguments.
"""
module.monitored_vars[name] = {
'tensor':tensor,
'track_data':track_data,
'track_grad':track_grad,
}
module.monitor = monitor | Defines the monitor method on the module. |
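A usage sketch, assuming `model` is any object (for example a torch.nn.Module) that already carries a `monitored_vars` dict, since the closure writes into it but does not create it; `attn` is an illustrative tensor name.

model.monitored_vars = {}
set_monitor(model)
model.monitor('attention', attn, track_data=True, track_grad=False)
# model.monitored_vars['attention'] now records the tensor and its tracking flags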
def cmsearch_from_file(cm_file_path, seqs, moltype, cutoff=0.0, params=None):
"""Uses cmbuild to build a CM file, then cmsearch to find homologs.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to search sequences in seqs.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs.
"""
#NOTE: Must degap seqs or Infernal will seg fault!
seqs = SequenceCollection(seqs,MolType=moltype).degap()
#Create mapping between abbreviated IDs and full IDs
int_map, int_keys = seqs.getIntMap()
#Create SequenceCollection from int_map.
int_map = SequenceCollection(int_map,MolType=moltype)
app = Cmsearch(InputHandler='_input_as_paths',WorkingDir='/tmp',\
params=params)
app.Parameters['--informat'].on('FASTA')
app.Parameters['-T'].on(cutoff)
seqs_path = app._input_as_multiline_string(int_map.toFasta())
paths = [cm_file_path,seqs_path]
_, tmp_file = mkstemp(dir=app.WorkingDir)
app.Parameters['--tabfile'].on(tmp_file)
res = app(paths)
search_results = list(CmsearchParser(res['SearchResults'].readlines()))
if search_results:
for i,line in enumerate(search_results):
label = line[1]
search_results[i][1]=int_keys.get(label,label)
res.cleanUp()
return search_results | Uses cmbuild to build a CM file, then cmsearch to find homologs.
- cm_file_path: path to the file created by cmbuild, containing aligned
sequences. This will be used to search sequences in seqs.
- seqs: SequenceCollection object or something that can be used to
construct one, containing unaligned sequences that are to be
searched.
- moltype: cogent.core.moltype object. Must be DNA or RNA
- cutoff: bitscore cutoff. No sequences < cutoff will be kept in
search results. (Default=0.0). Infernal documentation suggests
a cutoff of log2(number nucleotides searching) will give most
likely true homologs. |
def wipe_container(self):
"""
Completely wipes out the contents of the container.
"""
if self.test_run:
print("Wipe would delete {0} objects.".format(self.container.object_count))
else:
if not self.quiet or self.verbosity > 1:
print("Deleting {0} objects...".format(self.container.object_count))
self._connection.delete_all_objects() | Completely wipes out the contents of the container. |
def updateMktDepthL2(self, id, position, marketMaker, operation, side, price, size):
"""updateMktDepthL2(EWrapper self, TickerId id, int position, IBString marketMaker, int operation, int side, double price, int size)"""
return _swigibpy.EWrapper_updateMktDepthL2(self, id, position, marketMaker, operation, side, price, size) | updateMktDepthL2(EWrapper self, TickerId id, int position, IBString marketMaker, int operation, int side, double price, int size) |
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
User = orm[user_orm_label]
try:
user = User.objects.all()[0]
for article in orm.Article.objects.all():
article.author = user
article.save()
except IndexError:
pass | Write your forwards methods here. |
def set_limit(self, param):
"""
Models "Limit Command" functionality of device.
Sets the target temperature to be reached.
:param param: Target temperature in C, multiplied by 10, as a string. Can be negative.
:return: Empty string.
"""
# TODO: Is not having leading zeroes / 4 digits an error?
limit = int(param)
if -2000 <= limit <= 6000:
self.device.temperature_limit = limit / 10.0
return "" | Models "Limit Command" functionality of device.
Sets the target temperature to be reached.
:param param: Target temperature in C, multiplied by 10, as a string. Can be negative.
:return: Empty string. |
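A quick sketch of the parameter encoding, assuming `iface` is the emulator interface object exposing this command; values outside the [-2000, 6000] range leave the limit unchanged.

iface.set_limit('0450')    # device.temperature_limit becomes 45.0
iface.set_limit('-0150')   # device.temperature_limit becomes -15.0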
def set_stepdown_window(self, start, end, enabled=True, scheduled=True, weekly=True):
"""Set the stepdown window for this instance.
Date times are assumed to be UTC, so use UTC date times.
:param datetime.datetime start: The datetime which the stepdown window is to open.
:param datetime.datetime end: The datetime which the stepdown window is to close.
:param bool enabled: A boolean indicating whether or not stepdown is to be enabled.
:param bool scheduled: A boolean indicating whether or not to schedule stepdown.
:param bool weekly: A boolean indicating whether or not to schedule compaction weekly.
"""
# Ensure a logical start and endtime is requested.
if not start < end:
raise TypeError('Parameter "start" must occur earlier in time than "end".')
# Ensure specified window is less than a week in length.
week_delta = datetime.timedelta(days=7)
if not ((end - start) <= week_delta):
raise TypeError('Stepdown windows can not be longer than 1 week in length.')
url = self._service_url + 'stepdown/'
data = {
'start': int(start.strftime('%s')),
'end': int(end.strftime('%s')),
'enabled': enabled,
'scheduled': scheduled,
'weekly': weekly,
}
response = requests.post(
url,
data=json.dumps(data),
**self._instances._default_request_kwargs
)
return response.json() | Set the stepdown window for this instance.
Date times are assumed to be UTC, so use UTC date times.
:param datetime.datetime start: The datetime which the stepdown window is to open.
:param datetime.datetime end: The datetime which the stepdown window is to close.
:param bool enabled: A boolean indicating whether or not stepdown is to be enabled.
:param bool scheduled: A boolean indicating whether or not to schedule stepdown.
:param bool weekly: A boolean indicating whether or not to schedule compaction weekly. |
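A minimal usage sketch with UTC datetimes, assuming `instance` is an instance object exposing this method.

import datetime

start = datetime.datetime(2024, 1, 6, 2, 0)   # treated as UTC
end = start + datetime.timedelta(hours=4)
result = instance.set_stepdown_window(start, end, enabled=True, scheduled=True, weekly=True)
# A window longer than 7 days, or with start >= end, raises TypeError before any request is made.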
def add_store(name, store, saltenv='base'):
'''
Store a certificate to the given store
name
The certificate to store, this can use local paths
or salt:// paths
store
The store to add the certificate to
saltenv
The salt environment to use, this is ignored if a local
path is specified
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
cert_file = __salt__['cp.cache_file'](name, saltenv)
if cert_file is False:
ret['result'] = False
ret['comment'] += 'Certificate file not found.'
else:
cert_serial = __salt__['certutil.get_cert_serial'](cert_file)
serials = __salt__['certutil.get_stored_cert_serials'](store)
if cert_serial not in serials:
out = __salt__['certutil.add_store'](name, store)
if "successfully" in out:
ret['changes']['added'] = name
else:
ret['result'] = False
ret['comment'] += "Failed to store certificate {0}".format(name)
else:
ret['comment'] += "{0} already stored.".format(name)
return ret | Store a certificate to the given store
name
The certificate to store, this can use local paths
or salt:// paths
store
The store to add the certificate to
saltenv
The salt environment to use, this is ignored if a local
path is specified |
def add_format(self, mimetype, format, requires_context=False):
""" Registers a new format to be used in a graph's serialize call
If you've installed an rdflib serializer plugin, use this
to add it to the content negotiation system
Set requires_context=True if this format requires a context-aware graph
"""
self.formats[mimetype] = format
if not requires_context:
self.ctxless_mimetypes.append(mimetype)
self.all_mimetypes.append(mimetype) | Registers a new format to be used in a graph's serialize call
If you've installed an rdflib serializer plugin, use this
to add it to the content negotiation system
Set requires_context=True if this format requires a context-aware graph |
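A usage sketch, assuming `decider` is the content-negotiation object this method belongs to and that the corresponding rdflib serializer plugins are installed.

decider.add_format('text/turtle', 'turtle')                             # context-free format
decider.add_format('application/trig', 'trig', requires_context=True)  # needs a context-aware graph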
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, chunked=False,
body_pos=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/shazow/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers,
chunked=chunked)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw['request_method'] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw)
# Everything went great!
clean_exit = True
except queue.Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (TimeoutError, HTTPException, SocketError, ProtocolError,
BaseSSLError, SSLError, CertificateError) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e, _pool=self,
_stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s", retries, err, url)
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, body_pos=body_pos,
**response_kw)
def drain_and_release_conn(response):
try:
# discard any remaining response body, the connection will be
# released back to the pool once the entire response is read
response.read()
except (TimeoutError, HTTPException, SocketError, ProtocolError,
BaseSSLError, SSLError) as e:
pass
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, body_pos=body_pos,
**response_kw)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.getheader('Retry-After'))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn,
body_pos=body_pos, **response_kw)
return response | Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib` |
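A minimal end-to-end sketch against a public host, using the documented defaults; most callers should prefer the higher-level request() convenience method.

import urllib3
from urllib3.util.retry import Retry

pool = urllib3.HTTPConnectionPool('example.com', port=80, maxsize=1)
resp = pool.urlopen('GET', '/', retries=Retry(total=3, redirect=2))
print(resp.status, len(resp.data))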
def create_permissions_from_tuples(model, codename_tpls):
"""Creates custom permissions on model "model".
"""
if codename_tpls:
model_cls = django_apps.get_model(model)
content_type = ContentType.objects.get_for_model(model_cls)
for codename_tpl in codename_tpls:
app_label, codename, name = get_from_codename_tuple(
codename_tpl, model_cls._meta.app_label
)
try:
Permission.objects.get(codename=codename, content_type=content_type)
except ObjectDoesNotExist:
Permission.objects.create(
name=name, codename=codename, content_type=content_type
)
verify_codename_exists(f"{app_label}.{codename}") | Creates custom permissions on model "model". |
def auth(self):
"""Send authorization secret to nsqd."""
self.send(nsq.auth(self.auth_secret))
frame, data = self.read_response()
if frame == nsq.FRAME_TYPE_ERROR:
raise data
try:
response = json.loads(data.decode('utf-8'))
except ValueError:
self.close_stream()
raise errors.NSQException(
'failed to parse AUTH response JSON from nsqd: '
'{!r}'.format(data))
self.on_auth.send(self, response=response)
return response | Send authorization secret to nsqd. |
def __replace_adjective(sentence, counts):
    """Let's find and replace all instances of #ADJECTIVE
    :param sentence:
    :param counts:
"""
if sentence is not None:
while sentence.find('#ADJECTIVE') != -1:
sentence = sentence.replace('#ADJECTIVE',
str(__get_adjective(counts)), 1)
if sentence.find('#ADJECTIVE') == -1:
return sentence
return sentence
else:
return sentence | Let's find and replace all instances of #ADJECTIVE
:param sentence:
:param counts: |
def _maybe_trim_strings(self, array, **keys):
"""
if requested, trim trailing white space from
all string fields in the input array
"""
trim_strings = keys.get('trim_strings', False)
if self.trim_strings or trim_strings:
_trim_strings(array) | if requested, trim trailing white space from
all string fields in the input array |
def update_next_block(self):
""" If the last instruction of this block is a JP, JR or RET (with no
conditions) then the next and goes_to sets just contains a
single block
"""
last = self.mem[-1]
if last.inst not in ('ret', 'jp', 'jr') or last.condition_flag is not None:
return
if last.inst == 'ret':
if self.next is not None:
self.next.delete_from(self)
self.delete_goes(self.next)
return
if last.opers[0] not in LABELS.keys():
__DEBUG__("INFO: %s is not defined. No optimization is done." % last.opers[0], 2)
LABELS[last.opers[0]] = LabelInfo(last.opers[0], 0, DummyBasicBlock(ALL_REGS, ALL_REGS))
n_block = LABELS[last.opers[0]].basic_block
if self.next is n_block:
return
if self.next.prev == self:
# The next basic block is not this one since it ends with a jump
self.next.delete_from(self)
self.delete_goes(self.next)
self.next = n_block
self.next.add_comes_from(self)
self.add_goes_to(self.next) | If the last instruction of this block is a JP, JR or RET (with no
conditions) then the next and goes_to sets just contains a
single block |
async def fetchmany(self, size: int = None) -> Iterable[sqlite3.Row]:
"""Fetch up to `cursor.arraysize` number of rows."""
args = () # type: Tuple[int, ...]
if size is not None:
args = (size,)
return await self._execute(self._cursor.fetchmany, *args) | Fetch up to `cursor.arraysize` number of rows. |
def load_dataset(data_name):
"""Load sentiment dataset."""
if data_name == 'MR' or data_name == 'Subj':
train_dataset, output_size = _load_file(data_name)
vocab, max_len = _build_vocab(data_name, train_dataset, [])
train_dataset, train_data_lengths = _preprocess_dataset(train_dataset, vocab, max_len)
return vocab, max_len, output_size, train_dataset, train_data_lengths
else:
train_dataset, test_dataset, output_size = _load_file(data_name)
vocab, max_len = _build_vocab(data_name, train_dataset, test_dataset)
train_dataset, train_data_lengths = _preprocess_dataset(train_dataset, vocab, max_len)
test_dataset, test_data_lengths = _preprocess_dataset(test_dataset, vocab, max_len)
return vocab, max_len, output_size, train_dataset, train_data_lengths, test_dataset, \
test_data_lengths | Load sentiment dataset. |
def natural_ipv4_netmask(ip, fmt='prefixlen'):
'''
Returns the "natural" mask of an IPv4 address
'''
bits = _ipv4_to_bits(ip)
if bits.startswith('11'):
mask = '24'
elif bits.startswith('1'):
mask = '16'
else:
mask = '8'
if fmt == 'netmask':
return cidr_to_ipv4_netmask(mask)
else:
return '/' + mask | Returns the "natural" mask of an IPv4 address |
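Expected behaviour for the three classful ranges, assuming the bit-conversion and netmask helpers behave as their names suggest.

natural_ipv4_netmask('192.168.0.1')                 # '/24'  (leading bits '11')
natural_ipv4_netmask('172.16.0.1')                  # '/16'  (leading bit '1', but not '11')
natural_ipv4_netmask('10.0.0.1')                    # '/8'
natural_ipv4_netmask('172.16.0.1', fmt='netmask')   # '255.255.0.0'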
def _record(self):
# type: () -> bytes
'''
An internal method to generate a string representing this El Torito
Validation Entry.
Parameters:
None.
Returns:
String representing this El Torito Validation Entry.
'''
return struct.pack(self.FMT, 1, self.platform_id, 0, self.id_string,
self.checksum, 0x55, 0xaa) | An internal method to generate a string representing this El Torito
Validation Entry.
Parameters:
None.
Returns:
String representing this El Torito Validation Entry. |
def compare_tags(self, tags):
''' given a list of tags that the user has specified, return two lists:
matched_tags: tags that were found within the current play and match those given
by the user
unmatched_tags: tags that were found within the current play but do not match
any provided by the user '''
# gather all the tags in all the tasks into one list
all_tags = []
for task in self._tasks:
all_tags.extend(task.tags)
# compare the lists of tags using sets and return the matched and unmatched
all_tags_set = set(all_tags)
tags_set = set(tags)
matched_tags = all_tags_set & tags_set
unmatched_tags = all_tags_set - tags_set
return matched_tags, unmatched_tags | given a list of tags that the user has specified, return two lists:
matched_tags: tags that were found within the current play and match those given
by the user
unmatched_tags: tags that were found within the current play but do not match
any provided by the user |
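A small worked example, assuming `play` holds tasks tagged 'setup', 'deploy' and 'debug'.

matched, unmatched = play.compare_tags(['deploy', 'nonexistent'])
# matched   -> {'deploy'}           tags present in the play and requested by the user
# unmatched -> {'setup', 'debug'}   tags present in the play but not requested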
def get_inline_instances(self, request, *args, **kwargs):
"""
Create the inlines for the admin, including the placeholder and contentitem inlines.
"""
inlines = super(PlaceholderEditorAdmin, self).get_inline_instances(request, *args, **kwargs)
extra_inline_instances = []
inlinetypes = self.get_extra_inlines()
for InlineType in inlinetypes:
inline_instance = InlineType(self.model, self.admin_site)
extra_inline_instances.append(inline_instance)
return extra_inline_instances + inlines | Create the inlines for the admin, including the placeholder and contentitem inlines. |
def _plot_colorbar(mappable, fig, subplot_spec, max_cbar_height=4):
"""
Plots a vertical color bar based on mappable.
The height of the colorbar is min(figure-height, max_cmap_height)
Parameters
----------
mappable : The image to which the colorbar applies.
fig : The figure object
subplot_spec : the gridspec subplot. Eg. axs[1,2]
max_cbar_height : `float`
The maximum colorbar height
Returns
-------
color bar ax
"""
width, height = fig.get_size_inches()
if height > max_cbar_height:
# to make the colorbar shorter, the
# ax is split and the lower portion is used.
axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=subplot_spec,
height_ratios=[height - max_cbar_height, max_cbar_height])
heatmap_cbar_ax = fig.add_subplot(axs2[1])
else:
heatmap_cbar_ax = fig.add_subplot(subplot_spec)
pl.colorbar(mappable, cax=heatmap_cbar_ax)
return heatmap_cbar_ax | Plots a vertical color bar based on mappable.
The height of the colorbar is min(figure-height, max_cmap_height)
Parameters
----------
mappable : The image to which the colorbar applies.
fig : The figure object
subplot_spec : the gridspec subplot. Eg. axs[1,2]
max_cbar_height : `float`
The maximum colorbar height
Returns
-------
color bar ax |
def add_transaction_clause(self, clause):
"""
Adds a iff clause to this statement
:param clause: The clause that will be added to the iff statement
:type clause: TransactionClause
"""
if not isinstance(clause, TransactionClause):
raise StatementException('only instances of TransactionClause can be added to statements')
clause.set_context_id(self.context_counter)
self.context_counter += clause.get_context_size()
self.transactions.append(clause) | Adds a iff clause to this statement
:param clause: The clause that will be added to the iff statement
:type clause: TransactionClause |
def create_parser() -> FileAwareParser:
"""
Create a command line parser
:return: parser
"""
parser = FileAwareParser(description="Clear data from FHIR observation fact table", prog="removefacts",
use_defaults=False)
parser.add_argument("-ss", "--sourcesystem", metavar="SOURCE SYSTEM CODE", help="Sourcesystem code")
parser.add_argument("-u", "--uploadid", metavar="UPLOAD IDENTIFIER",
help="Upload identifer -- uniquely identifies this batch", type=int,
nargs='*')
add_connection_args(parser, strong_config_file=False)
parser.add_argument("-p", "--testprefix", metavar="SS PREFIX",
help=f"Sourcesystem_cd prefix for test suite functions (Default: {default_test_prefix})")
parser.add_argument("--testlist", help="List leftover test suite entries", action="store_true")
parser.add_argument("--removetestlist", help="Remove leftover test suite entries", action="store_true")
return parser | Create a command line parser
:return: parser |
def write_extra_data(self, stream: WriteStream) -> None:
"""Writes the param container and string pointer arrays.
Unlike other write_extra_data functions, this can be called before write()."""
if self.params:
stream.align(8)
if self._params_offset_writer:
self._params_offset_writer.write_current_offset(stream)
else:
self._params_offset = stream.tell()
self.params.write(stream)
if self.actions:
stream.align(8)
if self._actions_offset_writer:
self._actions_offset_writer.write_current_offset(stream)
else:
self._actions_offset = stream.tell()
for s in self.actions:
stream.write_string_ref(s.v)
if self.queries:
stream.align(8)
if self._queries_offset_writer:
self._queries_offset_writer.write_current_offset(stream)
else:
self._queries_offset = stream.tell()
for s in self.queries:
stream.write_string_ref(s.v) | Writes the param container and string pointer arrays.
Unlike other write_extra_data functions, this can be called before write(). |
def resolve_imports(self, imports, import_depth, parser=None):
"""Import required ontologies.
"""
if imports and import_depth:
for i in list(self.imports):
try:
if os.path.exists(i) or i.startswith(('http', 'ftp')):
self.merge(Ontology(i, import_depth=import_depth-1, parser=parser))
else: # try to look at neighbouring ontologies
self.merge(Ontology( os.path.join(os.path.dirname(self.path), i),
import_depth=import_depth-1, parser=parser))
except (IOError, OSError, URLError, HTTPError, _etree.ParseError) as e:
warnings.warn("{} occured during import of "
"{}".format(type(e).__name__, i),
ProntoWarning) | Import required ontologies. |
def MGMT_ED_SCAN(self, sAddr, xCommissionerSessionId, listChannelMask, xCount, xPeriod, xScanDuration):
    """send MGMT_ED_SCAN message to a given destination.
Args:
sAddr: IPv6 destination address for this message
xCommissionerSessionId: commissioner session id
listChannelMask: a channel array to indicate which channels are to be scanned
xCount: number of IEEE 802.15.4 ED Scans (milliseconds)
xPeriod: Period between successive IEEE802.15.4 ED Scans (milliseconds)
xScanDuration: IEEE 802.15.4 ScanDuration to use when performing an IEEE 802.15.4 ED Scan (milliseconds)
Returns:
True: successful to send MGMT_ED_SCAN message.
False: fail to send MGMT_ED_SCAN message
"""
print '%s call MGMT_ED_SCAN' % self.port
channelMask = ''
channelMask = '0x' + self.__convertLongToString(self.__convertChannelMask(listChannelMask))
try:
cmd = 'commissioner energy %s %s %s %s %s' % (channelMask, xCount, xPeriod, xScanDuration, sAddr)
print cmd
return self.__sendCommand(cmd) == 'Done'
except Exception, e:
ModuleHelper.writeintodebuglogger("MGMT_ED_SCAN() error: " + str(e)) | send MGMT_ED_SCAN message to a given destination.
Args:
sAddr: IPv6 destination address for this message
xCommissionerSessionId: commissioner session id
listChannelMask: a channel array to indicate which channels are to be scanned
xCount: number of IEEE 802.15.4 ED Scans (milliseconds)
xPeriod: Period between successive IEEE802.15.4 ED Scans (milliseconds)
xScanDuration: IEEE 802.15.4 ScanDuration to use when performing an IEEE 802.15.4 ED Scan (milliseconds)
Returns:
True: successful to send MGMT_ED_SCAN message.
False: fail to send MGMT_ED_SCAN message |
def create_job(db, datadir):
"""
Create job for the given user, return it.
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param datadir:
Data directory of the user who owns/started this job.
:returns:
the job ID
"""
calc_id = get_calc_id(db, datadir) + 1
job = dict(id=calc_id, is_running=1, description='just created',
user_name='openquake', calculation_mode='to be set',
ds_calc_dir=os.path.join('%s/calc_%s' % (datadir, calc_id)))
return db('INSERT INTO job (?S) VALUES (?X)',
job.keys(), job.values()).lastrowid | Create job for the given user, return it.
:param db:
a :class:`openquake.server.dbapi.Db` instance
:param datadir:
Data directory of the user who owns/started this job.
:returns:
the job ID |
def devices(self):
"""Wait for new DS4 devices to appear."""
context = Context()
existing_devices = context.list_devices(subsystem="hidraw")
future_devices = self._get_future_devices(context)
for hidraw_device in itertools.chain(existing_devices, future_devices):
hid_device = hidraw_device.parent
if hid_device.subsystem != "hid":
continue
cls = HID_DEVICES.get(hid_device.get("HID_NAME"))
if not cls:
continue
for child in hid_device.parent.children:
event_device = child.get("DEVNAME", "")
if event_device.startswith("/dev/input/event"):
break
else:
continue
try:
device_addr = hid_device.get("HID_UNIQ", "").upper()
if device_addr:
device_name = "{0} {1}".format(device_addr,
hidraw_device.sys_name)
else:
device_name = hidraw_device.sys_name
yield cls(name=device_name,
addr=device_addr,
type=cls.__type__,
hidraw_device=hidraw_device.device_node,
event_device=event_device)
except DeviceError as err:
self.logger.error("Unable to open DS4 device: {0}", err) | Wait for new DS4 devices to appear. |
def cut_mechanisms(self):
"""The mechanisms of this system that are currently cut.
Note that although ``cut_indices`` returns micro indices, this
returns macro mechanisms.
Yields:
tuple[int]
"""
for mechanism in utils.powerset(self.node_indices, nonempty=True):
micro_mechanism = self.macro2micro(mechanism)
if self.cut.splits_mechanism(micro_mechanism):
yield mechanism | The mechanisms of this system that are currently cut.
Note that although ``cut_indices`` returns micro indices, this
returns macro mechanisms.
Yields:
tuple[int] |
def resource_filename(package_or_requirement, resource_name):
"""
Similar to pkg_resources.resource_filename but if the resource it not found via pkg_resources
it also looks in a predefined list of paths in order to find the resource
:param package_or_requirement: the module in which the resource resides
:param resource_name: the name of the resource
:return: the path to the resource
:rtype: str
"""
if pkg_resources.resource_exists(package_or_requirement, resource_name):
return pkg_resources.resource_filename(package_or_requirement, resource_name)
path = _search_in_share_folders(package_or_requirement, resource_name)
if path:
return path
raise RuntimeError("Resource {} not found in {}".format(package_or_requirement, resource_name)) | Similar to pkg_resources.resource_filename but if the resource it not found via pkg_resources
it also looks in a predefined list of paths in order to find the resource
:param package_or_requirement: the module in which the resource resides
:param resource_name: the name of the resource
:return: the path to the resource
:rtype: str |
def build_model(self):
'''Find out the type of model configured and dispatch the request to the appropriate method'''
if self.model_config['model-type'] == 'red':    # literal model-type values inferred from the builder method names
    return self.build_red()
elif self.model_config['model-type'] == 'hred':
    return self.build_hred()
else:
    raise ValueError("Unrecognized model type '{}'".format(self.model_config['model-type'])) | Find out the type of model configured and dispatch the request to the appropriate method
def check_config(conf):
'''Type and boundary check'''
if 'fmode' in conf and not isinstance(conf['fmode'], string_types):
raise TypeError(TAG + ": `fmode` must be a string")
if 'dmode' in conf and not isinstance(conf['dmode'], string_types):
raise TypeError(TAG + ": `dmode` must be a string")
if 'depth' in conf:
if not isinstance(conf['depth'], int):
raise TypeError(TAG + ": `depth` must be an int")
if conf['depth'] < 0:
raise ValueError(TAG + ": `depth` must be a positive number")
if 'hash_alg' in conf:
if not isinstance(conf['hash_alg'], string_types):
raise TypeError(TAG + ": `hash_alg` must be a string")
if conf['hash_alg'] not in ACCEPTED_HASH_ALG:
raise ValueError(TAG + ": `hash_alg` must be one of " + str(ACCEPTED_HASH_ALG)) | Type and boundary check |
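A quick sketch of configurations that pass and fail the checks; 'sha256' being listed in ACCEPTED_HASH_ALG is an assumption.

check_config({'fmode': '0644', 'dmode': '0755', 'depth': 2, 'hash_alg': 'sha256'})  # passes silently
check_config({'depth': -1})   # ValueError: depth must be a positive number
check_config({'fmode': 420})  # TypeError: fmode must be a string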
def Authenticate(self, app_id, challenge, registered_keys):
"""Authenticates app_id with the security key.
Executes the U2F authentication/signature flow with the security key.
Args:
app_id: The app_id to register the security key against.
challenge: Server challenge passed to the security key as a bytes object.
registered_keys: List of keys already registered for this app_id+user.
Returns:
SignResponse with client_data, key_handle, and signature_data. The client
data is an object, while the signature_data is encoded in FIDO U2F binary
format.
Raises:
U2FError: There was some kind of problem with authentication (e.g.
there was a timeout while waiting for the test of user presence.)
"""
client_data = model.ClientData(model.ClientData.TYP_AUTHENTICATION,
challenge, self.origin)
app_param = self.InternalSHA256(app_id)
challenge_param = self.InternalSHA256(client_data.GetJson())
num_invalid_keys = 0
for key in registered_keys:
try:
if key.version != u'U2F_V2':
continue
for _ in range(30):
try:
resp = self.security_key.CmdAuthenticate(challenge_param, app_param,
key.key_handle)
return model.SignResponse(key.key_handle, resp, client_data)
except errors.TUPRequiredError:
self.security_key.CmdWink()
time.sleep(0.5)
except errors.InvalidKeyHandleError:
num_invalid_keys += 1
continue
except errors.HardwareError as e:
raise errors.U2FError(errors.U2FError.BAD_REQUEST, e)
if num_invalid_keys == len(registered_keys):
# In this case, all provided keys were invalid.
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
# In this case, the TUP was not pressed.
raise errors.U2FError(errors.U2FError.TIMEOUT) | Authenticates app_id with the security key.
Executes the U2F authentication/signature flow with the security key.
Args:
app_id: The app_id to register the security key against.
challenge: Server challenge passed to the security key as a bytes object.
registered_keys: List of keys already registered for this app_id+user.
Returns:
SignResponse with client_data, key_handle, and signature_data. The client
data is an object, while the signature_data is encoded in FIDO U2F binary
format.
Raises:
U2FError: There was some kind of problem with authentication (e.g.
there was a timeout while waiting for the test of user presence.) |
def exists(self, uri):
    """Method returns true if the entity exists in the Repository,
false, otherwise
Args:
uri(str): Entity URI
Returns:
bool
"""
##entity_uri = "/".join([self.base_url, entity_id])
try:
urllib.request.urlopen(uri)
return True
except urllib.error.HTTPError:
return False | Method returns true if the entity exists in the Repository,
false, otherwise
Args:
uri(str): Entity URI
Returns:
bool |
def get_parent_families(self, family_id):
"""Gets the parent families of the given ``id``.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` to
query
return: (osid.relationship.FamilyList) - the parent families of
the ``id``
raise: NotFound - a ``Family`` identified by ``Id is`` not
found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bins
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=family_id)
return FamilyLookupSession(
self._proxy,
self._runtime).get_families_by_ids(
list(self.get_parent_family_ids(family_id))) | Gets the parent families of the given ``id``.
arg: family_id (osid.id.Id): the ``Id`` of the ``Family`` to
query
return: (osid.relationship.FamilyList) - the parent families of
the ``id``
raise: NotFound - a ``Family`` identified by ``Id is`` not
found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def is_valid(edtf_candidate):
"""isValid takes a candidate date and returns if it is valid or not"""
if (
isLevel0(edtf_candidate) or
isLevel1(edtf_candidate) or
isLevel2(edtf_candidate)
):
if '/' in edtf_candidate:
return is_valid_interval(edtf_candidate)
else:
return True
else:
return False | isValid takes a candidate date and returns if it is valid or not |
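A few illustrative calls, assuming the level checkers follow the EDTF specification their names reference.

is_valid('1984-06')              # True: a level-0 date
is_valid('2004-06/2006-08')      # True only if the interval itself validates
is_valid('June 1984')            # False: not an EDTF expression at any level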