text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def change_path_prefix(self, path, old_prefix, new_prefix, app_name):
"""Change path prefix and include app name."""
relative_path = os.path.relpath(path, old_prefix)
return os.path.join(new_prefix, app_name, relative_path) | 0.008097 |
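A standalone sketch of the same rewrite (the method does not use instance state, so ``self`` can be dropped); the example paths are illustrative:

import os

old_prefix = "/srv/static"
path = "/srv/static/css/site.css"
new_path = os.path.join("/var/www", "myapp", os.path.relpath(path, old_prefix))
print(new_path)  # /var/www/myapp/css/site.css (on POSIX)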
def _extract_links_from_asset_tags_in_text(self, text):
"""
Scan the text and extract asset tags and links to corresponding
files.
@param text: Page text.
@type text: str
@return: @see CourseraOnDemand._extract_links_from_text
"""
# Extract asset tags from instructions text
asset_tags_map = self._extract_asset_tags(text)
ids = list(iterkeys(asset_tags_map))
if not ids:
return {}
# asset tags contain asset names and ids. We need to make another
# HTTP request to get asset URL.
asset_urls = self._extract_asset_urls(ids)
supplement_links = {}
# Build supplement links, providing nice titles along the way
for asset in asset_urls:
title = clean_filename(
asset_tags_map[asset['id']]['name'],
self._unrestricted_filenames)
extension = clean_filename(
asset_tags_map[asset['id']]['extension'].strip(),
self._unrestricted_filenames)
url = asset['url'].strip()
if extension not in supplement_links:
supplement_links[extension] = []
supplement_links[extension].append((url, title))
return supplement_links | 0.001534 |
def get_elements(self, dat, multiple=False, retry=1, **kwargs):
"""
Map dat:dict => ``find_element_by_dat.key`` to look up elements
:param dat: a dict like {'name': <the ele name>}
:type dat: dict
:param multiple: whether to return multiple matching elements instead of a single one
:type multiple: bool
:param retry: number of lookup attempts before giving up
:type retry: int
:return: the element(s) returned by the driver lookup
:rtype: WebElement or list of WebElement
"""
if not isinstance(dat, dict):
return dat
while retry > 0:
for k, v in dat.items():
try:
# Lookup succeeded, return the element immediately
return getattr(self.driver, gen_find_method(k, multiple, kwargs.get('extra_maps')))(v)
except WebDriverException as _:
if not kwargs.get('skip_log'):
log.warning('[{}]: {}'.format(dat, _))
finally:
retry -= 1
raise ValueError(dat) | 0.00333 |
def _generate_route_method_decl(
self, namespace, route, arg_data_type, request_binary_body,
method_name_suffix='', extra_args=None):
"""Generates the method prototype for a route."""
args = ['self']
if extra_args:
args += extra_args
if request_binary_body:
args.append('f')
if is_struct_type(arg_data_type):
for field in arg_data_type.all_fields:
if is_nullable_type(field.data_type):
args.append('{}=None'.format(field.name))
elif field.has_default:
# TODO(kelkabany): Decide whether we really want to set the
# default in the argument list. This will send the default
# over the wire even if it isn't overridden. The benefit is
# it locks in a default even if it is changed server-side.
if is_user_defined_type(field.data_type):
ns = field.data_type.namespace
else:
ns = None
arg = '{}={}'.format(
field.name,
self._generate_python_value(ns, field.default))
args.append(arg)
else:
args.append(field.name)
elif is_union_type(arg_data_type):
args.append('arg')
elif not is_void_type(arg_data_type):
raise AssertionError('Unhandled request type: %r' %
arg_data_type)
method_name = fmt_func(route.name + method_name_suffix, version=route.version)
namespace_name = fmt_underscores(namespace.name)
self.generate_multiline_list(args, 'def {}_{}'.format(namespace_name, method_name), ':') | 0.002192 |
def rows(self, table, cols):
'''
Fetches rows from the local cache or from the db if there's no cache.
:param table: table name to select
:param cols: list of columns to select
:return: list of rows
:rtype: list
'''
if self.orng_tables:
data = []
for ex in self.orng_tables[table]:
data.append([ex[str(col)] for col in cols])
return data
else:
return self.fetch(table, cols) | 0.003839 |
def parseJSON(js):
"""
{
kv_type : "",
type : "",
actors : <Actors list>
[
{
name : <String>,
formula: <String>,
events: ["->b", "b->"],
trace: [],
speed: 1,2,3...
}
]
}
:param js: JSON string describing the system
:return: the parsed System
"""
decoded = json.JSONDecoder().decode(js)
actors = decoded.get("actors")
if actors is None:
raise Exception("No actors found in the system !")
s = System()
for a in actors:
# Getting actor info
a_name = a.get("name")
a_formula = a.get("formula")
a_trace = a.get("trace")
sa_events = a.get("events")
a_events = []
a_speed = 1 if a.get("speed") is None else int(a["speed"])
# Parsing actor info
for e in sa_events:
tmp = e.split("|")
tmp2 = []
for x in tmp:
tmp2.append(Actor.Event.parse(x))
a_events.append(tmp2)
a_formula = eval(a_formula)
a_trace = Trace.parse(a_trace)
# Creating the actor
actor = Actor(name=a_name, formula=a_formula, trace=a_trace, events=a_events, speed=a_speed)
# Add actor to the system
s.add_actors(actor)
s.generate_monitors()
return s | 0.001923 |
def columns_classes(self):
'''Return the grid column widths (md, sm, xs) derived from objects_per_row.'''
md = 12 / self.objects_per_row
sm = None
if self.objects_per_row > 2:
sm = 12 / (self.objects_per_row / 2)
return md, (sm or md), 12 | 0.008368 |
def convert_relational(relational):
"""Convert all inequalities to >=0 form.
"""
rel = relational.rel_op
if rel in ['==', '>=', '>']:
return relational.lhs-relational.rhs
elif rel in ['<=', '<']:
return relational.rhs-relational.lhs
else:
raise Exception("The relational operation " + rel + " is not "
"implemented!") | 0.002571 |
def _genBgTerm_fromXX(self,vTot,vCommon,XX,a=None,c=None):
"""
generate background term from SNPs
Args:
vTot: variance of Yc+Yi
vCommon: variance of Yc
XX: kinship matrix
a: common scales, it can be set for debugging purposes
c: independent scales, it can be set for debugging purposes
"""
vSpecific = vTot-vCommon
SP.random.seed(0)
if c is None: c = SP.randn(self.P)
XX += 1e-3 * SP.eye(XX.shape[0])
L = LA.cholesky(XX,lower=True)
# common effect
R = self.genWeights(self.N,self.P)
A = self.genTraitEffect()
if a is not None: A[0,:] = a
Yc = SP.dot(L,SP.dot(R,A))
Yc*= SP.sqrt(vCommon)/SP.sqrt(Yc.var(0).mean())
# specific effect
R = SP.randn(self.N,self.P)
Yi = SP.dot(L,SP.dot(R,SP.diag(c)))
Yi*= SP.sqrt(vSpecific)/SP.sqrt(Yi.var(0).mean())
return Yc, Yi | 0.021407 |
def is_line_layer(layer):
"""Check if a QGIS layer is vector and its geometries are lines.
:param layer: A vector layer.
:type layer: QgsVectorLayer, QgsMapLayer
:returns: True if the layer contains lines, otherwise False.
:rtype: bool
"""
try:
return (layer.type() == QgsMapLayer.VectorLayer) and (
layer.geometryType() == QgsWkbTypes.LineGeometry)
except AttributeError:
return False | 0.002232 |
def add_request_type_view(request):
''' View to add a new request type. Restricted to presidents and superadmins. '''
form = RequestTypeForm(request.POST or None)
if form.is_valid():
rtype = form.save()
messages.add_message(request, messages.SUCCESS,
MESSAGES['REQUEST_TYPE_ADDED'].format(typeName=rtype.name))
return HttpResponseRedirect(reverse('managers:manage_request_types'))
return render_to_response('edit_request_type.html', {
'page_name': "Admin - Add Request Type",
'request_types': RequestType.objects.all(),
'form': form,
}, context_instance=RequestContext(request)) | 0.004412 |
def _get_team_names(self, game):
"""
Find the names and abbreviations for both teams in a game.
Using the HTML contents in a boxscore, find the name and abbreviation
for both teams and determine whether or not this is a matchup between
two Division-I teams.
Parameters
----------
game : PyQuery object
A PyQuery object of a single boxscore containing information about
both teams.
Returns
-------
tuple
Returns a tuple containing the names and abbreviations of both
teams in the following order: Away Name, Away Abbreviation, Away
Score, Away Ranking, Home Name, Home Abbreviation, Home Score, Home
Ranking, a boolean which evaluates to True if either team does not
participate in Division-I athletics, and a boolean which evaluates
to True if either team is currently ranked.
"""
# Grab the first <td...> tag for each <tr> row in the boxscore,
# representing the name for each participating team.
links = [g('td:first') for g in game('tr').items()]
# The away team is the first link in the boxscore
away = links[0]
# The home team is the last (3rd) link in the boxscore
home = links[-1]
non_di = False
scores = re.findall(r'<td class="right">\d+</td>', str(game))
away_score = None
home_score = None
# If the game hasn't started or hasn't been updated on sports-reference
# yet, no score will be shown and therefore can't be parsed.
if len(scores) == 2:
away_score = self._get_score(scores[0])
home_score = self._get_score(scores[1])
away_name, away_abbr, away_non_di = self._get_name(away('a'))
home_name, home_abbr, home_non_di = self._get_name(home('a'))
non_di = away_non_di or home_non_di
away_rank = self._get_rank(away)
home_rank = self._get_rank(home)
top_25 = bool(away_rank or home_rank)
return (away_name, away_abbr, away_score, away_rank, home_name,
home_abbr, home_score, home_rank, non_di, top_25) | 0.000904 |
def decrypt_file(self, filename):
'''
Decrypt File
Args:
filename: Pass the filename to decrypt.
Returns:
No return.
'''
if not os.path.exists(filename):
print "Invalid filename %s. Does not exist" % filename
return
if self.vault_password is None:
print "ENV Variable PYANSI_VAULT_PASSWORD not set"
return
if not self.is_file_encrypted(filename):
# No need to do anything.
return
cipher = 'AES256'
vaulteditor = VaultEditor(cipher, self.vault_password, filename)
vaulteditor.decrypt_file() | 0.002959 |
def read(self, symbol, as_of=None, date_range=None, from_version=None, allow_secondary=None, **kwargs):
"""
Read data for the named symbol. Returns a VersionedItem object with
a data and metadata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
date_range: `arctic.date.DateRange`
DateRange to read data for. Applies to Pandas data, with a DateTime index
returns only the part of the data that falls in the DateRange.
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
Returns
-------
VersionedItem namedtuple which contains a .data and .metadata element
"""
try:
read_preference = self._read_preference(allow_secondary)
_version = self._read_metadata(symbol, as_of=as_of, read_preference=read_preference)
return self._do_read(symbol, _version, from_version,
date_range=date_range, read_preference=read_preference, **kwargs)
except (OperationFailure, AutoReconnect) as e:
# Log the exception so we know how often this is happening
log_exception('read', e, 1)
# If we've failed to read from the secondary, then it's possible the
# secondary has lagged. In this case direct the query to the primary.
_version = mongo_retry(self._read_metadata)(symbol, as_of=as_of,
read_preference=ReadPreference.PRIMARY)
return self._do_read_retry(symbol, _version, from_version,
date_range=date_range,
read_preference=ReadPreference.PRIMARY,
**kwargs)
except Exception as e:
log_exception('read', e, 1)
raise | 0.004677 |
def binary_dilation(x, radius=3):
"""Return fast binary morphological dilation of an image.
see `skimage.morphology.binary_dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`__.
Parameters
-----------
x : 2D array
A binary image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed binary image.
"""
mask = disk(radius)
x = _binary_dilation(x, selem=mask)
return x | 0.00381 |
def _make_dataset(cls, coords):
"""Construct a new dataset given the coordinates.
"""
class Slice(cls._SliceType):
extra_coords = coords
Slice.__name__ = '%s.slice(%s)' % (
cls.__name__,
', '.join('%s=%r' % item for item in coords.items()),
)
return Slice | 0.005882 |
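A minimal self-contained sketch of the dynamic-subclass trick used above; Base and make_slice are hypothetical stand-ins for cls._SliceType and the method:

class Base(object):
    extra_coords = {}

def make_slice(coords):
    class Slice(Base):
        extra_coords = coords
    Slice.__name__ = 'Base.slice(%s)' % (
        ', '.join('%s=%r' % item for item in coords.items()))
    return Slice

print(make_slice({'year': 2020}))  # a class whose __name__ is "Base.slice(year=2020)"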
def readGraph(edgeList, nodeList = None, directed = False, idKey = 'ID', eSource = 'From', eDest = 'To'):
"""Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files.
This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage.
The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) givens the destination and all other columns are attributes of the edges, e.g. weight.
The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count.
**Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised.
**Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes.
# Parameters
_edgeList_ : `str`
> a string giving the path to the edge list file
_nodeList_ : `optional [str]`
> default `None`, a string giving the path to the node list file
_directed_ : `optional [bool]`
> default `False`, if `True` the produced network is directed from _eSource_ to _eDest_
_idKey_ : `optional [str]`
> default `'ID'`, the name of the ID column in the node list
_eSource_ : `optional [str]`
> default `'From'`, the name of the source column in the edge list
_eDest_ : `optional [str]`
> default `'To'`, the name of the destination column in the edge list
# Returns
`networkx Graph`
> the graph described by the input files
"""
progArgs = (0, "Starting to reading graphs")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if directed:
grph = nx.DiGraph()
else:
grph = nx.Graph()
if nodeList:
PBar.updateVal(0, "Reading " + nodeList)
f = open(os.path.expanduser(os.path.abspath(nodeList)))
nFile = csv.DictReader(f)
for line in nFile:
vals = line
ndID = vals[idKey]
del vals[idKey]
if len(vals) > 0:
grph.add_node(ndID, **vals)
else:
grph.add_node(ndID)
f.close()
PBar.updateVal(.25, "Reading " + edgeList)
f = open(os.path.expanduser(os.path.abspath(edgeList)))
eFile = csv.DictReader(f)
for line in eFile:
vals = line
eFrom = vals[eSource]
eTo = vals[eDest]
del vals[eSource]
del vals[eDest]
if len(vals) > 0:
grph.add_edge(eFrom, eTo, **vals)
else:
grph.add_edge(eFrom, eTo)
PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges())))
f.close()
return grph | 0.006382 |
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if headers else fmt.without_header_hide
pad = fmt.padding
headerrow = fmt.headerrow if fmt.headerrow else fmt.datarow
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(colwidths, pad, *fmt.lineabove))
if headers:
lines.append(_build_row(headers, pad, *headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
begin, fill, sep, end = fmt.linebelowheader
if fmt.usecolons:
segs = [
_line_segment_with_colons(fmt.linebelowheader, a, w + 2 * pad)
for w, a in zip(colwidths, colaligns)]
lines.append(_build_row(segs, 0, begin, sep, end))
else:
lines.append(_build_line(colwidths, pad, *fmt.linebelowheader))
if rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in rows[:-1]:
lines.append(_build_row(row, pad, *fmt.datarow))
lines.append(_build_line(colwidths, pad, *fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(rows[-1], pad, *fmt.datarow))
else:
for row in rows:
lines.append(_build_row(row, pad, *fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(colwidths, pad, *fmt.linebelow))
return "\n".join(lines) | 0.000636 |
def draw_name(self, context, transparency, only_calculate_size=False):
"""Draws the name of the port
Offers the option to only calculate the size of the name.
:param context: The context to draw on
:param transparency: The transparency of the text
:param only_calculate_size: Whether to only calculate the size
:return: Size of the name
:rtype: float, float
"""
c = context
cairo_context = c
if isinstance(c, CairoBoundingBoxContext):
cairo_context = c._cairo
# c.set_antialias(Antialias.GOOD)
side_length = self.port_side_size
layout = PangoCairo.create_layout(cairo_context)
font_name = constants.INTERFACE_FONT
font_size = gap_draw_helper.FONT_SIZE
font = FontDescription(font_name + " " + str(font_size))
layout.set_font_description(font)
layout.set_text(self.name, -1)
ink_extents, logical_extents = layout.get_extents()
extents = [extent / float(SCALE) for extent in [logical_extents.x, logical_extents.y,
logical_extents.width, logical_extents.height]]
real_name_size = extents[2], extents[3]
desired_height = side_length * 0.75
scale_factor = real_name_size[1] / desired_height
# Determine the size of the text, increase the width to have more margin left and right of the text
margin = side_length / 4.
name_size = real_name_size[0] / scale_factor, desired_height
name_size_with_margin = name_size[0] + margin * 2, name_size[1] + margin * 2
# Only the size is required, stop here
if only_calculate_size:
return name_size_with_margin
# Current position is the center of the port rectangle
c.save()
if self.side is SnappedSide.RIGHT or self.side is SnappedSide.LEFT:
c.rotate(deg2rad(-90))
c.rel_move_to(-name_size[0] / 2, -name_size[1] / 2)
c.scale(1. / scale_factor, 1. / scale_factor)
c.rel_move_to(-extents[0], -extents[1])
c.set_source_rgba(*gap_draw_helper.get_col_rgba(self.text_color, transparency))
PangoCairo.update_layout(cairo_context, layout)
PangoCairo.show_layout(cairo_context, layout)
c.restore()
return name_size_with_margin | 0.002942 |
def get_closest(self, sma):
"""
Return the `~photutils.isophote.Isophote` instance that has the
closest semimajor axis length to the input semimajor axis.
Parameters
----------
sma : float
The semimajor axis length.
Returns
-------
isophote : `~photutils.isophote.Isophote` instance
The isophote with the closest semimajor axis value.
"""
index = (np.abs(self.sma - sma)).argmin()
return self._list[index] | 0.003788 |
def create_domain_smarthost(self, domainid, data):
"""Create a domain smarthost"""
return self.api_call(
ENDPOINTS['domainsmarthosts']['new'],
dict(domainid=domainid),
body=data) | 0.008696 |
def get(self, key):
"""
Get an entry from the cache by key.
@raise KeyError: if the given key is not present in the cache.
@raise CacheFault: (a L{KeyError} subclass) if the given key is present
in the cache, but the value it points to is gone.
"""
o = self.data[key]()
if o is None:
# On CPython, the weakref callback will always(?) run before any
# other code has a chance to observe that the weakref is broken;
# and since the callback removes the item from the dict, this
# branch of code should never run. However, on PyPy (and possibly
# other Python implementations), the weakref callback does not run
# immediately, thus we may be able to observe this intermediate
# state. Should this occur, we remove the dict item ourselves,
# and raise CacheFault (which is a KeyError subclass).
del self.data[key]
raise CacheFault(
"FinalizingCache has %r but its value is no more." % (key,))
log.msg(interface=iaxiom.IStatEvent, stat_cache_hits=1, key=key)
return o | 0.001689 |
def get(self, request, bot_id, format=None):
"""
Get list of states
---
serializer: StateSerializer
responseMessages:
- code: 401
message: Not authenticated
"""
return super(StateList, self).get(request, bot_id, format) | 0.006645 |
def prepare_backend_environ(self, host, method, relative_url, headers, body,
source_ip, port):
"""Build an environ object for the backend to consume.
Args:
host: A string containing the host serving the request.
method: A string containing the HTTP method of the request.
relative_url: A string containing path and query string of the request.
headers: A list of (key, value) tuples where key and value are both
strings.
body: A string containing the request body.
source_ip: The source IP address for the request.
port: The port to which to direct the request.
Returns:
An environ object with all the information necessary for the backend to
process the request.
"""
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (host, port)
environ = {'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ | 0.002969 |
def add_word(self, word_id, url='https://api.shanbay.com/bdc/learning/'):
"""添加单词"""
data = {
'id': word_id
}
return self._request(url, method='post', data=data).json() | 0.009434 |
def get_mst(points):
"""
Parameters
----------
points : list of points (geometry.Point)
The first element of the list is the center of the bounding box of the
first stroke, the second one belongs to the seconds stroke, ...
Returns
-------
mst : square matrix
0 nodes the edges are not connected, > 0 means they are connected
"""
graph = Graph()
for point in points:
graph.add_node(point)
graph.generate_euclidean_edges()
matrix = scipy.sparse.csgraph.minimum_spanning_tree(graph.w)
mst = matrix.toarray().astype(int)
# returned matrix is not symmetrical! make it symmetrical
for i in range(len(mst)):
for j in range(len(mst)):
if mst[i][j] > 0:
mst[j][i] = mst[i][j]
if mst[j][i] > 0:
mst[i][j] = mst[j][i]
return mst | 0.001139 |
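A self-contained sketch of the SciPy call plus the symmetrization step, assuming only NumPy/SciPy (the Graph/Point helpers are project-specific); the double loop above can also be expressed as a single element-wise maximum:

import numpy as np
from scipy.sparse.csgraph import minimum_spanning_tree

weights = np.array([[0, 2, 9],
                    [2, 0, 3],
                    [9, 3, 0]])
mst = minimum_spanning_tree(weights).toarray().astype(int)
mst = np.maximum(mst, mst.T)  # mirror the upper triangle to make the matrix symmetric
print(mst)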
def shape_vecs(*args):
'''Reshape all ndarrays with ``shape==(n,)`` to ``shape==(n,1)``.
Recognizes ndarrays and ignores all others.'''
ret_args = []
flat_vecs = True
for arg in args:
if type(arg) is numpy.ndarray:
if len(arg.shape) == 1:
arg = shape_vec(arg)
else:
flat_vecs = False
ret_args.append(arg)
return flat_vecs, ret_args | 0.002336 |
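For reference, a hedged sketch of the reshape that shape_vec (defined elsewhere in the project) presumably performs on a single array:

import numpy as np

v = np.arange(3)           # shape (3,)
col = v.reshape(-1, 1)     # shape (3, 1)
print(v.shape, col.shape)  # (3,) (3, 1)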
def SetRowsCustomProperties(self, rows, custom_properties):
"""Sets the custom properties for given row(s).
Can accept a single row or an iterable of rows.
Sets the given custom properties for all specified rows.
Args:
rows: The row, or rows, to set the custom properties for.
custom_properties: A string to string dictionary of custom properties to
set for all rows.
"""
if not hasattr(rows, "__iter__"):
rows = [rows]
for row in rows:
self.__data[row] = (self.__data[row][0], custom_properties) | 0.005396 |
def fill_extents(self):
"""Computes a bounding box in user-space coordinates
covering the area that would be affected, (the "inked" area),
by a :meth:`fill` operation given the current path and fill parameters.
If the current path is empty,
returns an empty rectangle ``(0, 0, 0, 0)``.
Surface dimensions and clipping are not taken into account.
Contrast with :meth:`path_extents` which is similar,
but returns non-zero extents for some paths with no inked area,
(such as a simple line segment).
Note that :meth:`fill_extents` must necessarily do more work
to compute the precise inked areas in light of the fill rule,
so :meth:`path_extents` may be more desirable for sake of performance
if the non-inked path extents are desired.
See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.
:return:
A ``(x1, y1, x2, y2)`` tuple of floats:
the left, top, right and bottom of the resulting extents,
respectively.
"""
extents = ffi.new('double[4]')
cairo.cairo_fill_extents(
self._pointer, extents + 0, extents + 1, extents + 2, extents + 3)
self._check_status()
return tuple(extents) | 0.001535 |
def _remove_existing(self):
"""
When a change is detected, remove keys that existed in the old file.
"""
for key in self._keys:
if key in os.environ:
LOG.debug('%r: removing old key %r', self, key)
del os.environ[key]
self._keys = [] | 0.006309 |
def has(self, decorated_function, *args, **kwargs):
""" Check if there is a result for given function
:param decorated_function: called function (original)
:param args: args with which function is called
:param kwargs: kwargs with which function is called
:return: True if there is a cached result for the given call, otherwise False
"""
return self.get_cache(decorated_function, *args, **kwargs).has_value | 0.02514 |
def iter_files(self):
"""Iterate over files."""
# file_iter may be a callable or an iterator
if callable(self.file_iter):
return self.file_iter()
return iter(self.file_iter) | 0.009217 |
def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):
"""
Get all packages which are under dname. This is necessary for
Python 2.2's distutils. Pretty similar arguments to getDataFiles,
including 'parent'.
"""
parent = parent or ""
prefix = []
if parent:
prefix = [parent]
bname = os.path.basename(dname)
ignore = ignore or []
if bname in ignore:
return []
if results is None:
results = []
if pkgname is None:
pkgname = []
subfiles = os.listdir(dname)
abssubfiles = [os.path.join(dname, x) for x in subfiles]
if '__init__.py' in subfiles:
results.append(prefix + pkgname + [bname])
for subdir in filter(os.path.isdir, abssubfiles):
get_packages(subdir, pkgname=pkgname + [bname],
results=results, ignore=ignore,
parent=parent)
res = ['.'.join(result) for result in results]
return res | 0.00303 |
def find_by_user(user_id, _connection=None, page_size=100, page_number=0,
sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER):
"""
List all videos uploaded by a certain user.
"""
return connection.ItemResultSet('find_videos_by_user_id',
Video, _connection, page_size, page_number, sort_by, sort_order,
user_id=user_id) | 0.012563 |
def localize(date_time, time_zone):
"""Returns a datetime adjusted to a timezone:
* If date_time is a naive datetime (datetime with no timezone information), timezone information is added but the
date and time remain the same.
* If date_time is not a naive datetime, a datetime object with a new tzinfo attribute is returned, adjusting the
date and time data so the result is the same UTC time.
"""
if datetime_is_naive(date_time):
ret = time_zone.localize(date_time)
else:
ret = date_time.astimezone(time_zone)
return ret | 0.005208 |
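A small usage sketch with pytz, assuming datetime_is_naive simply checks that dt.tzinfo is None:

import datetime
import pytz

berlin = pytz.timezone('Europe/Berlin')
naive = datetime.datetime(2020, 1, 1, 12, 0)
aware = pytz.utc.localize(datetime.datetime(2020, 1, 1, 12, 0))
print(localize(naive, berlin))  # 2020-01-01 12:00:00+01:00 (tagged, wall time kept)
print(localize(aware, berlin))  # 2020-01-01 13:00:00+01:00 (same UTC instant)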
def _item_attributes_match(crypto_config, plaintext_item, encrypted_item):
# type: (CryptoConfig, Dict, Dict) -> bool
"""Determines whether the unencrypted values in the plaintext items attributes are the same as those in the
encrypted item. Essentially this uses brute force to cover when we don't know the primary and sort
index attribute names, since they can't be encrypted.
:param CryptoConfig crypto_config: CryptoConfig used in encrypting the given items
:param dict plaintext_item: The plaintext item
:param dict encrypted_item: The encrypted item
:return: Bool response, True if the unencrypted attributes in the plaintext item match those in
the encrypted item
:rtype: bool
"""
for name, value in plaintext_item.items():
if crypto_config.attribute_actions.action(name) == CryptoAction.ENCRYPT_AND_SIGN:
continue
if encrypted_item.get(name) != value:
return False
return True | 0.006122 |
def _connect_control(self, event, param, arg):
"""
Is the actual callback function for :meth:`init_hw_connect_control_ex`.
:param event:
Event (:data:`CbEvent.EVENT_CONNECT`, :data:`CbEvent.EVENT_DISCONNECT` or
:data:`CbEvent.EVENT_FATALDISCON`).
:param param: Additional parameter depending on the event.
- CbEvent.EVENT_CONNECT: always 0
- CbEvent.EVENT_DISCONNECT: always 0
- CbEvent.EVENT_FATALDISCON: USB-CAN-Handle of the disconnected module
:param arg: Additional parameter defined with :meth:`init_hardware_ex` (not used in this wrapper class).
"""
log.debug("Event: %s, Param: %s" % (event, param))
if event == CbEvent.EVENT_FATALDISCON:
self.fatal_disconnect_event(param)
elif event == CbEvent.EVENT_CONNECT:
self.connect_event()
elif event == CbEvent.EVENT_DISCONNECT:
self.disconnect_event() | 0.004128 |
def prepare(session_data={}, #pylint: disable=dangerous-default-value
passphrase=None):
"""
Returns *session_dict* as a base64 encrypted json string.
The full encrypted text is special crafted to be compatible
with openssl. It can be decrypted with:
$ echo _full_encrypted_ | openssl aes-256-cbc -d -a -k _passphrase_ -p
salt=...
key=...
iv=...
_json_formatted_
"""
if passphrase is None:
passphrase = settings.DJAODJIN_SECRET_KEY
encrypted = crypt.encrypt(
json.dumps(session_data, cls=crypt.JSONEncoder),
passphrase=passphrase,
debug_stmt="encrypted_cookies.SessionStore.prepare")
# b64encode will return `bytes` (Py3) but Django 2.0 is expecting
# a `str` to add to the cookie header, otherwise it wraps those
# `bytes` into a b'***' and adds that to the cookie.
# Note that Django 1.11 will add those `bytes` to the cookie "as-is".
if not isinstance(encrypted, six.string_types):
as_text = encrypted.decode('ascii')
else:
as_text = encrypted
return as_text | 0.004129 |
def get_sync_info(self, name, key=None):
"""Get mtime/size when this target's current dir was last synchronized with remote."""
peer_target = self.peer
if self.is_local():
info = self.cur_dir_meta.dir["peer_sync"].get(peer_target.get_id())
else:
info = peer_target.cur_dir_meta.dir["peer_sync"].get(self.get_id())
if name is not None:
info = info.get(name) if info else None
if info and key:
info = info.get(key)
return info | 0.005682 |
def find_shows_by_category(self, category, genre=None, area=None,
release_year=None, paid=None,
orderby='view-today-count',
streamtypes=None, person=None,
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=62
"""
url = 'https://openapi.youku.com/v2/shows/by_category.json'
params = {
'client_id': self.client_id,
'category': category,
'genre': genre,
'area': area,
'release_year': release_year,
'paid': paid,
'orderby': orderby,
'streamtypes': streamtypes,
'person': person,
'page': page,
'count': count
}
params = remove_none_value(params)
r = requests.get(url, params=params)
check_error(r)
return r.json() | 0.006342 |
def current_resp_rate(self):
"""Return current respiratory rate for in-progress session."""
try:
rates = self.intervals[0]['timeseries']['respiratoryRate']
num_rates = len(rates)
if num_rates == 0:
return None
rate = rates[num_rates-1][1]
except KeyError:
rate = None
return rate | 0.005141 |
def _parse_results(self, results, cached_records):
"""
Parses the given results (in MARCXML format).
The given "cached_records" list is a pool of
already existing parsed records (in order to
avoid keeping several times the same records in memory)
"""
parser = xml.sax.make_parser()
handler = RecordsHandler(cached_records)
parser.setContentHandler(handler)
parser.parse(results)
return handler.records | 0.004082 |
def _RemoveForemanRule(self):
"""Removes the foreman rule corresponding to this hunt."""
if data_store.RelationalDBEnabled():
data_store.REL_DB.RemoveForemanRule(hunt_id=self.session_id.Basename())
return
with aff4.FACTORY.Open(
"aff4:/foreman", mode="rw", token=self.token) as foreman:
aff4_rules = foreman.Get(foreman.Schema.RULES)
aff4_rules = foreman.Schema.RULES(
# Remove those rules which fire off this hunt id.
[r for r in aff4_rules if r.hunt_id != self.session_id])
foreman.Set(aff4_rules) | 0.012281 |
def reset(self):
"""Reset the view."""
self.pan = (0., 0.)
self.zoom = self._default_zoom
self.update() | 0.014815 |
def resize(self, size):
""" Grow this array to specified length. This array can't be shrinked
:param size: new length
:return: None
"""
if size < len(self):
raise ValueError("Value is out of bound. Array can't be shrinked")
current_size = self.__size
for i in range(size - current_size):
self.__array.append(WBinArray(0, self.__class__.byte_size))
self.__size = size | 0.030848 |
def cdk_module_matches_env(env_name, env_config, env_vars):
"""Return bool on whether cdk command should continue in current env."""
if env_config.get(env_name):
current_env_config = env_config[env_name]
if isinstance(current_env_config, bool) and current_env_config:
return True
if isinstance(current_env_config, six.string_types):
(account_id, region) = current_env_config.split('/')
if region == env_vars['AWS_DEFAULT_REGION']:
boto_args = extract_boto_args_from_env(env_vars)
sts_client = boto3.client(
'sts',
region_name=env_vars['AWS_DEFAULT_REGION'],
**boto_args
)
if sts_client.get_caller_identity()['Account'] == account_id:
return True
if isinstance(current_env_config, dict):
return True
return False | 0.001049 |
def fix_repeating_arguments(self):
"""Fix elements that should accumulate/increment values."""
either = [list(child.children) for child in transform(self).children]
for case in either:
for e in [child for child in case if case.count(child) > 1]:
if type(e) is Argument or type(e) is Option and e.argcount:
if e.value is None:
e.value = []
elif type(e.value) is not list:
e.value = e.value.split()
if type(e) is Command or type(e) is Option and e.argcount == 0:
e.value = 0
return self | 0.002994 |
def install(pkg=None,
pkgs=None,
dir=None,
runas=None,
registry=None,
env=None,
dry_run=False,
silent=True):
'''
Install an NPM package.
If no directory is specified, the package will be installed globally. If
no package is specified, the dependencies (from package.json) of the
package in the given directory will be installed.
pkg
A package name in any format accepted by NPM, including a version
identifier
pkgs
A list of package names in the same format as the ``name`` parameter
.. versionadded:: 2014.7.0
dir
The target directory in which to install the package, or None for
global installation
runas
The user to run NPM with
registry
The NPM registry to install the package from.
.. versionadded:: 2014.7.0
env
Environment variables to set when invoking npm. Uses the same ``env``
format as the :py:func:`cmd.run <salt.modules.cmdmod.run>` execution
function.
.. versionadded:: 2014.7.0
silent
Whether or not to run NPM install with --silent flag.
.. versionadded:: 2016.3.0
dry_run
Whether or not to run NPM install with --dry-run flag.
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' npm.install coffee-script
salt '*' npm.install [email protected]
'''
# Protect against injection
if pkg:
pkgs = [_cmd_quote(pkg)]
elif pkgs:
pkgs = [_cmd_quote(v) for v in pkgs]
else:
pkgs = []
if registry:
registry = _cmd_quote(registry)
cmd = ['npm', 'install', '--json']
if silent:
cmd.append('--silent')
if not dir:
cmd.append('--global')
if registry:
cmd.append('--registry="{0}"'.format(registry))
if dry_run:
cmd.append('--dry-run')
cmd.extend(pkgs)
env = env or {}
if runas:
uid = salt.utils.user.get_uid(runas)
if uid:
env.update({'SUDO_UID': uid, 'SUDO_USER': ''})
cmd = ' '.join(cmd)
result = __salt__['cmd.run_all'](cmd,
python_shell=True,
cwd=dir,
runas=runas,
env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
# npm >1.2.21 is putting the output to stderr even though retcode is 0
npm_output = result['stdout'] or result['stderr']
try:
return salt.utils.json.find_json(npm_output)
except ValueError:
return npm_output | 0.000349 |
def values_are_equivalent(self, val1, val2):
'''Check whether 2 values are equivalent (meaning they
are in the same bucket/range)
Returns:
true if the 2 values are equivalent
'''
return self.get_lowest_equivalent_value(val1) == self.get_lowest_equivalent_value(val2) | 0.009404 |
def wrap(self, text, width=None, indent=None):
"""Return ``text`` wrapped to ``width`` and indented with ``indent``.
By default:
* ``width`` is ``self.options.wrap_length``
* ``indent`` is ``self.indentation``.
"""
width = width if width is not None else self.options.wrap_length
indent = indent if indent is not None else self.indentation
initial_indent = self.initial_indentation
return textwrap.fill(text, width=width,
initial_indent=initial_indent,
subsequent_indent=indent) | 0.003273 |
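The method is a thin wrapper around textwrap.fill; a standalone sketch with illustrative widths and indents:

import textwrap

text = "a long option description that should be wrapped and indented consistently"
print(textwrap.fill(text, width=32,
                    initial_indent="  --opt  ",
                    subsequent_indent="         "))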
def plot_power_factor_mu(self, temp=600, output='eig',
relaxation_time=1e-14, xlim=None):
"""
Plot the power factor in function of Fermi level. Semi-log plot
Args:
temp: the temperature
xlim: a list of min and max fermi energy by default (0, and band
gap)
tau: A relaxation time in s. By default none and the plot is by
units of relaxation time
Returns:
a matplotlib object
"""
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
pf = self._bz.get_power_factor(relaxation_time=relaxation_time,
output=output, doping_levels=False)[
temp]
plt.semilogy(self._bz.mu_steps, pf, linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['PF$_1$', 'PF$_2$', 'PF$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
plt.ylabel("Power factor, ($\\mu$W/(mK$^2$))", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt | 0.00228 |
def cli_aliases(self):
r"""Developer script aliases.
"""
scripting_groups = []
aliases = {}
for cli_class in self.cli_classes:
instance = cli_class()
if getattr(instance, "alias", None):
scripting_group = getattr(instance, "scripting_group", None)
if scripting_group:
scripting_groups.append(scripting_group)
entry = (scripting_group, instance.alias)
if (scripting_group,) in aliases:
message = "alias conflict between scripting group"
message += " {!r} and {}"
message = message.format(
scripting_group, aliases[(scripting_group,)].__name__
)
raise Exception(message)
if entry in aliases:
message = "alias conflict between {} and {}"
message = message.format(
aliases[entry].__name__, cli_class.__name__
)
raise Exception(message)
aliases[entry] = cli_class
else:
entry = (instance.alias,)
if entry in scripting_groups:
message = "alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.alias)
raise Exception(message)
if entry in aliases:
message = "alias conflict be {} and {}"
message = message.format(cli_class.__name__, aliases[entry])
raise Exception(message)
aliases[(instance.alias,)] = cli_class
else:
if instance.program_name in scripting_groups:
message = "Alias conflict between {}"
message += " and scripting group {!r}"
message = message.format(cli_class.__name__, instance.program_name)
raise Exception(message)
aliases[(instance.program_name,)] = cli_class
alias_map = {}
for key, value in aliases.items():
if len(key) == 1:
alias_map[key[0]] = value
else:
if key[0] not in alias_map:
alias_map[key[0]] = {}
alias_map[key[0]][key[1]] = value
return alias_map | 0.002298 |
def erase_all_breakpoints(self):
"""
Erases all breakpoints in all processes.
@see:
erase_code_breakpoint,
erase_page_breakpoint,
erase_hardware_breakpoint
"""
# This should be faster but let's not trust the GC so much :P
# self.disable_all_breakpoints()
# self.__codeBP = dict()
# self.__pageBP = dict()
# self.__hardwareBP = dict()
# self.__runningBP = dict()
# self.__hook_objects = dict()
## # erase hooks
## for (pid, address, hook) in self.get_all_hooks():
## self.dont_hook_function(pid, address)
# erase code breakpoints
for (pid, bp) in self.get_all_code_breakpoints():
self.erase_code_breakpoint(pid, bp.get_address())
# erase page breakpoints
for (pid, bp) in self.get_all_page_breakpoints():
self.erase_page_breakpoint(pid, bp.get_address())
# erase hardware breakpoints
for (tid, bp) in self.get_all_hardware_breakpoints():
self.erase_hardware_breakpoint(tid, bp.get_address()) | 0.004363 |
def openOrder(self, orderId, contract, order, orderState):
"""
This wrapper is called to:
* feed in open orders at startup;
* feed in open orders or order updates from other clients and TWS
if clientId=master id;
* feed in manual orders and order updates from TWS if clientId=0;
* handle openOrders and allOpenOrders responses.
"""
if order.whatIf:
# response to whatIfOrder
self._endReq(order.orderId, orderState)
else:
key = self.orderKey(order.clientId, order.orderId, order.permId)
trade = self.trades.get(key)
# ignore '?' values in the order
d = {k: v for k, v in order.dict().items() if v != '?'}
if trade:
trade.order.update(**d)
else:
contract = Contract.create(**contract.dict())
order = Order(**d)
orderStatus = OrderStatus(status=orderState.status)
trade = Trade(contract, order, orderStatus, [], [])
self.trades[key] = trade
self._logger.info(f'openOrder: {trade}')
results = self._results.get('openOrders')
if results is None:
self.ib.openOrderEvent.emit(trade)
else:
# response to reqOpenOrders or reqAllOpenOrders
results.append(order) | 0.001403 |
def verboselogs_class_transform(cls):
"""Make Pylint aware of our custom logger methods."""
if cls.name == 'RootLogger':
for meth in ['notice', 'spam', 'success', 'verbose']:
cls.locals[meth] = [scoped_nodes.Function(meth, None)] | 0.003891 |
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
bigint_as_string=False, sort_keys=False, item_sort_key=None,
for_json=False, ignore_nan=False, int_as_string_bitcount=None,
iterable_as_array=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is false then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If specified, ``separators`` should be an
``(item_separator, key_separator)`` tuple. The default is ``(', ', ': ')``
if *indent* is ``None`` and ``(',', ': ')`` otherwise. To get the most
compact JSON representation, you should specify ``(',', ':')`` to eliminate
whitespace.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
If *iterable_as_array* is true (default: ``False``),
any object not in the above table that implements ``__iter__()``
will be encoded as a JSON array.
If *bigint_as_string* is true (not the default), ints 2**53 and higher
or lower than -2**53 will be encoded as strings. This is to avoid the
rounding that happens in Javascript otherwise.
If *int_as_string_bitcount* is a positive number (n), then int of size
greater than or equal to 2**n or lower than or equal to -2**n will be
encoded as strings.
If specified, *item_sort_key* is a callable used to sort the items in
each dictionary. This is useful if you want to sort items other than
in alphabetical order by key. This option takes precedence over
*sort_keys*.
If *sort_keys* is true (default: ``False``), the output of dictionaries
will be sorted by item.
If *for_json* is true (default: ``False``), objects with a ``for_json()``
method will use the return value of that method for encoding as JSON
instead of the object.
If *ignore_nan* is true (default: ``False``), then out of range
:class:`float` values (``nan``, ``inf``, ``-inf``) will be serialized as
``null`` in compliance with the ECMA-262 specification. If true, this will
override *allow_nan*.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg. NOTE: You should use *default* instead of subclassing
whenever possible.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not iterable_as_array
and not bigint_as_string and not sort_keys
and not item_sort_key and not for_json
and not ignore_nan and int_as_string_bitcount is None
and not kw
):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
iterable_as_array=iterable_as_array,
bigint_as_string=bigint_as_string,
sort_keys=sort_keys,
item_sort_key=item_sort_key,
for_json=for_json,
ignore_nan=ignore_nan,
int_as_string_bitcount=int_as_string_bitcount,
**kw).encode(obj) | 0.000362 |
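A short usage sketch exercising a few of the options documented above, assuming the simplejson package is installed:

import decimal
import simplejson

print(simplejson.dumps({"price": decimal.Decimal("1.10"), "qty": 2},
                       use_decimal=True, sort_keys=True, indent="  "))
# -> a pretty-printed object with the Decimal serialized at full precision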
def _generate_notebooks_by_category(notebook_object, dict_by_tag):
"""
Internal function that is used for generation of the page "Notebooks by Category".
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the body will be created.
dict_by_tag : dict
Dictionary where each key is a tag and the respective value will be a list containing the
Notebooks (title and filename) that include this tag.
"""
# ============================ Insertion of an opening text ====================================
markdown_cell = OPEN_IMAGE
# == Generation of a table that group Notebooks by category the information about each signal ==
category_list = list(NOTEBOOK_KEYS.keys())
tag_keys = list(dict_by_tag.keys())
markdown_cell += """\n<table id="notebook_list" width="100%">
<tr>
<td width="20%" class="center_cell group_by_header_grey"> Category </td>
<td width="60%" class="center_cell group_by_header"></td>
<td width="20%" class="center_cell"></td>
</tr>"""
for i, category in enumerate(category_list):
if category != "MainFiles":
if category.lower() in tag_keys:
if i == 0:
first_border = "color1_top"
else:
first_border = ""
nbr_notebooks = len(dict_by_tag[category.lower()])
markdown_cell += "\n\t<tr>" \
"\n\t\t<td rowspan='" + str(nbr_notebooks + 1) + "' class='center_cell open_cell_border_" + str(NOTEBOOK_KEYS[category]) + "'><span style='float:center'><img src='../../images/icons/" + category + ".png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color" + str(NOTEBOOK_KEYS[category]) + "'>" + category + "</span></td>" \
"\n\t\t<td class='center_cell color" + str(NOTEBOOK_KEYS[category]) + "_cell " + first_border + "'><span style='float:center'>" + category + "</span></td>" \
"\n\t\t<td class='center_cell gradient_color" + str(NOTEBOOK_KEYS[category]) + "'></td>" \
"\n\t</tr>"
notebook_list = dict_by_tag[category.lower()]
for j, notebook_file in enumerate(notebook_list):
if j == len(notebook_list) - 1:
last_border = "class='border_cell_bottom_white'"
else:
last_border = ""
split_path = notebook_file.replace("\\", "/").split("/")
notebook_name = split_path[-1].split("&")[0]
notebook_title = split_path[-1].split("&")[1]
markdown_cell += "\n\t<tr " + last_border + ">" \
"\n\t\t<td class='center_cell open_cell_light'> <a href='../" + category + "/" + notebook_name + "'>" + notebook_title + "</a> </td>" \
"\n\t\t<td class='center_cell'> <a href='../" + category + "/" + notebook_name + "'><div class='file_icon'></div></a> </td>" \
"\n\t</tr>"
markdown_cell += "\n</table>"
# ============================ Insertion of an introductory text ===============================
markdown_cell += DESCRIPTION_CATEGORY
# =================== Insertion of the HTML table inside a markdown cell =======================
notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell)) | 0.004172 |
def state(self, statespec=None):
"""
Modify or inquire widget state.
:param statespec: Widget state is returned if `statespec` is None,
otherwise it is set according to the statespec
flags and then a new state spec is returned
indicating which flags were changed.
:type statespec: None or sequence[str]
"""
if statespec:
if "disabled" in statespec:
self.bind('<Button-1>', lambda e: 'break')
elif "!disabled" in statespec:
self.unbind("<Button-1>")
self.bind("<Button-1>", self._box_click, True)
return ttk.Treeview.state(self, statespec)
else:
return ttk.Treeview.state(self) | 0.007335 |
def update_allelic_expression(self, model=3):
"""
A single EM step: Update probability at read level and then re-estimate allelic specific expression
:param model: Normalization model (1: Gene->Allele->Isoform, 2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)
:return: Nothing (as it performs in-place operations)
"""
self.update_probability_at_read_level(model)
self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)
if self.target_lengths is not None:
self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths) | 0.007692 |
def package(env, target, source, PACKAGEROOT, NAME, VERSION, DESCRIPTION,
SUMMARY, X_IPK_PRIORITY, X_IPK_SECTION, SOURCE_URL,
X_IPK_MAINTAINER, X_IPK_DEPENDS, **kw):
""" This function prepares the packageroot directory for packaging with the
ipkg builder.
"""
SCons.Tool.Tool('ipkg').generate(env)
# setup the Ipkg builder
bld = env['BUILDERS']['Ipkg']
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
# This should be overrideable from the construction environment,
# which it is by using ARCHITECTURE=.
# Guessing based on what os.uname() returns at least allows it
# to work for both i386 and x86_64 Linux systems.
archmap = {
'i686' : 'i386',
'i586' : 'i386',
'i486' : 'i386',
}
buildarchitecture = os.uname()[4]
buildarchitecture = archmap.get(buildarchitecture, buildarchitecture)
if 'ARCHITECTURE' in kw:
buildarchitecture = kw['ARCHITECTURE']
# setup the kw to contain the mandatory arguments to this function.
# do this before calling any builder or setup function
loc=locals()
del loc['kw']
kw.update(loc)
del kw['source'], kw['target'], kw['env']
# generate the specfile
specfile = gen_ipk_dir(PACKAGEROOT, source, env, kw)
# override the default target.
if str(target[0])=="%s-%s"%(NAME, VERSION):
target=[ "%s_%s_%s.ipk"%(NAME, VERSION, buildarchitecture) ]
# now apply the Ipkg builder
return bld(env, target, specfile, **kw) | 0.006837 |
def get_extreme_poe(array, imtls):
"""
:param array: array of shape (L, G) with L=num_levels, G=num_gsims
:param imtls: DictArray imt -> levels
:returns:
the maximum PoE corresponding to the maximum level for IMTs and GSIMs
"""
return max(array[imtls(imt).stop - 1].max() for imt in imtls) | 0.003115 |
def main():
"""
Commandline interface for building/inspecting top-k lexicons using during decoding.
"""
params = argparse.ArgumentParser(description="Create or inspect a top-k lexicon for use during decoding.")
subparams = params.add_subparsers(title="Commands")
params_create = subparams.add_parser('create', description="Create top-k lexicon for use during decoding. "
"See sockeye_contrib/fast_align/README.md "
"for information on creating input lexical tables.")
arguments.add_lexicon_args(params_create)
arguments.add_lexicon_create_args(params_create)
arguments.add_logging_args(params_create)
params_create.set_defaults(func=create)
params_inspect = subparams.add_parser('inspect', description="Inspect top-k lexicon for use during decoding.")
arguments.add_lexicon_inspect_args(params_inspect)
arguments.add_lexicon_args(params_inspect)
params_inspect.set_defaults(func=inspect)
args = params.parse_args()
if 'func' not in args:
params.print_help()
return 1
else:
args.func(args) | 0.00579 |
def get_object_or_this(model, this=None, *args, **kwargs):
"""
Uses get() to return an object or the value of <this> argument
if object does not exist.
If the <this> argument if not provided None would be returned.
<model> can be either a QuerySet instance or a class.
"""
return get_object_or_None(model, *args, **kwargs) or this | 0.002778 |
def _update_step2(self, layers):
"""Each layer has it's own ranges_grid computed now, unless something went wrong
But all layers are shown with the same ranges (self.state.ranges_viewport)
If any of the ranges is None, take the min/max of each layer
"""
logger.info("done with ranges, now update step2 for layers: %r", layers)
for dimension in range(self.dimensions):
if self.state.ranges_viewport[dimension] is None:
vmin = min([layer.state.ranges_grid[dimension][0] for layer in layers])
vmax = max([layer.state.ranges_grid[dimension][1] for layer in layers])
self.state.ranges_viewport[dimension] = [vmin, vmax]
logger.debug("ranges before aspect check: %r", self.state.ranges_viewport)
self.check_aspect(0)
logger.debug("ranges after aspect check: %r", self.state.ranges_viewport)
# now make sure the layers all have the same ranges_grid
for layer in layers:
# layer.state.ranges_grid = copy.deepcopy(self.state.ranges_viewport)
for d in range(layer.dimensions):
layer.set_range(self.state.ranges_viewport[d][0], self.state.ranges_viewport[d][1], d)
# now we are ready to calculate histograms
promises = [layer.add_tasks_histograms() for layer in layers]
executors = list(set([layer.dataset.executor for layer in layers]))
for executor in executors:
executor.execute()
promises_histograms_done = vaex.promise.listPromise(promises)
promises_histograms_done.then(self._update_step3, self.on_error_or_cancel).end() | 0.007199 |
def persist_upstream_structure(self):
"""Persist json file with the upstream steps structure, that is step names and their connections."""
persist_dir = os.path.join(self.experiment_directory, '{}_upstream_structure.json'.format(self.name))
logger.info('Step {}, saving upstream pipeline structure to {}'.format(self.name, persist_dir))
joblib.dump(self.upstream_structure, persist_dir) | 0.011962 |
def from_bytes(SproutTx, byte_string):
'''
byte-like -> SproutTx
'''
version = byte_string[0:4]
tx_ins = []
tx_ins_num = shared.VarInt.from_bytes(byte_string[4:])
current = 4 + len(tx_ins_num)
for _ in range(tx_ins_num.number):
tx_in = TxIn.from_bytes(byte_string[current:])
current += len(tx_in)
tx_ins.append(tx_in)
tx_outs = []
tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_outs_num)
for _ in range(tx_outs_num.number):
tx_out = TxOut.from_bytes(byte_string[current:])
current += len(tx_out)
tx_outs.append(tx_out)
lock_time = byte_string[current:current + 4]
current += 4
tx_joinsplits = None
joinsplit_pubkey = None
joinsplit_sig = None
if utils.le2i(version) == 2: # If we expect joinsplits
tx_joinsplits = []
tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
current += len(tx_joinsplits_num)
for _ in range(tx_joinsplits_num.number):
joinsplit = z.SproutJoinsplit.from_bytes(byte_string[current:])
current += len(joinsplit)
tx_joinsplits.append(joinsplit)
joinsplit_pubkey = byte_string[current:current + 32]
current += 32
joinsplit_sig = byte_string[current:current + 64]
return SproutTx(
version=version,
tx_ins=tx_ins,
tx_outs=tx_outs,
lock_time=lock_time,
tx_joinsplits=tx_joinsplits,
joinsplit_pubkey=joinsplit_pubkey,
joinsplit_sig=joinsplit_sig) | 0.001136 |
def twoline2rv(longstr1, longstr2, whichconst, afspc_mode=False):
"""Return a Satellite imported from two lines of TLE data.
Provide the two TLE lines as strings `longstr1` and `longstr2`,
and select which standard set of gravitational constants you want
by providing `whichconst`:
`sgp4.earth_gravity.wgs72` - Standard WGS 72 model
`sgp4.earth_gravity.wgs84` - More recent WGS 84 model
`sgp4.earth_gravity.wgs72old` - Legacy support for old SGP4 behavior
Normally, computations are made using various recent improvements
to the algorithm. If you want to turn some of these off and go
back into "afspc" mode, then set `afspc_mode` to `True`.
"""
deg2rad = pi / 180.0; # 0.0174532925199433
xpdotp = 1440.0 / (2.0 *pi); # 229.1831180523293
tumin = whichconst.tumin
satrec = Satellite()
satrec.error = 0;
satrec.whichconst = whichconst # Python extension: remembers its consts
line = longstr1.rstrip()
# try/except is not well supported by Numba
if (len(line) >= 64 and
line.startswith('1 ') and
line[8] == ' ' and
line[23] == '.' and
line[32] == ' ' and
line[34] == '.' and
line[43] == ' ' and
line[52] == ' ' and
line[61] == ' ' and
line[63] == ' '):
_saved_satnum = satrec.satnum = int(line[2:7])
# classification = line[7] or 'U'
# intldesg = line[9:17]
two_digit_year = int(line[18:20])
satrec.epochdays = float(line[20:32])
satrec.ndot = float(line[33:43])
satrec.nddot = float(line[44] + '.' + line[45:50])
nexp = int(line[50:52])
satrec.bstar = float(line[53] + '.' + line[54:59])
ibexp = int(line[59:61])
# numb = int(line[62])
# elnum = int(line[64:68])
else:
raise ValueError(error_message.format(1, LINE1, line))
line = longstr2.rstrip()
if (len(line) >= 69 and
line.startswith('2 ') and
line[7] == ' ' and
line[11] == '.' and
line[16] == ' ' and
line[20] == '.' and
line[25] == ' ' and
line[33] == ' ' and
line[37] == '.' and
line[42] == ' ' and
line[46] == '.' and
line[51] == ' '):
satrec.satnum = int(line[2:7])
if _saved_satnum != satrec.satnum:
raise ValueError('Object numbers in lines 1 and 2 do not match')
satrec.inclo = float(line[8:16])
satrec.nodeo = float(line[17:25])
satrec.ecco = float('0.' + line[26:33].replace(' ', '0'))
satrec.argpo = float(line[34:42])
satrec.mo = float(line[43:51])
satrec.no = float(line[52:63])
#revnum = line[63:68]
#except (AssertionError, IndexError, ValueError):
else:
raise ValueError(error_message.format(2, LINE2, line))
# ---- find no, ndot, nddot ----
satrec.no = satrec.no / xpdotp; # rad/min
satrec.nddot= satrec.nddot * pow(10.0, nexp);
satrec.bstar= satrec.bstar * pow(10.0, ibexp);
# ---- convert to sgp4 units ----
satrec.a = pow( satrec.no*tumin , (-2.0/3.0) );
satrec.ndot = satrec.ndot / (xpdotp*1440.0); # ? * minperday
satrec.nddot= satrec.nddot / (xpdotp*1440.0*1440);
# ---- find standard orbital elements ----
satrec.inclo = satrec.inclo * deg2rad;
satrec.nodeo = satrec.nodeo * deg2rad;
satrec.argpo = satrec.argpo * deg2rad;
satrec.mo = satrec.mo * deg2rad;
satrec.alta = satrec.a*(1.0 + satrec.ecco) - 1.0;
satrec.altp = satrec.a*(1.0 - satrec.ecco) - 1.0;
"""
// ----------------------------------------------------------------
// find sgp4epoch time of element set
// remember that sgp4 uses units of days from 0 jan 1950 (sgp4epoch)
// and minutes from the epoch (time)
// ----------------------------------------------------------------
// ---------------- temp fix for years from 1957-2056 -------------------
// --------- correct fix will occur when year is 4-digit in tle ---------
"""
if two_digit_year < 57:
year = two_digit_year + 2000;
else:
year = two_digit_year + 1900;
mon,day,hr,minute,sec = days2mdhms(year, satrec.epochdays);
sec_whole, sec_fraction = divmod(sec, 1.0)
satrec.epochyr = year
satrec.jdsatepoch = jday(year,mon,day,hr,minute,sec);
satrec.epoch = datetime(year, mon, day, hr, minute, int(sec_whole),
int(sec_fraction * 1000000.0 // 1.0))
# ---------------- initialize the orbit at sgp4epoch -------------------
sgp4init(whichconst, afspc_mode, satrec.satnum, satrec.jdsatepoch-2433281.5, satrec.bstar,
satrec.ecco, satrec.argpo, satrec.inclo, satrec.mo, satrec.no,
satrec.nodeo, satrec)
return satrec | 0.011974 |
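The `bstar` and `nddot` fields above use the TLE implied-decimal convention: the element set stores a sign, a five-digit mantissa with an assumed leading "0.", and a signed power-of-ten exponent, which is why the code glues a '.' into the string before calling `float` and then scales by `pow(10, exponent)`. A small sketch with a made-up field value (the slicing mirrors the columns used above, but the sample string is hypothetical):

```python
# Hypothetical bstar field as it would appear in columns 54-61 of line 1,
# e.g. ' 34123-4', meaning 0.34123e-4.
field = ' 34123-4'

mantissa = float(field[0] + '.' + field[1:6])   # ' .34123' -> 0.34123
exponent = int(field[6:8])                      # -4
bstar = mantissa * 10.0 ** exponent
print(bstar)  # 3.4123e-05
```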
def set_hemisphere(self, hemi_str):
'''
Given a hemisphere identifier, set the sign of the coordinate to match that hemisphere
'''
if hemi_str == 'W':
self.degree = abs(self.degree)*-1
self.minute = abs(self.minute)*-1
self.second = abs(self.second)*-1
self._update()
elif hemi_str == 'E':
self.degree = abs(self.degree)
self.minute = abs(self.minute)
self.second = abs(self.second)
self._update()
else:
raise ValueError('Hemisphere identifier for longitudes must be E or W') | 0.006339 |
def _add_new_init_method(cls):
"""
Replace the existing cls.__init__() method with a new one
which calls the original one and in addition performs the
following actions:
(1) Finds all instances of tohu.BaseGenerator in the namespace
and collects them in the dictionary `self.field_gens`.
(2) ..to do..
"""
orig_init = cls.__init__
def new_init_method(self, *args, **kwargs):
logger.debug(f"Initialising new {self} (type: {type(self)})")
# Call original __init__ function to ensure we pick up
# any tohu generators that are defined there.
#
logger.debug(f" orig_init: {orig_init}")
orig_init(self, *args, **kwargs)
#
# Find field generator templates and spawn them to create
# field generators for the new custom generator instance.
#
field_gens_templates = find_field_generator_templates(self)
logger.debug(f'Found {len(field_gens_templates)} field generator template(s):')
debug_print_dict(field_gens_templates)
logger.debug('Spawning field generator templates...')
origs = {}
spawned = {}
dependency_mapping = {}
for (name, gen) in field_gens_templates.items():
origs[name] = gen
spawned[name] = gen.spawn(dependency_mapping)
logger.debug(f'Adding dependency mapping: {gen} -> {spawned[name]}')
self.field_gens = spawned
self.__dict__.update(self.field_gens)
logger.debug(f'Spawned field generators attached to custom generator instance:')
debug_print_dict(self.field_gens)
# Add seed generator
#
#self.seed_generator = SeedGenerator()
# Create class for the items produced by this generator
#
self.__class__.item_cls = make_item_class_for_custom_generator_class(self)
cls.__init__ = new_init_method | 0.00312 |
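Stripped of the tohu-specific spawning logic, the snippet above is the classic "wrap and replace `__init__`" pattern: keep a reference to the original initialiser, call it inside a new function, do extra set-up afterwards, and assign the new function back onto the class. A minimal generic sketch of that pattern (the decorator name and the post-init attribute are invented for the demo):

```python
def add_post_init(cls):
    """Wrap cls.__init__ so extra set-up runs after the original initialiser."""
    orig_init = cls.__init__

    def new_init(self, *args, **kwargs):
        orig_init(self, *args, **kwargs)   # preserve the original behaviour
        self._post_init_done = True        # extra per-instance set-up goes here

    cls.__init__ = new_init
    return cls

@add_post_init
class Demo:
    def __init__(self, x):
        self.x = x

d = Demo(5)
print(d.x, d._post_init_done)  # 5 True
```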
def get_engine(name):
"""
    Get an engine class from a string (the class name without the 'Engine' suffix).
"""
name = name.capitalize() + 'Engine'
if name in globals():
return globals()[name]
raise KeyError("engine '%s' does not exist" % name) | 0.003968 |
def check_block_spacing(
self,
first_block_type: LineType,
second_block_type: LineType,
error_message: str,
) -> typing.Generator[AAAError, None, None]:
"""
Checks there is a clear single line between ``first_block_type`` and
``second_block_type``.
Note:
Is tested via ``check_arrange_act_spacing()`` and
``check_act_assert_spacing()``.
"""
numbered_lines = list(enumerate(self))
first_block_lines = filter(lambda l: l[1] is first_block_type, numbered_lines)
try:
first_block_lineno = list(first_block_lines)[-1][0]
except IndexError:
# First block has no lines
return
second_block_lines = filter(lambda l: l[1] is second_block_type, numbered_lines)
try:
second_block_lineno = next(second_block_lines)[0]
except StopIteration:
# Second block has no lines
return
blank_lines = [
bl for bl in numbered_lines[first_block_lineno + 1:second_block_lineno] if bl[1] is LineType.blank_line
]
if not blank_lines:
# Point at line above second block
yield AAAError(
line_number=self.fn_offset + second_block_lineno - 1,
offset=0,
text=error_message.format('none'),
)
return
if len(blank_lines) > 1:
# Too many blank lines - point at the first extra one, the 2nd
yield AAAError(
line_number=self.fn_offset + blank_lines[1][0],
offset=0,
text=error_message.format(len(blank_lines)),
) | 0.003472 |
def verification_digit(numbers):
"""
    Returns the verification digit for a given number.
    The verification digit is calculated as follows:
    * A = sum of all even-positioned numbers
    * B = A * 3
    * C = sum of all odd-positioned numbers
    * D = B + C
    * The result is the smallest number N, such that (D + N) % 10 == 0
    NOTE: Afip's documentation seems to have odd and even mixed up in the
    explanation, but all examples follow the above algorithm.
    :param list(int) numbers: The numbers for which the digit is to be
        calculated.
:return: int
"""
a = sum(numbers[::2])
b = a * 3
c = sum(numbers[1::2])
d = b + c
e = d % 10
if e == 0:
return e
return 10 - e | 0.002404 |
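A worked example of the algorithm described in the docstring; the sample digits are arbitrary and only illustrate the even/odd split and the final modulo step:

```python
digits = [2, 0, 3, 1, 2, 3, 4, 5, 6, 7]   # hypothetical input digits

a = sum(digits[::2])        # even-positioned digits (indices 0, 2, 4, ...)
b = a * 3
c = sum(digits[1::2])       # odd-positioned digits (indices 1, 3, 5, ...)
d = b + c
n = (10 - d % 10) % 10      # smallest N such that (d + N) % 10 == 0

print(n)   # the verification digit for this input
```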
def _read_http_ping(self, size, kind, flag):
"""Read HTTP/2 PING frames.
Structure of HTTP/2 PING frame [RFC 7540]:
+-----------------------------------------------+
| Length (24) |
+---------------+---------------+---------------+
| Type (8) | Flags (8) |
+-+-------------+---------------+-------------------------------+
|R| Stream Identifier (31) |
+---------------------------------------------------------------+
| |
| Opaque Data (64) |
| |
+---------------------------------------------------------------+
Octets Bits Name Description
0 0 http.length Length
3 24 http.type Type (2)
4 32 http.flags Flags
5 40 - Reserved
5 41 http.sid Stream Identifier
9 72 http.data Opaque Data
"""
if size != 8:
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
_flag = dict(
ACK=False, # bit 0
)
for index, bit in enumerate(flag):
if index == 0 and bit:
_flag['ACK'] = True
elif bit:
raise ProtocolError(f'HTTP/2: [Type {kind}] invalid format', quiet=True)
else:
continue
_data = self._read_fileng(8)
data = dict(
flags=_flag,
data=_data,
)
return data | 0.002023 |
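A stand-alone sketch of the same PING-frame payload handling on a plain `bytes` object, assuming the 9-byte frame header has already been read elsewhere; it only mirrors the flag and opaque-data checks above (RFC 7540 defines ACK, 0x1, as the sole PING flag):

```python
def parse_ping_payload(payload: bytes, flags_byte: int) -> dict:
    """Parse the 8-byte opaque data and ACK flag of an HTTP/2 PING frame."""
    if len(payload) != 8:
        raise ValueError('HTTP/2 PING frames must carry exactly 8 octets')
    if flags_byte & 0xFE:
        raise ValueError('only the ACK (0x1) flag is defined for PING frames')
    return {
        'flags': {'ACK': bool(flags_byte & 0x01)},
        'data': payload,
    }

print(parse_ping_payload(b'\x00' * 8, 0x01))  # a PING ACK with zeroed data
```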
def _delete(self, url, data, scope):
"""
Make a DELETE request using the session object to a Degreed endpoint.
Args:
url (str): The url to send a DELETE request to.
data (str): The json encoded payload to DELETE.
scope (str): Must be one of the scopes Degreed expects:
- `CONTENT_PROVIDER_SCOPE`
- `COMPLETION_PROVIDER_SCOPE`
"""
self._create_session(scope)
response = self.session.delete(url, data=data)
return response.status_code, response.text | 0.003401 |
def clean_upload(self, query='/content/uploads/'):
"""
pulp leaves droppings if you don't specifically tell it
to clean up after itself. use this to do so.
"""
query = query + self.uid + '/'
_r = self.connector.delete(query)
if _r.status_code == Constants.PULP_DELETE_OK:
juicer.utils.Log.log_info("Cleaned up after upload request.")
else:
_r.raise_for_status() | 0.004444 |
def can_transfer(self, block_identifier: BlockSpecification) -> bool:
""" Returns True if the channel is opened and the node has deposit in it. """
return self.token_network.can_transfer(
participant1=self.participant1,
participant2=self.participant2,
block_identifier=block_identifier,
channel_identifier=self.channel_identifier,
) | 0.007426 |
def sample_measurement_ops(
self,
measurement_ops: List[ops.GateOperation],
repetitions: int = 1) -> Dict[str, np.ndarray]:
"""Samples from the system at this point in the computation.
Note that this does not collapse the wave function.
In contrast to `sample` which samples qubits, this takes a list of
`cirq.GateOperation` instances whose gates are `cirq.MeasurementGate`
instances and then returns a mapping from the key in the measurement
gate to the resulting bit strings. Different measurement operations must
not act on the same qubits.
Args:
measurement_ops: `GateOperation` instances whose gates are
                `MeasurementGate` instances to be sampled from.
repetitions: The number of samples to take.
Returns: A dictionary from measurement gate key to measurement
results. Measurement results are stored in a 2-dimensional
numpy array, the first dimension corresponding to the repetition
and the second to the actual boolean measurement results (ordered
by the qubits being measured.)
Raises:
ValueError: If the operation's gates are not `MeasurementGate`
instances or a qubit is acted upon multiple times by different
operations from `measurement_ops`.
"""
bounds = {} # type: Dict[str, Tuple]
all_qubits = [] # type: List[ops.Qid]
current_index = 0
for op in measurement_ops:
gate = op.gate
if not isinstance(gate, ops.MeasurementGate):
raise ValueError('{} was not a MeasurementGate'.format(gate))
key = protocols.measurement_key(gate)
if key in bounds:
raise ValueError(
'Duplicate MeasurementGate with key {}'.format(key))
bounds[key] = (current_index, current_index + len(op.qubits))
all_qubits.extend(op.qubits)
current_index += len(op.qubits)
indexed_sample = self.sample(all_qubits, repetitions)
return {k: np.array([x[s:e] for x in indexed_sample]) for k, (s, e) in
bounds.items()} | 0.001331 |
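A tiny illustration of the `bounds` bookkeeping used above, independent of cirq: each measurement key maps to a half-open column slice of the per-repetition sample rows.

```python
import numpy as np

# Hypothetical raw samples: 3 repetitions over 4 qubits (one column per qubit).
indexed_sample = np.array([[0, 1, 1, 0],
                           [1, 0, 1, 1],
                           [0, 0, 0, 1]])

# Suppose key 'a' measured qubits 0-1 and key 'b' measured qubits 2-3.
bounds = {'a': (0, 2), 'b': (2, 4)}

results = {k: np.array([row[s:e] for row in indexed_sample])
           for k, (s, e) in bounds.items()}
print(results['a'])  # columns 0..1 of every repetition
print(results['b'])  # columns 2..3 of every repetition
```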
def get_single_external_tool_courses(self, course_id, external_tool_id):
"""
Get a single external tool.
Returns the specified external tool.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - external_tool_id
"""ID"""
path["external_tool_id"] = external_tool_id
self.logger.debug("GET /api/v1/courses/{course_id}/external_tools/{external_tool_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools/{external_tool_id}".format(**path), data=data, params=params, no_data=True) | 0.004926 |
def SignedVarintEncode(value):
"""Encode a signed integer as a zigzag encoded signed integer."""
result = b""
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
result += HIGH_CHR_MAP[bits]
bits = value & 0x7f
value >>= 7
result += CHR_MAP[bits]
return result | 0.028125 |
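A matching decoder sketch for the function above, assuming `HIGH_CHR_MAP[b]` yields the byte `b | 0x80` (continuation bit set) and `CHR_MAP[b]` the plain byte, i.e. a standard little-endian base-128 varint with negatives offset by 2**64:

```python
def signed_varint_decode(data):
    # Reassemble the 7-bit groups (least significant first), then undo the
    # 64-bit two's-complement mapping used for negative numbers.
    value = 0
    shift = 0
    for byte in data:
        value |= (byte & 0x7F) << shift
        shift += 7
        if not byte & 0x80:       # continuation bit clear -> last byte
            break
    if value >= (1 << 63):
        value -= (1 << 64)
    return value

print(signed_varint_decode(b'\x2a'))                          # 42
print(signed_varint_decode(b'\xd6' + b'\xff' * 8 + b'\x01'))  # -42
```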
def proportion_merge(brands, exemplars):
""" Return the proportion of a brand's followers who also follower an
exemplar. We merge all exemplar followers into one big pseudo-account."""
scores = {}
exemplar_followers = set()
for followers in exemplars.values():
exemplar_followers |= followers
for brand, followers in brands:
scores[brand] = _proportion(followers, exemplar_followers)
return scores | 0.002262 |
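A self-contained sketch of the merge-then-score idea, with a hypothetical `_proportion` that scores a brand by the fraction of its followers found in the merged exemplar set (the real helper may compute something different):

```python
def _proportion(followers, exemplar_followers):
    # Hypothetical scoring: fraction of the brand's followers who also
    # follow at least one exemplar.
    return len(followers & exemplar_followers) / len(followers) if followers else 0.0

exemplars = {'ex1': {1, 2, 3}, 'ex2': {3, 4}}
brands = [('brandA', {1, 4, 9}), ('brandB', {7, 8})]

merged = set()
for followers in exemplars.values():
    merged |= followers

scores = {brand: _proportion(followers, merged) for brand, followers in brands}
print(scores)   # brandA -> 2/3, brandB -> 0.0
```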
def get_message(self, transport, http_version: str, method: bytes,
url: bytes, headers: List[List[bytes]]) -> Dict[str, Any]:
'''
http://channels.readthedocs.io/en/stable/asgi/www.html#request
'''
url_obj = parse_url(url)
if url_obj.schema is None:
if transport.get_extra_info('sslcontext'):
scheme = 'https'
else:
scheme = 'http'
else:
scheme = url_obj.schema.decode()
path = '' if url_obj.path is None else url_obj.path.decode('utf-8')
query = b'' if url_obj.query is None else url_obj.query
return {
'channel': 'http.request',
'reply_channel': None,
'http_version': http_version,
'method': method.decode(),
'scheme': scheme,
'path': path,
'query_string': query,
'root_path': '',
'headers': headers,
'body': b'',
'body_channel': None,
'client': transport.get_extra_info('peername'),
'server': transport.get_extra_info('sockname')
} | 0.002593 |
def _detail_participant(
self,
channel_identifier: ChannelID,
participant: Address,
partner: Address,
block_identifier: BlockSpecification,
) -> ParticipantDetails:
""" Returns a dictionary with the channel participant information. """
data = self._call_and_check_result(
block_identifier,
'getChannelParticipantInfo',
channel_identifier=channel_identifier,
participant=to_checksum_address(participant),
partner=to_checksum_address(partner),
)
return ParticipantDetails(
address=participant,
deposit=data[ParticipantInfoIndex.DEPOSIT],
withdrawn=data[ParticipantInfoIndex.WITHDRAWN],
is_closer=data[ParticipantInfoIndex.IS_CLOSER],
balance_hash=data[ParticipantInfoIndex.BALANCE_HASH],
nonce=data[ParticipantInfoIndex.NONCE],
locksroot=data[ParticipantInfoIndex.LOCKSROOT],
locked_amount=data[ParticipantInfoIndex.LOCKED_AMOUNT],
) | 0.002747 |
def view(self, viewer=None, use_curr_dir=False):
"""View your molecule.
.. note:: This function writes a temporary file and opens it with
an external viewer.
If you modify your molecule afterwards you have to recall view
in order to see the changes.
Args:
viewer (str): The external viewer to use. If it is None,
the default as specified in cc.settings['defaults']['viewer']
is used.
use_curr_dir (bool): If True, the temporary file is written to
                the current directory. Otherwise it gets written to the
                OS-dependent temporary directory.
Returns:
None:
"""
if viewer is None:
viewer = settings['defaults']['viewer']
if use_curr_dir:
TEMP_DIR = os.path.curdir
else:
TEMP_DIR = tempfile.gettempdir()
def give_filename(i):
filename = 'ChemCoord_' + str(i) + '.xyz'
return os.path.join(TEMP_DIR, filename)
i = 1
while os.path.exists(give_filename(i)):
i = i + 1
self.to_xyz(give_filename(i))
def open_file(i):
"""Open file and close after being finished."""
try:
subprocess.check_call([viewer, give_filename(i)])
except (subprocess.CalledProcessError, FileNotFoundError):
raise
finally:
if use_curr_dir:
pass
else:
os.remove(give_filename(i))
Thread(target=open_file, args=(i,)).start() | 0.001203 |
def dump_private_keys_or_addrs_chooser(wallet_obj):
'''
Offline-enabled mechanism to dump everything
'''
if wallet_obj.private_key:
puts('Which private keys and addresses do you want?')
else:
puts('Which addresses do you want?')
with indent(2):
puts(colored.cyan('1: Active - have funds to spend'))
puts(colored.cyan('2: Spent - no funds to spend (because they have been spent)'))
puts(colored.cyan('3: Unused - no funds to spend (because the address has never been used)'))
puts(colored.cyan('0: All (works offline) - regardless of whether they have funds to spend (super advanced users only)'))
puts(colored.cyan('\nb: Go Back\n'))
choice = choice_prompt(
user_prompt=DEFAULT_PROMPT,
acceptable_responses=[0, 1, 2, 3],
default_input='1',
show_default=True,
quit_ok=True,
)
if choice is False:
return
if choice == '1':
return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=False, used=True)
elif choice == '2':
return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=True, used=True)
elif choice == '3':
return dump_selected_keys_or_addrs(wallet_obj=wallet_obj, zero_balance=None, used=False)
elif choice == '0':
return dump_all_keys_or_addrs(wallet_obj=wallet_obj) | 0.00494 |
def from_transform(cls, matrix):
r"""
:param matrix: 4x4 3d affine transform matrix
:type matrix: :class:`FreeCAD.Matrix`
:return: a unit, zero offset coordinate system transformed by the given matrix
:rtype: :class:`CoordSystem`
Individual rotation & translation matricies are:
.. math::
R_z & = \begin{bmatrix}
cos(\alpha) & -sin(\alpha) & 0 & 0 \\
sin(\alpha) & cos(\alpha) & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 0 & 0 & 1
\end{bmatrix} \qquad & R_y & = \begin{bmatrix}
cos(\beta) & 0 & sin(\beta) & 0 \\
0 & 1 & 0 & 0 \\
-sin(\beta) & 0 & cos(\beta) & 0 \\
0 & 0 & 0 & 1
\end{bmatrix} \\
\\
R_x & = \begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & cos(\gamma) & -sin(\gamma) & 0 \\
0 & sin(\gamma) & cos(\gamma) & 0 \\
0 & 0 & 0 & 1
\end{bmatrix} \qquad & T_{\text{xyz}} & = \begin{bmatrix}
1 & 0 & 0 & \delta x \\
0 & 1 & 0 & \delta y \\
0 & 0 & 1 & \delta z \\
0 & 0 & 0 & 1
\end{bmatrix}
The ``transform`` is the combination of these:
.. math::
transform = T_{\text{xyz}} \cdot R_z \cdot R_y \cdot R_x = \begin{bmatrix}
a & b & c & \delta x \\
d & e & f & \delta y \\
g & h & i & \delta z \\
0 & 0 & 0 & 1
\end{bmatrix}
Where:
.. math::
a & = cos(\alpha) cos(\beta) \\
b & = cos(\alpha) sin(\beta) sin(\gamma) - sin(\alpha) cos(\gamma) \\
c & = cos(\alpha) sin(\beta) cos(\gamma) + sin(\alpha) sin(\gamma) \\
d & = sin(\alpha) cos(\beta) \\
e & = sin(\alpha) sin(\beta) sin(\gamma) + cos(\alpha) cos(\gamma) \\
f & = sin(\alpha) sin(\beta) cos(\gamma) - cos(\alpha) sin(\gamma) \\
g & = -sin(\beta) \\
h & = cos(\beta) sin(\gamma) \\
i & = cos(\beta) cos(\gamma)
"""
# Create reference points at origin
offset = FreeCAD.Vector(0, 0, 0)
x_vertex = FreeCAD.Vector(1, 0, 0) # vertex along +X-axis
z_vertex = FreeCAD.Vector(0, 0, 1) # vertex along +Z-axis
# Transform reference points
offset = matrix.multiply(offset)
x_vertex = matrix.multiply(x_vertex)
z_vertex = matrix.multiply(z_vertex)
# Get axis vectors (relative to offset vertex)
x_axis = x_vertex - offset
z_axis = z_vertex - offset
# Return new instance
vect_tuple = lambda v: (v.x, v.y, v.z)
return cls(
origin=vect_tuple(offset),
xDir=vect_tuple(x_axis),
normal=vect_tuple(z_axis),
) | 0.00307 |
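A quick numeric check of the matrix composition described in the docstring, done with numpy instead of FreeCAD; the angles and offsets are arbitrary test values, and the assertions verify that `T·Rz·Ry·Rx` reproduces the closed-form entries a…i:

```python
import numpy as np

alpha, beta, gamma = 0.3, -0.7, 1.1    # arbitrary rotation angles (radians)
dx, dy, dz = 1.0, 2.0, 3.0             # arbitrary translation

ca, sa = np.cos(alpha), np.sin(alpha)
cb, sb = np.cos(beta), np.sin(beta)
cg, sg = np.cos(gamma), np.sin(gamma)

Rz = np.array([[ca, -sa, 0, 0], [sa, ca, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Ry = np.array([[cb, 0, sb, 0], [0, 1, 0, 0], [-sb, 0, cb, 0], [0, 0, 0, 1]])
Rx = np.array([[1, 0, 0, 0], [0, cg, -sg, 0], [0, sg, cg, 0], [0, 0, 0, 1]])
T = np.array([[1, 0, 0, dx], [0, 1, 0, dy], [0, 0, 1, dz], [0, 0, 0, 1]])

transform = T @ Rz @ Ry @ Rx

expected = np.array([   # the a..i entries from the docstring
    [ca * cb, ca * sb * sg - sa * cg, ca * sb * cg + sa * sg],
    [sa * cb, sa * sb * sg + ca * cg, sa * sb * cg - ca * sg],
    [-sb,     cb * sg,                cb * cg],
])
assert np.allclose(transform[:3, :3], expected)
assert np.allclose(transform[:3, 3], [dx, dy, dz])
print("composition matches the closed-form entries")
```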
def make_library(self, diffuse_yaml, catalog_yaml, binning_yaml):
""" Build up the library of all the components
Parameters
----------
diffuse_yaml : str
Name of the yaml file with the library of diffuse component definitions
catalog_yaml : str
            Name of the yaml file with the library of catalog split definitions
binning_yaml : str
Name of the yaml file with the binning definitions
"""
ret_dict = {}
#catalog_dict = yaml.safe_load(open(catalog_yaml))
components_dict = Component.build_from_yamlfile(binning_yaml)
diffuse_ret_dict = make_diffuse_comp_info_dict(GalpropMapManager=self._gmm,
DiffuseModelManager=self._dmm,
library=diffuse_yaml,
components=components_dict)
catalog_ret_dict = make_catalog_comp_dict(library=catalog_yaml,
CatalogSourceManager=self._csm)
ret_dict.update(diffuse_ret_dict['comp_info_dict'])
ret_dict.update(catalog_ret_dict['comp_info_dict'])
self._library.update(ret_dict)
return ret_dict | 0.006928 |
def other_object_webhook_handler(event):
"""Handle updates to transfer, charge, invoice, invoiceitem, plan, product and source objects.
Docs for:
- charge: https://stripe.com/docs/api#charges
- coupon: https://stripe.com/docs/api#coupons
- invoice: https://stripe.com/docs/api#invoices
- invoiceitem: https://stripe.com/docs/api#invoiceitems
- plan: https://stripe.com/docs/api#plans
- product: https://stripe.com/docs/api#products
- source: https://stripe.com/docs/api#sources
"""
if event.parts[:2] == ["charge", "dispute"]:
        # charge.dispute.* events are mapped to the Dispute model.
target_cls = models.Dispute
else:
target_cls = {
"charge": models.Charge,
"coupon": models.Coupon,
"invoice": models.Invoice,
"invoiceitem": models.InvoiceItem,
"plan": models.Plan,
"product": models.Product,
"transfer": models.Transfer,
"source": models.Source,
}.get(event.category)
_handle_crud_like_event(target_cls=target_cls, event=event) | 0.027695 |
def find_malformed_single_file_project(self): # type: () -> List[str]
"""
Take first non-setup.py python file. What a mess.
:return:
"""
files = [f for f in os.listdir(".") if os.path.isfile(f)]
candidates = []
# project misnamed & not in setup.py
for file in files:
if file.endswith("setup.py") or not file.endswith(".py"):
continue # duh
candidate = file.replace(".py", "")
if candidate != "setup":
candidates.append(candidate)
# return first
return candidates
# files with shebang
for file in files:
if file.endswith("setup.py"):
continue # duh
if "." not in file:
                candidate = file.replace(".py", "")
try:
firstline = self.file_opener.open_this(file, "r").readline()
if (
firstline.startswith("#")
and "python" in firstline
and candidate in self.setup_py_source()
):
candidates.append(candidate)
return candidates
except:
pass
# default.
return candidates | 0.003003 |
def _raw_open(self, flags, mode=0o777):
"""
Open the file pointed by this path and return a file descriptor,
as os.open() does.
"""
if self._closed:
self._raise_closed()
return self._accessor.open(self, flags, mode) | 0.007273 |
def load_or_create_config(self, filename, config=None):
"""Loads a config from disk. Defaults to a random config if none is specified"""
os.makedirs(os.path.dirname(os.path.expanduser(filename)), exist_ok=True)
if os.path.exists(filename):
return self.load(filename)
        if config is None:
config = self.random_config()
self.save(filename, config)
return config | 0.011547 |
def create(cls, api_context, public_key_string):
"""
:type api_context: bunq.sdk.context.ApiContext
:type public_key_string: str
:rtype: client.BunqResponse[Installation]
"""
api_client = client.ApiClient(api_context)
body_bytes = cls.generate_request_body_bytes(
public_key_string
)
response_raw = api_client.post(cls._ENDPOINT_URL_POST, body_bytes, {})
return cls._from_json_array_nested(response_raw) | 0.004016 |
def ssa(scatterer, h_pol=True):
"""Single-scattering albedo for the current setup, with polarization.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The single-scattering albedo.
"""
ext_xs = ext_xsect(scatterer, h_pol=h_pol)
return sca_xsect(scatterer, h_pol=h_pol)/ext_xs if ext_xs > 0.0 else 0.0 | 0.004405 |
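The returned quantity is simply the ratio of the scattering to the extinction cross section, guarded against a vanishing denominator; a dependency-free sketch with made-up cross sections:

```python
def single_scattering_albedo(sca, ext):
    # ssa = C_sca / C_ext, with a zero extinction cross section mapped to 0.
    return sca / ext if ext > 0.0 else 0.0

print(single_scattering_albedo(0.8, 1.0))   # 0.8
print(single_scattering_albedo(0.0, 0.0))   # 0.0
```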
def main(argv):
"""This function sets up a command-line option parser and then calls match_and_print
to do all of the real work.
"""
import argparse
description = 'Uses Open Tree of Life web services to try to find a taxon ID for each name supplied. ' \
'Using a --context-name=NAME to provide a limited taxonomic context and using the ' \
                  ' --prohibit-fuzzy-matching option can make the matching faster. If only ' \
                  'one match is found, then it also calls the equivalent of the ot-taxon-info.py and ot-taxon-subtree.py scripts.'
parser = argparse.ArgumentParser(prog='ot-tnrs-match-names', description=description)
parser.add_argument('names', nargs='+', help='name(s) for which we will try to find OTT IDs')
parser.add_argument('--context-name', default=None, type=str, required=False)
parser.add_argument('--include-dubious',
action='store_true',
default=False,
required=False,
help='return matches to taxa that are not included the synthetic tree because their taxonomic status is doubtful')
parser.add_argument('--subtree',
action='store_true',
default=False,
required=False,
help='print the newick representation of the taxonomic subtree if there is only one matching OTT ID')
parser.add_argument('--include-deprecated', action='store_true', default=False, required=False)
parser.add_argument('--prohibit-fuzzy-matching', action='store_true', default=False, required=False)
args = parser.parse_args(argv)
# The service takes do_approximate_matching
# We use the opposite to make the command-line just include positive directives
# (as opposed to requiring --do-approximate-matching=False) so we use "not"
do_approximate_matching = not args.prohibit_fuzzy_matching
name_list = args.names
if len(name_list) == 0:
name_list = ["Homo sapiens", "Gorilla gorilla"]
sys.stderr.write('Running a demonstration query with {}\n'.format(name_list))
else:
for name in name_list:
if name.startswith('-'):
parser.print_help()
match_and_print(name_list,
context_name=args.context_name,
do_approximate_matching=do_approximate_matching,
include_dubious=args.include_dubious,
include_deprecated=args.include_deprecated,
include_subtree=args.subtree,
output=sys.stdout) | 0.006006 |
def get_weights(self):
"""Returns a dictionary containing the weights of the network.
Returns:
Dictionary mapping variable names to their weights.
"""
self._check_sess()
return {
k: v.eval(session=self.sess)
for k, v in self.variables.items()
} | 0.006079 |
def blog_months(*args):
"""
Put a list of dates for blog posts into the template context.
"""
dates = BlogPost.objects.published().values_list("publish_date", flat=True)
date_dicts = [{"date": datetime(d.year, d.month, 1)} for d in dates]
month_dicts = []
for date_dict in date_dicts:
if date_dict not in month_dicts:
month_dicts.append(date_dict)
for i, date_dict in enumerate(month_dicts):
month_dicts[i]["post_count"] = date_dicts.count(date_dict)
return month_dicts | 0.001876 |
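The de-duplicate-and-count loop above is equivalent to counting (year, month) buckets; a compact stand-alone sketch with hypothetical publish dates and no Django/Mezzanine models:

```python
from collections import Counter
from datetime import datetime

dates = [datetime(2024, 1, 5), datetime(2024, 1, 20), datetime(2024, 3, 2)]

counts = Counter(datetime(d.year, d.month, 1) for d in dates)
month_dicts = [{"date": month, "post_count": n} for month, n in counts.items()]
print(month_dicts)   # two posts in 2024-01, one post in 2024-03
```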
def create_scrolled_window(self, layout_manager, horizontal=Gtk.PolicyType.NEVER, vertical=Gtk.PolicyType.ALWAYS):
"""
Function creates a scrolled window with layout manager
"""
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.add(layout_manager)
scrolled_window.set_policy(horizontal, vertical)
return scrolled_window | 0.007895 |
def main():
"""
Print lines of input along with output.
"""
source_lines = (line.rstrip() for line in sys.stdin)
console = InteractiveInterpreter()
console.runsource('import turicreate')
source = ''
try:
while True:
source = source_lines.next()
more = console.runsource(source)
while more:
next_line = source_lines.next()
print '...', next_line
source += '\n' + next_line
more = console.runsource(source)
except StopIteration:
if more:
print '... '
more = console.runsource(source + '\n') | 0.001506 |
def delete(queue_id):
'''
Delete message(s) from the mail queue
CLI Example:
.. code-block:: bash
salt '*' postfix.delete 5C33CA0DEA
salt '*' postfix.delete ALL
'''
ret = {'message': '',
'result': True
}
if not queue_id:
log.error('Require argument queue_id')
if not queue_id == 'ALL':
queue = show_queue()
_message = None
for item in queue:
if item['queue_id'] == queue_id:
_message = item
if not _message:
ret['message'] = 'No message in queue with ID {0}'.format(queue_id)
ret['result'] = False
return ret
cmd = 'postsuper -d {0}'.format(queue_id)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] == 0:
if queue_id == 'ALL':
ret['message'] = 'Successfully removed all messages'
else:
ret['message'] = 'Successfully removed message with queue id {0}'.format(queue_id)
else:
if queue_id == 'ALL':
            ret['message'] = 'Unable to remove all messages'
else:
ret['message'] = 'Unable to remove message with queue id {0}: {1}'.format(queue_id, result['stderr'])
return ret | 0.002379 |
def glyph_order(keys, draw_order=[]):
"""
Orders a set of glyph handles using regular sort and an explicit
sort order. The explicit draw order must take the form of a list
of glyph names while the keys should be glyph names with a custom
suffix. The draw order may only match subset of the keys and any
matched items will take precedence over other entries.
"""
keys = sorted(keys)
def order_fn(glyph):
matches = [item for item in draw_order if glyph.startswith(item)]
return ((draw_order.index(matches[0]), glyph) if matches else
(1e9+keys.index(glyph), glyph))
return sorted(keys, key=order_fn) | 0.002994 |
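A usage sketch with hypothetical glyph handle names, showing how entries matching the explicit draw order come first (in that order) while everything else falls back to plain sorted order; the function is copied verbatim so the demo runs on its own:

```python
def glyph_order(keys, draw_order=[]):
    keys = sorted(keys)
    def order_fn(glyph):
        matches = [item for item in draw_order if glyph.startswith(item)]
        return ((draw_order.index(matches[0]), glyph) if matches else
                (1e9 + keys.index(glyph), glyph))
    return sorted(keys, key=order_fn)

keys = ['annotation_1', 'patch_1', 'line_1', 'line_2']
print(glyph_order(keys, draw_order=['line', 'patch']))
# ['line_1', 'line_2', 'patch_1', 'annotation_1']
```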
def search_normalize(self, results):
"""Append user id to search results to be able to initialize found
        :class:`SSHKey` successfully
"""
for sshkey in results:
sshkey[u'user_id'] = self.user.id # pylint:disable=no-member
return super(SSHKey, self).search_normalize(results) | 0.006173 |