text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def close(self):
"""Shutdown and free all resources."""
if self._controller is not None:
self._controller.quit()
self._controller = None
if self._process is not None:
self._process.close()
self._process = None | 0.020408 |
def calledBefore(self, spy): #pylint: disable=invalid-name
"""
Compares the order in which two spies were called
E.g.
spy_a()
spy_b()
spy_a.calledBefore(spy_b) # True
spy_b.calledBefore(spy_a) # False
spy_a()
spy_b.calledBefore(spy_a) # True
Args: spy: a Spy to compare with
Return: Boolean True if this spy's first call was called before the given spy's last call
"""
this_call = self.firstCall if self.firstCall is not None else False
given_call = spy.lastCall if spy.lastCall is not None else False
return (this_call and not given_call) or (this_call and given_call and this_call.callId < given_call.callId) | 0.007989 |
def cmddict(self):
"""PrimitiveType base for the ComplexType"""
if self._cmddict is None:
self._cmddict = cmd.getDefaultDict()
return self._cmddict | 0.01087 |
def init_BEM_obj(self):
"""
Define BEM for each DOE type (read the fraction)
self.BEM # list of BEMDef objects
self.r_glaze # Glazing ratio for total building stock
self.SHGC # SHGC addition for total building stock
self.alb_wall # albedo wall addition for total building stock
"""
if not os.path.exists(self.readDOE_file_path):
raise Exception("readDOE.pkl file: '{}' does not exist.".format(readDOE_file_path))
readDOE_file = open(self.readDOE_file_path, 'rb') # open pickle file in binary form
refDOE = pickle.load(readDOE_file)
refBEM = pickle.load(readDOE_file)
refSchedule = pickle.load(readDOE_file)
readDOE_file.close()
# Define building energy models
k = 0
self.r_glaze_total = 0. # Glazing ratio for total building stock
self.SHGC_total = 0. # SHGC addition for total building stock
self.alb_wall_total = 0. # albedo wall addition for total building stock
h_floor = self.flr_h or 3.05 # average floor height
total_urban_bld_area = math.pow(self.charLength, 2)*self.bldDensity * \
self.bldHeight/h_floor # total building floor area
area_matrix = utilities.zeros(16, 3)
self.BEM = [] # list of BEMDef objects
self.Sch = [] # list of Schedule objects
for i in range(16): # 16 building types
for j in range(3): # 3 built eras
if self.bld[i][j] > 0.:
# Add to BEM list
self.BEM.append(refBEM[i][j][self.zone])
self.BEM[k].frac = self.bld[i][j]
self.BEM[k].fl_area = self.bld[i][j] * total_urban_bld_area
# Overwrite with optional parameters if provided
if self.glzR:
self.BEM[k].building.glazingRatio = self.glzR
if self.albRoof:
self.BEM[k].roof.albedo = self.albRoof
if self.vegRoof:
self.BEM[k].roof.vegCoverage = self.vegRoof
if self.SHGC:
self.BEM[k].building.shgc = self.SHGC
if self.albWall:
self.BEM[k].wall.albedo = self.albWall
if self.flr_h:
self.BEM[k].building.floorHeight = self.flr_h
# Keep track of total urban r_glaze, SHGC, and alb_wall for UCM model
self.r_glaze_total += self.BEM[k].frac * self.BEM[k].building.glazingRatio
self.SHGC_total += self.BEM[k].frac * self.BEM[k].building.shgc
self.alb_wall_total += self.BEM[k].frac * self.BEM[k].wall.albedo
# Add to schedule list
self.Sch.append(refSchedule[i][j][self.zone])
k += 1 | 0.003566 |
def _add_id_to_index(self, indexedField, pk, val, conn=None):
'''
_add_id_to_index - Adds an id to an index
internal
'''
if conn is None:
conn = self._get_connection()
conn.sadd(self._get_key_for_index(indexedField, val), pk) | 0.03719 |
def rows(self):
"""Return configuration in a form that can be used to reconstitute a
Metadata object. Returns all of the rows for a dataset.
This is distinct from get_config_value, which returns the value
for the library.
"""
from ambry.orm import Config as SAConfig
from sqlalchemy import or_
rows = []
configs = self.dataset.session\
.query(SAConfig)\
.filter(or_(SAConfig.group == 'config', SAConfig.group == 'process'),
SAConfig.d_vid == self.dataset.vid)\
.all()
for r in configs:
parts = r.key.split('.', 3)
if r.group == 'process':
parts = ['process'] + parts
cr = ((parts[0] if len(parts) > 0 else None,
parts[1] if len(parts) > 1 else None,
parts[2] if len(parts) > 2 else None
), r.value)
rows.append(cr)
return rows | 0.003003 |
def is_file(jottapath, JFS):
"""Check if a file exists on jottacloud"""
log.debug("is_file %r", jottapath)
try:
jf = JFS.getObject(jottapath)
except JFSNotFoundError:
return False
return isinstance(jf, JFSFile) | 0.004065 |
def get_encoding_name(self, encoding):
"""Given an encoding provided by the user, will return a
canonical encoding name; and also validate that the encoding
is supported.
TODO: Support encoding aliases: pc437 instead of cp437.
"""
encoding = CodePages.get_encoding_name(encoding)
if encoding not in self.codepages:
raise ValueError((
'Encoding "{}" cannot be used for the current profile. '
'Valid encodings are: {}'
).format(encoding, ','.join(self.codepages.keys())))
return encoding | 0.003279 |
def alignment_a(self):
"""Computes the rotation matrix that aligns the unit cell with the
Cartesian axes, starting with cell vector a.
* a parallel to x
* b in xy-plane with b_y positive
* c with c_z positive
"""
from molmod.transformations import Rotation
new_x = self.matrix[:, 0].copy()
new_x /= np.linalg.norm(new_x)
new_z = np.cross(new_x, self.matrix[:, 1])
new_z /= np.linalg.norm(new_z)
new_y = np.cross(new_z, new_x)
new_y /= np.linalg.norm(new_y)
return Rotation(np.array([new_x, new_y, new_z])) | 0.003175 |
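A minimal, self-contained sketch of the same construction using plain numpy (molmod's Rotation wrapper is replaced by the raw 3x3 rotation matrix; the cell matrix is a made-up example):
import numpy as np
cell = np.array([[1.0, 0.3, 0.2],
                 [0.5, 1.2, 0.1],
                 [0.0, 0.4, 1.5]])               # columns are the cell vectors a, b, c
new_x = cell[:, 0] / np.linalg.norm(cell[:, 0])  # a parallel to x
new_z = np.cross(new_x, cell[:, 1])              # perpendicular to the (a, b) plane
new_z /= np.linalg.norm(new_z)
new_y = np.cross(new_z, new_x)                   # completes a right-handed frame
rotation = np.array([new_x, new_y, new_z])       # rows are the new axes
aligned = rotation @ cell                        # a along x, b in the xy-plane with b_y > 0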
def read_kv2(self, path, version=None, mount_path='secret'):
"""
Read some data from a key/value version 2 secret engine.
"""
params = {}
if version is not None:
params['version'] = version
read_path = '{}/data/{}'.format(mount_path, path)
return self.read(read_path, **params) | 0.00578 |
def redo(self):
"""Called when an image is set in the channel."""
image = self.channel.get_current_image()
if image is None:
return True
path = image.get('path', None)
if path is None:
self.fv.show_error(
"Cannot open image: no value for metadata key 'path'")
return
# TODO: How to properly reset GUI components?
# They are still showing info from prev FITS.
# No-op for ASDF
if path.endswith('asdf'):
return True
if path != self.img_path:
# <-- New file is being looked at
self.img_path = path
# close previous file opener, if any
if self.file_obj is not None:
try:
self.file_obj.close()
except Exception:
pass
self.file_obj = io_fits.get_fitsloader(logger=self.logger)
# TODO: specify 'readonly' somehow?
self.file_obj.open_file(path)
upper = len(self.file_obj) - 1
self.prep_hdu_menu(self.w.hdu, self.file_obj.hdu_info)
self.num_hdu = upper
self.logger.debug("there are %d hdus" % (upper + 1))
self.w.numhdu.set_text("%d" % (upper + 1))
self.w.hdu.set_enabled(len(self.file_obj) > 0)
name = image.get('name', iohelper.name_image_from_path(path))
idx = image.get('idx', None)
# remove index designation from root of name, if any
match = re.match(r'^(.+)\[(.+)\]$', name)
if match:
name = match.group(1)
self.name_pfx = name
htype = None
if idx is not None:
# set the HDU in the drop down if known
info = self.file_obj.hdu_db.get(idx, None)
if info is not None:
htype = info.htype.lower()
toc_ent = self._toc_fmt % info
self.w.hdu.show_text(toc_ent)
# rebuild the NAXIS controls, if necessary
# No two images in the same channel can have the same name.
# Here we keep track of the name to decide if we need to rebuild
if self.img_name != name:
self.img_name = name
dims = [0, 0]
data = image.get_data()
if data is None:
# <- empty data part to this HDU
self.logger.warning("Empty data part in HDU %s" % (str(idx)))
elif htype in ('bintablehdu', 'tablehdu',):
pass
elif htype not in ('imagehdu', 'primaryhdu', 'compimagehdu'):
self.logger.warning("HDU %s is not an image (%s)" % (
str(idx), htype))
else:
mddata = image.get_mddata()
if mddata is not None:
dims = list(mddata.shape)
dims.reverse()
self.build_naxis(dims, image) | 0.000675 |
def _unpack_actions(raw):
'''
deserialize 1 or more actions; return a list of
Action* objects
'''
actions = []
while len(raw) > 0:
atype, alen = struct.unpack('!HH', raw[:4])
atype = OpenflowActionType(atype)
action = _ActionClassMap.get(atype)()
action.from_bytes(raw[:alen])
raw = raw[alen:]
actions.append(action)
return actions | 0.002451 |
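Each action is a type-length-value record: the first four bytes carry a 16-bit type and a 16-bit total length in network byte order, and the loop peels one record of `alen` bytes off the front of the buffer per iteration. A standalone sketch of that framing with the struct module (the payload bytes are made up):
import struct
raw = struct.pack('!HH', 0, 8) + b'\x00\x01\x00\x00'   # one 8-byte record of type 0
atype, alen = struct.unpack('!HH', raw[:4])
record, raw = raw[:alen], raw[alen:]
print(atype, alen, raw)   # 0 8 b''  - nothing left after consuming the record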
def _structure_unicode(self, obj, cl):
"""Just call ``cl`` with the given ``obj``"""
if not isinstance(obj, (bytes, unicode)):
return cl(str(obj))
else:
return obj | 0.009479 |
def get_config(context, **kw):
"""Fetch the config dict from the Bika Setup for the given portal_type
"""
# get the ID formatting config
config_map = api.get_bika_setup().getIDFormatting()
# allow portal_type override
portal_type = get_type_id(context, **kw)
# check if we have a config for the given portal_type
for config in config_map:
if config['portal_type'].lower() == portal_type.lower():
return config
# return a default config
default_config = {
'form': '%s-{seq}' % portal_type.lower(),
'sequence_type': 'generated',
'prefix': '%s' % portal_type.lower(),
}
return default_config | 0.001464 |
def _get_headers(self):
"""Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
library version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts."""
user_agent = __api_lib_name__ + '/' + __version__ + '/' + \
PYTHON_VERSION
headers = {'User-Agent': user_agent,
'Content-Type': 'application/x-www-form-urlencoded'}
if self.key:
headers['Authorization'] = 'Bearer ' + self.key
return headers | 0.002825 |
def surface_normal(self, param):
"""Unit vector perpendicular to the detector surface at ``param``.
The orientation is chosen as follows:
- In 2D, the system ``(normal, tangent)`` should be
right-handed.
- In 3D, the system ``(tangent[0], tangent[1], normal)``
should be right-handed.
Here, ``tangent`` is the return value of `surface_deriv` at
``param``.
Parameters
----------
param : `array-like` or sequence
Parameter value(s) at which to evaluate. If ``ndim >= 2``,
a sequence of length `ndim` must be provided.
Returns
-------
normal : `numpy.ndarray`
Unit vector(s) perpendicular to the detector surface at
``param``.
If ``param`` is a single parameter, an array of shape
``(space_ndim,)`` representing a single vector is returned.
Otherwise the shape of the returned array is
- ``param.shape + (space_ndim,)`` if `ndim` is 1,
- ``param.shape[:-1] + (space_ndim,)`` otherwise.
"""
# Checking is done by `surface_deriv`
if self.ndim == 1 and self.space_ndim == 2:
return -perpendicular_vector(self.surface_deriv(param))
elif self.ndim == 2 and self.space_ndim == 3:
deriv = self.surface_deriv(param)
if deriv.ndim > 2:
# Vectorized, need to reshape (N, 2, 3) to (2, N, 3)
deriv = moveaxis(deriv, -2, 0)
normal = np.cross(*deriv, axis=-1)
normal /= np.linalg.norm(normal, axis=-1, keepdims=True)
return normal
else:
raise NotImplementedError(
'no default implementation of `surface_normal` available '
'for `ndim = {}` and `space_ndim = {}`'
''.format(self.ndim, self.space_ndim)) | 0.001034 |
def expect_handshake(self, headers):
"""Expect a handshake from the remote host.
:param headers:
Headers to respond with
:returns:
A future that resolves (with a value of None) when the handshake
is complete.
"""
init_req = yield self.reader.get()
if init_req.message_type != Types.INIT_REQ:
raise errors.UnexpectedError(
"You need to shake my hand first. Got %s" % repr(init_req)
)
self._extract_handshake_headers(init_req)
self._handshake_performed = True
self.writer.put(
messages.InitResponseMessage(
PROTOCOL_VERSION, headers, init_req.id),
)
# The receive loop is started only after the handshake has been
# completed.
self._loop()
raise tornado.gen.Return(init_req) | 0.002242 |
def SetHighlight( self, node, point=None, propagate=True ):
"""Set the currently-highlighted node"""
if node == self.highlightedNode:
return
self.highlightedNode = node
# TODO: restrict refresh to the squares for previous node and new node...
self.UpdateDrawing()
if node and propagate:
wx.PostEvent( self, SquareHighlightEvent( node=node, point=point, map=self ) ) | 0.022883 |
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 1918.
"""
private_10 = IPv4Network(u'10.0.0.0/8')
private_172 = IPv4Network(u'172.16.0.0/12')
private_192 = IPv4Network(u'192.168.0.0/16')
return (self in private_10 or
self in private_172 or
self in private_192) | 0.004396 |
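The same check can be reproduced with the standard-library ipaddress module (a sketch, not the snippet's own IPv4 classes):
import ipaddress
private_nets = [ipaddress.ip_network(u'10.0.0.0/8'),
                ipaddress.ip_network(u'172.16.0.0/12'),
                ipaddress.ip_network(u'192.168.0.0/16')]
addr = ipaddress.ip_address(u'192.168.1.20')
print(any(addr in net for net in private_nets))                               # True
print(any(ipaddress.ip_address(u'8.8.8.8') in net for net in private_nets))   # False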
def process_frames_argument(frames):
"""
Check and process 'frames' argument
into a proper iterable for an animation object
## Arguments
# frames
: a seed for an integer-type iterable that is used as a sequence of frame indices
- if integer or integer-valued float (e.g. 1.0):
The 'frames' is interpreted as the number of total frames
and the sequence frame indices becomes [ 0, 1, 2, ..., 'frames' - 1 ]
which is equivalent to range('frames').
- if array-like:
All elements in 'frames' should be integer or integer-valued float.
Then, the 'frames' itself is used as a sequence of frame indices.
"""
result = None
if np.iterable(frames):
try: frames_arr = np.array(frames)
except Exception: raise TypeError("'frames' should be convertible to numpy.array")
for idx in range(len(frames_arr)):
frame_idx = frames_arr[idx]
assert is_real_number(frame_idx)
assert int(frame_idx) == frame_idx
frames_arr[idx] = int(frame_idx)
#self.frames = frames_arr
result = frames_arr
elif is_real_number(frames):
assert int(frames) == frames
frames = int(frames)
#self.frames = range(frames)
result = range(frames)
return result | 0.006084 |
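A hedged usage sketch, assuming the function and its is_real_number helper are importable from the animation module this snippet belongs to (names taken from the code above):
print(list(process_frames_argument(5)))       # [0, 1, 2, 3, 4]  - an int becomes range(5)
print(process_frames_argument([0, 2.0, 4]))   # array of integer-valued frame indices [0, 2, 4]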
def log_file(self, url=None):
"""
Write to a local log file
"""
if url is None:
url = self.url
f = re.sub("file://", "", url)
try:
with open(f, "a") as of:
of.write(str(self.store.get_json_tuples(True)))
except IOError as e:
print(e)
print("Could not write the content to the file..") | 0.00495 |
def construct_txt_file(self):
"""Construct the header of the txt file"""
textlines = ['Prediction of noncovalent interactions for PDB structure %s' % self.mol.pymol_name.upper(), ]
textlines.append("=" * len(textlines[0]))
textlines.append('Created on %s using PLIP v%s\n' % (time.strftime("%Y/%m/%d"), __version__))
textlines.append('If you are using PLIP in your work, please cite:')
textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.')
textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n')
if len(self.excluded) != 0:
textlines.append('Excluded molecules as ligands: %s\n' % ','.join([lig for lig in self.excluded]))
if config.DNARECEPTOR:
textlines.append('DNA/RNA in structure was chosen as the receptor part.\n')
return textlines | 0.00863 |
def _compile_literal(self, schema):
""" Compile literal schema: type and value matching """
# Prepare self
self.compiled_type = const.COMPILED_TYPE.LITERAL
self.name = get_literal_name(schema)
# Error partials
schema_type = type(schema)
err_type = self.Invalid(_(u'Wrong value type'), get_type_name(schema_type))
err_value = self.Invalid(_(u'Invalid value'), self.name)
# Matcher
if self.matcher:
def match_literal(v):
return type(v) == schema_type and v == schema, v
return match_literal
# Validator
def validate_literal(v):
# Type check
if type(v) != schema_type:
# expected=<type>, provided=<type>
raise err_type(get_type_name(type(v)))
# Equality check
if v != schema:
# expected=<value>, provided=<value>
raise err_value(get_literal_name(v))
# Fine
return v
return validate_literal | 0.003738 |
def get_system_encoding():
"""
The encoding of the default system locale but falls back to the given
fallback encoding if the encoding is unsupported by python or could
not be determined. See tickets #10335 and #5846
"""
try:
encoding = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(encoding)
except Exception:
encoding = 'ascii'
return encoding | 0.002439 |
def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush() | 0.010256 |
def str_if_nested_or_str(s):
"""Turn input into a native string if possible."""
if isinstance(s, ALL_STRING_TYPES):
return str(s)
if isinstance(s, (list, tuple)):
return type(s)(map(str_if_nested_or_str, s))
if isinstance(s, (dict, )):
return stringify_dict_contents(s)
return s | 0.003106 |
async def show_help(self):
"""shows this message"""
e = discord.Embed()
messages = ['Welcome to the interactive paginator!\n']
messages.append('This interactively allows you to see pages of text by navigating with ' \
'reactions. They are as follows:\n')
for (emoji, func) in self.reaction_emojis:
messages.append('%s %s' % (emoji, func.__doc__))
e.description = '\n'.join(messages)
e.colour = 0x738bd7 # blurple
e.set_footer(text='We were on page %s before this message.' % self.current_page)
await self.bot.edit_message(self.message, embed=e)
async def go_back_to_current_page():
await asyncio.sleep(60.0)
await self.show_current_page()
self.bot.loop.create_task(go_back_to_current_page()) | 0.008294 |
def r_annotations(self):
""" Route to retrieve annotations by target
:param target_urn: The CTS URN for which to retrieve annotations
:type target_urn: str
:return: a JSON string containing count and list of resources
:rtype: {str: Any}
"""
target = request.args.get("target", None)
wildcard = request.args.get("wildcard", ".", type=str)
include = request.args.get("include")
exclude = request.args.get("exclude")
limit = request.args.get("limit", None, type=int)
start = request.args.get("start", 1, type=int)
expand = request.args.get("expand", False, type=bool)
if target:
try:
urn = MyCapytain.common.reference.URN(target)
except ValueError:
return "invalid urn", 400
count, annotations = self.__queryinterface__.getAnnotations(urn, wildcard=wildcard, include=include,
exclude=exclude, limit=limit, start=start,
expand=expand)
else:
# Note that this implementation is not done for too much annotations
# because we do not implement pagination here
count, annotations = self.__queryinterface__.getAnnotations(None, limit=limit, start=start, expand=expand)
mapped = []
response = {
"@context": type(self).JSONLD_CONTEXT,
"id": url_for(".r_annotations", start=start, limit=limit),
"type": "AnnotationCollection",
"startIndex": start,
"items": [
],
"total": count
}
for a in annotations:
mapped.append({
"id": url_for(".r_annotation", sha=a.sha),
"body": url_for(".r_annotation_body", sha=a.sha),
"type": "Annotation",
"target": a.target.to_json(),
"dc:type": a.type_uri,
"owl:sameAs": [a.uri],
"nemo:slug": a.slug
})
response["items"] = mapped
response = jsonify(response)
return response | 0.003568 |
def direct_dispatch(self, arg, callback):
"""Directly dispatch a work item.
This method MUST only be called from inside of another work item and
will synchronously invoke the work item as if it was passed to
dispatch(). Calling this method from any other thread has undefined
consequences since it will be unsynchronized with respect to items
dispatched from inside the background work queue itself.
"""
try:
self._current_callbacks.appendleft(callback)
exc_info = None
retval = None
retval = self._routine(arg)
except: # pylint:disable=bare-except;We need to capture the exception and feed it back to the caller
exc_info = sys.exc_info()
finally:
self._current_callbacks.popleft()
if callback is not None and retval is not self.STILL_PENDING:
callback(exc_info, retval)
return retval, exc_info | 0.004073 |
def _send_request(self, path, data, method):
"""
Uses the HTTP transport to query the Route53 API. Runs the response
through lxml's parser, before we hand it off for further picking
apart by our call-specific parsers.
:param str path: The RESTful path to tack on to the :py:attr:`endpoint`.
:param data: The params to send along with the request.
:type data: Either a dict or bytes, depending on the request type.
:param str method: One of 'GET', 'POST', or 'DELETE'.
:rtype: lxml.etree._Element
:returns: An lxml Element root.
"""
response_body = self._transport.send_request(path, data, method)
root = etree.fromstring(response_body)
#print(prettyprint_xml(root))
return root | 0.005006 |
def use_unsequestered_assessment_part_view(self):
"""Pass through to provider AssessmentPartLookupSession.use_unsequestered_assessment_part_view"""
# Does this need to be re-implemented to match the other non-sub-package view setters?
self._containable_views['assessment_part'] = UNSEQUESTERED
self._get_sub_package_provider_session('assessment_authoring',
'assessment_part_lookup_session')
for session in self._provider_sessions:
for provider_session_name, provider_session in self._provider_sessions[session].items():
try:
provider_session.use_unsequestered_assessment_part_view()
except AttributeError:
pass | 0.007682 |
def update_refund(self, refund_id, refund_deets):
"""Updates an existing refund transaction."""
request = self._put('transactions/refunds/' + str(refund_id), refund_deets)
return self.responder(request) | 0.013274 |
def get_diff(left, right):
"""Get the difference of two summaries.
Subtracts the values of the left summary from the values of the right
summary.
If similar rows appear on both sides, they are included in the summary with
0 for number of elements and total size.
If the number of elements of a row of the diff is 0 but the total size is
not, it means that the objects likely have changed, but not their number,
thus resulting in a changed size.
"""
res = []
for row_r in right:
found = False
for row_l in left:
if row_r[0] == row_l[0]:
res.append([row_r[0], row_r[1] - row_l[1], row_r[2] - row_l[2]])
found = True
if not found:
res.append(row_r)
for row_l in left:
found = False
for row_r in right:
if row_l[0] == row_r[0]:
found = True
if not found:
res.append([row_l[0], -row_l[1], -row_l[2]])
return res | 0.001998 |
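A small worked example with the (row name, count, total size) rows such summaries use; the numbers are made up:
left  = [['str', 10, 800], ['list', 2, 400]]
right = [['str', 12, 900], ['dict', 1, 280]]
print(get_diff(left, right))
# [['str', 2, 100],     - two more strings, 100 more bytes (right minus left)
#  ['dict', 1, 280],    - only present on the right side
#  ['list', -2, -400]]  - only present on the left side, so it is negated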
def getShocks(self):
'''
Gets permanent and transitory shocks (combining idiosyncratic and aggregate shocks), but
only consumers who update their macroeconomic beliefs this period incorporate all previously
unnoticed aggregate permanent shocks. Agents correctly observe the level of all
real variables (market resources, consumption, assets, etc), but misperceive the aggregate
productivity level.
Parameters
----------
None
Returns
-------
None
'''
# The strange syntax here is so that both StickyEconsumerType and StickyEmarkovConsumerType
# run the getShocks method of their first superclass: AggShockConsumerType and
# AggShockMarkovConsumerType respectively. This will be simplified in Python 3.
super(self.__class__,self).getShocks() # Get permanent and transitory combined shocks
newborns = self.t_age == 0
self.TranShkNow[newborns] = self.TranShkAggNow*self.wRteNow # Turn off idiosyncratic shocks for newborns
self.PermShkNow[newborns] = self.PermShkAggNow
self.getUpdaters() # Randomly draw which agents will update their beliefs
# Calculate innovation to the productivity level perception error
pLvlErrNew = self.getpLvlError()
self.pLvlErrNow *= pLvlErrNew # Perception error accumulation
# Calculate (mis)perceptions of the permanent shock
PermShkPcvd = self.PermShkNow/pLvlErrNew
PermShkPcvd[self.update] *= self.pLvlErrNow[self.update] # Updaters see the true permanent shock and all missed news
self.pLvlErrNow[self.update] = 1.0
self.PermShkNow = PermShkPcvd | 0.011079 |
def filter(self, func):
"""Create a Catalog of a subset of entries based on a condition
Note that, whatever specific class this is performed on, the return
instance is a Catalog. The entries are passed unmodified, so they
will still reference the original catalog instance and include its
details such as directory,.
Parameters
----------
func : function
This should take a CatalogEntry and return True or False. Those
items returning True will be included in the new Catalog, with the
same entry names
Returns
-------
New Catalog
"""
return Catalog.from_dict({key: entry for key, entry in self.items()
if func(entry)}) | 0.002513 |
def _ConvertFloat(value):
"""Convert an floating point number."""
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead.')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError:
# Check alternative spellings.
if value == _NEG_INFINITY:
return float('-inf')
elif value == _INFINITY:
return float('inf')
elif value == _NAN:
return float('nan')
else:
raise ParseError('Couldn\'t parse float: {0}.'.format(value)) | 0.017078 |
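A hedged usage sketch, assuming _INFINITY, _NEG_INFINITY and _NAN hold the protobuf JSON spellings 'Infinity', '-Infinity' and 'NaN' (the constants are not shown above):
print(_ConvertFloat('3.14'))       # 3.14
print(_ConvertFloat('Infinity'))   # inf
print(_ConvertFloat('NaN'))        # nan
# _ConvertFloat('nan') raises ParseError and asks for the "NaN" spelling instead.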
def run(self, progress=True, verbose=False):
"""Compute all steps of the simulation. Be careful: if tmax is not set,
this function will result in an infinite loop.
Returns
-------
(t, fields):
last time and result fields.
"""
total_iter = int(self.tmax // self.user_dt) if self.tmax else None
log = logging.info if verbose else logging.debug
if progress:
with tqdm(initial=(self.i if self.i < total_iter else total_iter),
total=total_iter) as pbar:
for t, fields in self:
pbar.update(1)
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended")
for t, fields in self:
log("%s running: t: %g" % (self.id, t))
try:
return t, fields
except UnboundLocalError:
warnings.warn("Simulation already ended") | 0.001855 |
def get_observations(self):
"""
Parses the HTML table into a list of dictionaries, each of which
represents a single observation.
"""
if self.empty:
return []
rows = list(self.tbody)
observations = []
for row_observation, row_details in zip(rows[::2], rows[1::2]):
data = {}
cells = OBSERVATION_XPATH(row_observation)
data['name'] = _clean_cell(cells[0])
data['date'] = _clean_cell(cells[1])
data['magnitude'] = _clean_cell(cells[3])
data['obscode'] = _clean_cell(cells[6])
cells = DETAILS_XPATH(row_details)
data['comp1'] = _clean_cell(cells[0])
data['chart'] = _clean_cell(cells[3]).replace('None', '')
data['comment_code'] = _clean_cell(cells[4])
data['notes'] = _clean_cell(cells[5])
observations.append(data)
return observations | 0.002081 |
def add_send_message(self, connection, send_message):
"""Adds a send_message function to the Dispatcher's
dictionary of functions indexed by connection.
Args:
connection (str): A locally unique identifier
provided by the receiver of messages.
send_message (fn): The method that should be called
by the dispatcher to respond to messages which
arrive via connection.
"""
self._send_message[connection] = send_message
LOGGER.debug("Added send_message function "
"for connection %s", connection) | 0.00316 |
def build(self, input_shape=None):
"""Build `Layer`."""
input_shape = tf.TensorShape(input_shape).as_list()
self.input_spec = layers().InputSpec(shape=input_shape)
if not self.layer.built:
self.layer.build(input_shape)
self.layer.built = False
if not hasattr(self.layer, "kernel"):
raise ValueError("`WeightNorm` must wrap a layer that"
" contains a `kernel` for weights")
# The kernel's filter or unit dimension is -1
self.layer_depth = int(self.layer.kernel.shape[-1])
self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))
self.layer.v = self.layer.kernel
self.layer.g = self.layer.add_variable(
name="g",
shape=(self.layer_depth,),
initializer=tf.ones_initializer,
dtype=self.layer.kernel.dtype,
trainable=True)
# with ops.control_dependencies([self.layer.g.assign(
# self._init_norm(self.layer.v))]):
# self._compute_weights()
self._compute_weights()
self.layer.built = True
super(WeightNorm, self).build()
self.built = True | 0.012324 |
def write(self):
""" Encrypts and writes the current state back onto the filesystem """
with open(self.filepath, 'wb') as outfile:
outfile.write(
self.fernet.encrypt(
yaml.dump(self.data, encoding='utf-8'))) | 0.00738 |
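A minimal round-trip sketch with the cryptography package, assuming self.fernet is a Fernet instance and self.data is a plain dict (both are assumptions, neither is shown in the snippet):
import yaml
from cryptography.fernet import Fernet
key = Fernet.generate_key()
fernet = Fernet(key)
data = {'user': 'alice', 'token': 's3cr3t'}
token = fernet.encrypt(yaml.dump(data, encoding='utf-8'))   # what write() puts on disk
restored = yaml.safe_load(fernet.decrypt(token))            # the matching read side
assert restored == data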
def send_persisted_messages(self, websocket):
"""
This method is called immediately after a websocket is openend by the client, so that
persisted messages can be sent back to the client upon connection.
"""
for channel in self._subscription.channels:
message = self._connection.get(channel)
if message:
websocket.send(message) | 0.007389 |
def get_randomized_guid_sample(self, item_count):
""" Fetch a subset of randomzied GUIDs from the whitelist """
dataset = self.get_whitelist()
random.shuffle(dataset)
return dataset[:item_count] | 0.00885 |
def _visible(self, element):
"""Used to filter text elements that have invisible text on the page.
"""
if element.name in self._disallowed_names:
return False
elif re.match(u'<!--.*-->', six.text_type(element.extract())):
return False
return True | 0.006452 |
def publish(self, message, exchange):
"""
Publish a :class:`fedora_messaging.message.Message` to an `exchange`_
on the message broker.
Args:
message (message.Message): The message to publish.
exchange (str): The name of the AMQP exchange to publish to
Raises:
NoFreeChannels: If there are no available channels on this connection.
If this occurs, you can either reduce the number of consumers on this
connection or create an additional connection.
PublishReturned: If the broker rejected the message. This can happen if
there are resource limits that have been reached (full disk, for example)
or if the message will be routed to 0 queues and the exchange is set to
reject such messages.
.. _exchange: https://www.rabbitmq.com/tutorials/amqp-concepts.html#exchanges
"""
message.validate()
try:
yield self._channel.basic_publish(
exchange=exchange,
routing_key=message._encoded_routing_key,
body=message._encoded_body,
properties=message._properties,
)
except (pika.exceptions.NackError, pika.exceptions.UnroutableError) as e:
_std_log.error("Message was rejected by the broker (%s)", str(e))
raise PublishReturned(reason=e)
except pika.exceptions.ChannelClosed:
self._channel = yield self._allocate_channel()
yield self.publish(message, exchange)
except pika.exceptions.ConnectionClosed as e:
raise ConnectionException(reason=e) | 0.005285 |
def get_article_placeholders(self, article):
"""
In the project settings set up the variable
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {
'include': [ 'slot1', 'slot2', etc. ],
'exclude': [ 'slot3', 'slot4', etc. ],
}
or leave it empty
CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST = {}
"""
placeholders_search_list = getattr(settings, 'CMS_ARTICLES_PLACEHOLDERS_SEARCH_LIST', {})
included = placeholders_search_list.get('include', [])
excluded = placeholders_search_list.get('exclude', [])
diff = set(included) - set(excluded)
if diff:
return article.placeholders.filter(slot__in=diff)
elif excluded:
return article.placeholders.exclude(slot__in=excluded)
else:
return article.placeholders.all() | 0.003484 |
def get_version():
"""Returns version number, without module import (which can lead to ImportError
if some dependencies are unavailable before install)."""
contents = read_file(os.path.join('admirarchy', '__init__.py'))
version = re.search('VERSION = \(([^)]+)\)', contents)
version = version.group(1).replace(', ', '.').strip()
return version | 0.010929 |
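The regular expression pulls the version tuple out of a line such as VERSION = (1, 2, 3); a self-contained sketch of the same extraction on made-up file contents:
import re
contents = "VERSION = (1, 2, 3)\n"
match = re.search(r'VERSION = \(([^)]+)\)', contents)
print(match.group(1).replace(', ', '.').strip())   # 1.2.3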
def parallel_periodicvar_recovery(simbasedir,
period_tolerance=1.0e-3,
liststartind=None,
listmaxobjects=None,
nworkers=None):
'''This is a parallel driver for `periodicvar_recovery`.
Parameters
----------
simbasedir : str
The base directory where all of the fake LCs and period-finding results
are.
period_tolerance : float
The maximum difference that this function will consider between an
actual period (or its aliases) and a recovered period to consider it as
as a 'recovered' period.
liststartind : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the period-finding result pickles in
`simbasedir/periodfinding`.
listmaxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartind` to effectively distribute working on a large list of
input period-finding result pickles over several sessions or machines.
nworkers : int
This is the number of parallel period-finding worker processes to use.
Returns
-------
str
Returns the filename of the pickle produced containing all of the period
recovery results.
'''
# figure out the periodfinding pickles directory
pfpkldir = os.path.join(simbasedir,'periodfinding')
if not os.path.exists(pfpkldir):
LOGERROR('no "periodfinding" subdirectory in %s, can\'t continue' %
simbasedir)
return None
# find all the periodfinding pickles
pfpkl_list = glob.glob(os.path.join(pfpkldir,'*periodfinding*pkl*'))
if len(pfpkl_list) > 0:
if liststartind:
pfpkl_list = pfpkl_list[liststartind:]
if listmaxobjects:
pfpkl_list = pfpkl_list[:listmaxobjects]
tasks = [(x, simbasedir, period_tolerance) for x in pfpkl_list]
pool = mp.Pool(nworkers)
results = pool.map(periodrec_worker, tasks)
pool.close()
pool.join()
resdict = {x['objectid']:x for x in results if x is not None}
actual_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and x['actual_vartype'] in PERIODIC_VARTYPES)],
dtype=np.unicode_
)
recovered_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'actual' in x['best_recovered_status'])],
dtype=np.unicode_
)
alias_twice_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'twice' in x['best_recovered_status'])],
dtype=np.unicode_
)
alias_half_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'half' in x['best_recovered_status'])],
dtype=np.unicode_
)
all_objectids = [x['objectid'] for x in results]
outdict = {'simbasedir':os.path.abspath(simbasedir),
'objectids':all_objectids,
'period_tolerance':period_tolerance,
'actual_periodicvars':actual_periodicvars,
'recovered_periodicvars':recovered_periodicvars,
'alias_twice_periodicvars':alias_twice_periodicvars,
'alias_half_periodicvars':alias_half_periodicvars,
'details':resdict}
outfile = os.path.join(simbasedir,'periodicvar-recovery.pkl')
with open(outfile, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict
else:
LOGERROR(
'no periodfinding result pickles found in %s, can\'t continue' %
pfpkldir
)
return None | 0.003774 |
def printc(*strings, **keys):
"""
Print to terminal in colors. (python3 only).
Available colors are:
black, red, green, yellow, blue, magenta, cyan, white.
:param c: foreground color ['']
:param bc: background color ['']
:param hidden: do not show text [False]
:param bold: boldface [True]
:param blink: blinking text [False]
:param underline: underline text [False]
:param dim: make text look dimmer [False]
:param invert: invert background and foreground colors [False]
:param box: print a box with specified text character ['']
:param flush: flush buffer after printing [True]
:param end: end character to be printed [return]
:Example:
.. code-block:: python
from vtkplotter.colors import printc
printc('anything', c='red', bold=False, end='' )
printc('anything', 455.5, vtkObject, c='green')
printc(299792.48, c=4) # 4 is blue
.. hint:: |colorprint.py|_
|colorprint|
"""
end = keys.pop("end", "\n")
flush = keys.pop("flush", True)
if not _terminal_has_colors or sys.version_info[0]<3:
for s in strings:
if "~" in str(s): # "in" for some reasons changes s
for k in emoji.keys():
if k in s:
s = s.replace(k, '')
print(s, end=' ')
print(end=end)
if flush:
sys.stdout.flush()
return
c = keys.pop("c", None) # hack to be compatible with python2
bc = keys.pop("bc", None)
hidden = keys.pop("hidden", False)
bold = keys.pop("bold", True)
blink = keys.pop("blink", False)
underline = keys.pop("underline", False)
dim = keys.pop("dim", False)
invert = keys.pop("invert", False)
box = keys.pop("box", "")
if c is True:
c = 'green'
elif c is False:
c = 'red'
try:
txt = str()
ns = len(strings) - 1
separator = " "
offset = 0
for i, s in enumerate(strings):
if i == ns:
separator = ""
#txt += str(s) + separator
if "~" in str(s): # "in" for some reasons changes s
for k in emoji.keys():
if k in str(s):
s = s.replace(k, emoji[k])
offset += 1
txt += str(s) + separator
if c:
if isinstance(c, int):
cf = abs(c) % 8
elif isinstance(c, str):
cf = _terminal_cols[c.lower()]
else:
print("Error in printc(): unknown color c=", c)
exit()
if bc:
if isinstance(bc, int):
cb = abs(bc) % 8
elif isinstance(bc, str):
cb = _terminal_cols[bc.lower()]
else:
print("Error in printc(): unknown color c=", c)
exit()
special, cseq = "", ""
if hidden:
special += "\x1b[8m"
box = ""
else:
if c:
cseq += "\x1b[" + str(30 + cf) + "m"
if bc:
cseq += "\x1b[" + str(40 + cb) + "m"
if underline and not box:
special += "\x1b[4m"
if dim:
special += "\x1b[2m"
if invert:
special += "\x1b[7m"
if bold:
special += "\x1b[1m"
if blink:
special += "\x1b[5m"
if box and not ("\n" in txt):
if len(box) > 1:
box = box[0]
if box in ["_", "=", "-", "+", "~"]:
boxv = "|"
else:
boxv = box
if box == "_" or box == ".":
outtxt = special + cseq + " " + box * (len(txt) + offset + 2) + " \n"
outtxt += boxv + " " * (len(txt) + 2) + boxv + "\n"
else:
outtxt = special + cseq + box * (len(txt) + offset + 4) + "\n"
outtxt += boxv + " " + txt + " " + boxv + "\n"
if box == "_":
outtxt += "|" + box * (len(txt) + offset + 2) + "|" + "\x1b[0m" + end
else:
outtxt += box * (len(txt) + offset + 4) + "\x1b[0m" + end
sys.stdout.write(outtxt)
else:
sys.stdout.write(special + cseq + txt + "\x1b[0m" + end)
except:
print(*strings, end=end)
if flush:
sys.stdout.flush() | 0.002421 |
def _version_find_existing():
"""Returns set of existing versions in this repository. This
information is backed by previously used version tags in git.
Available tags are pulled from origin repository before.
:return:
available versions
:rtype:
set
"""
_tool_run('git fetch origin -t')
git_tags = [x for x in (y.strip() for y in (_tool_run('git tag -l')
.stdout.split('\n'))) if x]
return {tuple(int(n) if n else 0 for n in m.groups())
for m in (_version_re.match(t) for t in git_tags) if m} | 0.00165 |
def is_ecma_regex(regex):
"""Check if given regex is of type ECMA 262 or not.
:rtype: bool
"""
parts = regex.split('/')
if len(parts) == 1:
return False
if len(parts) < 3:
raise ValueError('Given regex isn\'t ECMA regex nor Python regex.')
parts.pop()
parts.append('')
raw_regex = '/'.join(parts)
if raw_regex.startswith('/') and raw_regex.endswith('/'):
return True
return False | 0.002212 |
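Usage sketch: ECMA 262 regexes are written between slashes with optional trailing flags, while Python regexes are bare patterns:
print(is_ecma_regex('/^[a-z]+$/i'))   # True  - slash-delimited with an 'i' flag
print(is_ecma_regex('^[a-z]+$'))      # False - plain Python pattern, no '/'
# is_ecma_regex('a/b') raises ValueError: neither an ECMA nor a Python regex.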
def keys(self, index=None):
"""Returns a list of keys in the database
"""
if index is not None and index not in self._indexes:
raise ValueError('Index {} does not exist'.format(index))
db = self._indexes[index][0] if index else self._main_db
with self._lmdb.begin(db=db) as txn:
return [
key.decode()
for key in txn.cursor().iternext(keys=True, values=False)
] | 0.004264 |
def _load_plain_yaml(cls, _yaml: YamlDocument) -> Any:
"""
Will just load the yaml without executing any extensions. You will get the plain dictionary
without augmentation. It is equivalent to just perform `yaml.safe_load`. Besides that you
can specify a stream, a file or just a string that contains yaml/json data.
Examples:
>>> jstr = '{"a":1, "b": {"c": 3, "d": "d"}}'
>>> d = DictMentor._load_plain_yaml(jstr)
>>> d['a'], d['b']['c'], d['b']['d']
(1, 3, 'd')
Args:
_yaml: Either a stream (e.g. a file pointer), the name of an existing file, or a string
containing yaml/json data.
Returns:
Returns the yaml_ data as a python dictionary.
"""
if Validator.is_stream(yaml_=_yaml):
return yaml.safe_load(_yaml)
if Validator.is_file(yaml_=_yaml):
with open(_yaml) as fhandle: # type: ignore
return yaml.safe_load(fhandle)
if Validator.instance_of(target_type=str, yaml_=_yaml):
return yaml.safe_load(_yaml)
raise TypeError("Argument '_yaml' is whether a stream, nor a file, nor a string") | 0.005705 |
def parse_metadata(metadata_obj: Metadata, metadata_dictionary: dict) -> None:
""" Adds to a Metadata object any DublinCore or dts:Extensions object
found in the given dictionary
:param metadata_obj:
:param metadata_dictionary:
"""
for key, value_set in metadata_dictionary.get("https://w3id.org/dts/api#dublincore", [{}])[0].items():
term = URIRef(key)
for value_dict in value_set:
metadata_obj.add(term, *dict_to_literal(value_dict))
for key, value_set in metadata_dictionary.get("https://w3id.org/dts/api#extensions", [{}])[0].items():
term = URIRef(key)
for value_dict in value_set:
metadata_obj.add(term, *dict_to_literal(value_dict)) | 0.004138 |
def save_as_header(self, response, **kwargs):
"""
* if save_as is False the header will not be added
* if save_as is a filename, it will be used in the header
* if save_as is True or None the filename will be determined from the
file path
"""
save_as = kwargs.get('save_as', None)
if save_as is False:
return
file_obj = kwargs.get('file_obj', None)
if save_as is True or save_as is None:
filename = os.path.basename(file_obj.path)
else:
filename = save_as
response['Content-Disposition'] = smart_str(
'attachment; filename=%s' % filename) | 0.002924 |
def manhattan(h1, h2): # # 7 us @array, 31 us @list \w 100 bins
r"""
Equal to the Minkowski distance with :math:`p=1`.
See also
--------
minowski
"""
h1, h2 = __prepare_histogram(h1, h2)
return scipy.sum(scipy.absolute(h1 - h2)) | 0.011538 |
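A worked example on two small histograms; scipy.sum and scipy.absolute are thin aliases of the numpy functions, so numpy is used directly here:
import numpy as np
h1 = np.array([1, 4, 3, 0])
h2 = np.array([2, 2, 3, 1])
print(np.sum(np.abs(h1 - h2)))   # 4 == |1-2| + |4-2| + |3-3| + |0-1|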
def check_df(self, df):
"""
Verifies that df is a pandas DataFrame instance and
that its index and column values are unique.
"""
if isinstance(df, pd.DataFrame):
if not df.index.is_unique:
repeats = df.index[df.index.duplicated()].values
msg = "Index values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
self.logger.error(msg)
raise Exception("GCToo GCToo.check_df " + msg)
if not df.columns.is_unique:
repeats = df.columns[df.columns.duplicated()].values
msg = "Columns values must be unique but aren't. The following entries appear more than once: {}".format(repeats)
raise Exception("GCToo GCToo.check_df " + msg)
else:
return True
else:
msg = "expected Pandas DataFrame, got something else: {} of type: {}".format(df, type(df))
self.logger.error(msg)
raise Exception("GCToo GCToo.check_df " + msg) | 0.004554 |
async def _read_next(self):
"""Read next row """
row = await self._result._read_rowdata_packet_unbuffered()
row = self._conv_row(row)
return row | 0.011364 |
def set_metric(self, slug, value, category=None, expire=None, date=None):
"""Assigns a specific value to the *current* metric. You can use this
to start a metric at a value greater than 0 or to reset a metric.
The given slug will be used to generate Redis keys at the following
granularities: Seconds, Minutes, Hours, Day, Week, Month, and Year.
Parameters:
* ``slug`` -- a unique value to identify the metric; used in
construction of redis keys (see below).
* ``value`` -- The value of the metric.
* ``category`` -- (optional) Assign the metric to a Category (a string)
* ``expire`` -- (optional) Specify the number of seconds in which the
metric will expire.
* ``date`` -- (optional) Specify the timestamp for the metric; default
used to build the keys will be the current date and time in UTC form.
Redis keys for each metric (slug) take the form:
m:<slug>:s:<yyyy-mm-dd-hh-mm-ss> # Second
m:<slug>:i:<yyyy-mm-dd-hh-mm> # Minute
m:<slug>:h:<yyyy-mm-dd-hh> # Hour
m:<slug>:<yyyy-mm-dd> # Day
m:<slug>:w:<yyyy-num> # Week (year - week number)
m:<slug>:m:<yyyy-mm> # Month
m:<slug>:y:<yyyy> # Year
"""
keys = self._build_keys(slug, date=date)
# Add the slug to the set of metric slugs
self.r.sadd(self._metric_slugs_key, slug)
# Construct a dictionary of key/values for use with mset
data = {}
for k in keys:
data[k] = value
self.r.mset(data)
# Add the category if applicable.
if category:
self._categorize(slug, category)
# Expire the Metric in ``expire`` seconds if applicable.
if expire:
for k in keys:
self.r.expire(k, expire) | 0.001027 |
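A hedged sketch of how the granular keys described in the docstring can be generated for one timestamp; the library's own _build_keys is not shown, this only reproduces the documented key layout:
from datetime import datetime
def build_keys(slug, date=None):
    # Reproduces the documented layout; not the library's helper.
    date = date or datetime.utcnow()
    return [
        date.strftime('m:{0}:s:%Y-%m-%d-%H-%M-%S'.format(slug)),          # second
        date.strftime('m:{0}:i:%Y-%m-%d-%H-%M'.format(slug)),             # minute
        date.strftime('m:{0}:h:%Y-%m-%d-%H'.format(slug)),                # hour
        date.strftime('m:{0}:%Y-%m-%d'.format(slug)),                     # day
        'm:{0}:w:{1}-{2}'.format(slug, date.year, date.isocalendar()[1]), # week
        date.strftime('m:{0}:m:%Y-%m'.format(slug)),                      # month
        date.strftime('m:{0}:y:%Y'.format(slug)),                         # year
    ]
print(build_keys('logins', datetime(2024, 3, 5, 14, 30, 15))[0])   # m:logins:s:2024-03-05-14-30-15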
def shadow_normal_module(cls, mod_name=None):
"""
Shadow a module with an instance of LazyModule
:param mod_name:
Name of the module to shadow. By default this is the module that is
making the call into this method. This is not hard-coded as that
module might be called '__main__' if it is executed via 'python -m'
:returns:
A fresh instance of :class:`LazyModule`.
"""
if mod_name is None:
frame = inspect.currentframe()
try:
mod_name = frame.f_back.f_locals['__name__']
finally:
del frame
orig_mod = sys.modules[mod_name]
lazy_mod = cls(orig_mod.__name__, orig_mod.__doc__, orig_mod)
for attr in dir(orig_mod):
setattr(lazy_mod, attr, getattr(orig_mod, attr))
sys.modules[mod_name] = lazy_mod
return lazy_mod | 0.002157 |
def isomap(geom, n_components=8, eigen_solver='auto',
random_state=None, path_method='auto',
distance_matrix=None, graph_distance_matrix = None,
centered_matrix=None, solver_kwds=None):
"""
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer, optional
The dimension of the projection subspace.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
path_method : string, method for computing graph shortest path. One of :
'auto', 'D', 'FW', 'BF', 'J'. See scipy.sparse.csgraph.shortest_path
for more information.
distance_matrix : sparse Ndarray (n_obs, n_obs), optional. Pairwise distance matrix
sparse zeros considered 'infinite'.
graph_distance_matrix : Ndarray (n_obs, n_obs), optional. Pairwise graph distance
matrix. Output of graph_shortest_path.
centered_matrix : Ndarray (n_obs, n_obs), optional. Centered version of
graph_distance_matrix
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
"""
# Step 1: use geometry to calculate the distance matrix
if ((distance_matrix is None) and (centered_matrix is None)):
if geom.adjacency_matrix is None:
distance_matrix = geom.compute_adjacency_matrix()
else:
distance_matrix = geom.adjacency_matrix
# Step 2: use graph_shortest_path to construct D_G
## WARNING: D_G is an (NxN) DENSE matrix!!
if ((graph_distance_matrix is None) and (centered_matrix is None)):
graph_distance_matrix = graph_shortest_path(distance_matrix,
method=path_method,
directed=False)
# Step 3: center graph distance matrix
if centered_matrix is None:
centered_matrix = center_matrix(graph_distance_matrix)
# Step 4: compute d largest eigenvectors/values of centered_matrix
lambdas, diffusion_map = eigen_decomposition(centered_matrix, n_components,
largest=True,
eigen_solver=eigen_solver,
random_state=random_state,
solver_kwds=solver_kwds)
# Step 5:
# return Y = [sqrt(lambda_1)*V_1, ..., sqrt(lambda_d)*V_d]
ind = np.argsort(lambdas); ind = ind[::-1] # sort largest
lambdas = lambdas[ind];
diffusion_map = diffusion_map[:, ind]
embedding = diffusion_map[:, 0:n_components] * np.sqrt(lambdas[0:n_components])
return embedding | 0.003431 |
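The center_matrix helper is not shown above; in classical MDS (and hence Isomap) the usual choice is the double centering B = -1/2 * J * D^2 * J with J = I - (1/n) * 1 1^T. A sketch under that assumption:
import numpy as np
def center_matrix(graph_distances):
    # Classical-MDS double centering of squared graph distances
    # (an assumption about what the helper above computes).
    D2 = np.asarray(graph_distances) ** 2
    n = D2.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n
    return -0.5 * J.dot(D2).dot(J)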
def _remove_old_snapshots(connection, volume):
""" Remove old snapshots
:type connection: boto.ec2.connection.EC2Connection
:param connection: EC2 connection object
:type volume: boto.ec2.volume.Volume
:param volume: Volume to check
:returns: None
"""
if 'AutomatedEBSSnapshotsRetention' not in volume.tags:
logger.warning(
'Missing tag AutomatedEBSSnapshotsRetention for volume {}'.format(
volume.id))
return
retention = int(volume.tags['AutomatedEBSSnapshotsRetention'])
snapshots = connection.get_all_snapshots(filters={'volume-id': volume.id})
# Sort the list based on the start time
snapshots.sort(key=lambda x: x.start_time)
# Remove snapshots we want to keep
snapshots = snapshots[:-int(retention)]
if not snapshots:
logger.info('No old snapshots to remove')
return
for snapshot in snapshots:
logger.info('Deleting snapshot {}'.format(snapshot.id))
try:
snapshot.delete()
except EC2ResponseError as error:
logger.warning('Could not remove snapshot: {}'.format(
error.message))
logger.info('Done deleting snapshots') | 0.000819 |
def xywh_to_tlbr(bbox, img_wh):
""" converts xywh format to (tlx, tly, blx, bly) """
(img_w, img_h) = img_wh
if img_w == 0 or img_h == 0:
img_w = 1
img_h = 1
msg = '[cc2.1] Your csv tables have an invalid ANNOTATION.'
print(msg)
#warnings.warn(msg)
#ht = 1
#wt = 1
# Ensure ANNOTATION is within bounds
(x, y, w, h) = bbox
x1 = max(x, 0)
y1 = max(y, 0)
x2 = min(x + w, img_w - 1)
y2 = min(y + h, img_h - 1)
return (x1, y1, x2, y2) | 0.007619 |
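Two worked examples: a box hanging past the right/bottom edge of a 100x80 image is clamped to the last valid pixel, and a box starting left of the image is clipped at zero:
print(xywh_to_tlbr((70, 60, 50, 50), (100, 80)))   # (70, 60, 99, 79)
print(xywh_to_tlbr((-5, 10, 20, 20), (100, 80)))   # (0, 10, 15, 30)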
def refresh(self, executor, callbacks, completer_options=None):
"""Creates a SQLCompleter object and populates it with the relevant
completion suggestions in a background thread.
executor - SQLExecute object, used to extract the credentials to connect
to the database.
callbacks - A function or a list of functions to call after the thread
has completed the refresh. The newly created completion
object will be passed in as an argument to each callback.
completer_options - dict of options to pass to SQLCompleter.
"""
if completer_options is None:
completer_options = {}
if self.is_refreshing():
self._restart_refresh.set()
return [(None, None, None, 'Auto-completion refresh restarted.')]
else:
self._completer_thread = threading.Thread(
target=self._bg_refresh,
args=(executor, callbacks, completer_options),
name='completion_refresh')
self._completer_thread.setDaemon(True)
self._completer_thread.start()
return [(None, None, None,
'Auto-completion refresh started in the background.')] | 0.002351 |
def handle_message(self, connection, sender, target, message):
"""
Handles a received message
"""
parts = message.strip().split(' ', 2)
if parts and parts[0].lower() == '!bot':
try:
command = parts[1].lower()
except IndexError:
self.safe_send(connection, target, "No command given")
return
try:
payload = parts[2]
except IndexError:
payload = ""
self.__pool.enqueue(self._handle_command,
connection, sender, target, command, payload) | 0.003091 |
def is_(self, other):
"""
Ensures :attr:`subject` is *other* (object identity check).
"""
self._run(unittest_case.assertIs, (self._subject, other))
return ChainInspector(self._subject) | 0.008929 |
def setup_scout(adapter, institute_id='cust000', user_name='Clark Kent',
user_mail='[email protected]', api_key=None, demo=False):
"""docstring for setup_scout"""
########################## Delete previous information ##########################
LOG.info("Deleting previous database")
for collection_name in adapter.db.collection_names():
if not collection_name.startswith('system'):
LOG.info("Deleting collection %s", collection_name)
adapter.db.drop_collection(collection_name)
LOG.info("Database deleted")
########################## Add a institute ##########################
#####################################################################
# Build a institute with id institute_name
institute_obj = build_institute(
internal_id=institute_id,
display_name=institute_id,
sanger_recipients=[user_mail]
)
# Add the institute to database
adapter.add_institute(institute_obj)
########################## Add a User ###############################
#####################################################################
# Build a user obj
user_obj = dict(
_id=user_mail,
email=user_mail,
name=user_name,
roles=['admin'],
institutes=[institute_id]
)
adapter.add_user(user_obj)
### Get the mim information ###
if not demo:
# Fetch the mim files
try:
mim_files = fetch_mim_files(api_key, mim2genes=True, morbidmap=True, genemap2=True)
except Exception as err:
LOG.warning(err)
raise err
mim2gene_lines = mim_files['mim2genes']
genemap_lines = mim_files['genemap2']
# Fetch the genes to hpo information
hpo_gene_lines = fetch_hpo_genes()
# Fetch the latest version of the hgnc information
hgnc_lines = fetch_hgnc()
# Fetch the latest exac pli score information
exac_lines = fetch_exac_constraint()
else:
mim2gene_lines = [line for line in get_file_handle(mim2gene_reduced_path)]
genemap_lines = [line for line in get_file_handle(genemap2_reduced_path)]
# Fetch the genes to hpo information
hpo_gene_lines = [line for line in get_file_handle(hpogenes_reduced_path)]
# Fetch the reduced hgnc information
hgnc_lines = [line for line in get_file_handle(hgnc_reduced_path)]
# Fetch the latest exac pli score information
exac_lines = [line for line in get_file_handle(exac_reduced_path)]
builds = ['37', '38']
################## Load Genes and transcripts #######################
#####################################################################
for build in builds:
# Fetch the ensembl information
if not demo:
ensembl_genes = fetch_ensembl_genes(build=build)
else:
ensembl_genes = get_file_handle(genes37_reduced_path)
# load the genes
hgnc_genes = load_hgnc_genes(
adapter=adapter,
ensembl_lines=ensembl_genes,
hgnc_lines=hgnc_lines,
exac_lines=exac_lines,
mim2gene_lines=mim2gene_lines,
genemap_lines=genemap_lines,
hpo_lines=hpo_gene_lines,
build=build,
)
# Create a map from ensembl ids to gene objects
ensembl_genes = {}
for gene_obj in hgnc_genes:
ensembl_id = gene_obj['ensembl_id']
ensembl_genes[ensembl_id] = gene_obj
# Fetch the transcripts from ensembl
if not demo:
ensembl_transcripts = fetch_ensembl_transcripts(build=build)
else:
ensembl_transcripts = get_file_handle(transcripts37_reduced_path)
# Load the transcripts for a certain build
transcripts = load_transcripts(adapter, ensembl_transcripts, build, ensembl_genes)
hpo_terms_handle = None
hpo_to_genes_handle = None
hpo_disease_handle = None
if demo:
hpo_terms_handle = get_file_handle(hpoterms_reduced_path)
hpo_to_genes_handle = get_file_handle(hpo_to_genes_reduced_path)
hpo_disease_handle = get_file_handle(hpo_phenotype_to_terms_reduced_path)
load_hpo(
adapter=adapter,
hpo_lines=hpo_terms_handle,
hpo_gene_lines=hpo_to_genes_handle,
disease_lines=genemap_lines,
hpo_disease_lines=hpo_disease_handle
)
# If demo we load a gene panel and some case information
if demo:
parsed_panel = parse_gene_panel(
path=panel_path,
institute='cust000',
panel_id='panel1',
version=1.0,
display_name='Test panel'
)
adapter.load_panel(parsed_panel)
case_handle = get_file_handle(load_path)
case_data = yaml.load(case_handle)
adapter.load_case(case_data)
LOG.info("Creating indexes")
adapter.load_indexes()
LOG.info("Scout instance setup successful") | 0.00296 |
def byte_href_anchors(self, chars=False):
'''
simple, regex-based extractor of anchor tags, so we can
compute BYTE offsets for anchor texts and associate them with
their href.
Generates tuple(href_string, first_byte, byte_length, anchor_text)
'''
input_buffer = self.clean_html
if chars:
input_buffer = input_buffer.decode('utf8')
idx = 0
## split doc up into pieces that end on an anchor tag
parts = input_buffer.split('</a>')
assert len('</a>'.join(parts) ) == len(input_buffer)
for part in parts:
## try to get an A tag out:
m = anchors_re.match(part)
if not m:
idx += len(part) + 4
continue
before = m.group('before')
ahref = m.group('ahref')
## increment the index to get line number for the anchor
idx += len(before) + len(ahref)
first = idx
length = len(m.group('anchor'))
## update the index for the next loop
# include anchor plus the </a>
idx += length + 4
if chars:
yield m.group('href').encode('utf8'), first, length, m.group('anchor').encode('utf8')
else:
yield m.group('href'), first, length, m.group('anchor')
assert idx - 4 == len(input_buffer) | 0.006285 |
def map_copy(source: tcod.map.Map, dest: tcod.map.Map) -> None:
"""Copy map data from `source` to `dest`.
.. deprecated:: 4.5
Use Python's copy module, or see :any:`tcod.map.Map` and assign between
array attributes manually.
"""
if source.width != dest.width or source.height != dest.height:
dest.__init__( # type: ignore
source.width, source.height, source._order
)
dest._Map__buffer[:] = source._Map__buffer[:] | 0.002092 |
def launch(self):
"""Make the script file and return the newly created job id"""
# Make script file #
self.make_script()
# Do it #
sbatch_out = sh.sbatch(self.script_path)
jobs.expire()
# Message #
print Color.i_blu + "SLURM:" + Color.end + " " + str(sbatch_out),
# Return id #
self.id = int(re.findall("Submitted batch job ([0-9]+)", str(sbatch_out))[0])
return self.id | 0.00655 |
def get_storage_path(filename):
""" get_storage_path: returns path to storage directory for downloading content
Args: filename (str): Name of file to store
Returns: string path to file
"""
directory = os.path.join(STORAGE_DIRECTORY, filename[0], filename[1])
# Make storage directory for downloaded files if it doesn't already exist
    if not os.path.exists(directory):
os.makedirs(directory)
return os.path.join(directory, filename) | 0.00625 |
def asDateTime(self):
"""Create :py:class:`datetime.datetime` object from a |ASN.1| object.
Returns
-------
:
new instance of :py:class:`datetime.datetime` object
"""
text = str(self)
if text.endswith('Z'):
tzinfo = TimeMixIn.UTC
text = text[:-1]
elif '-' in text or '+' in text:
if '+' in text:
text, plusminus, tz = string.partition(text, '+')
else:
text, plusminus, tz = string.partition(text, '-')
if self._shortTZ and len(tz) == 2:
tz += '00'
if len(tz) != 4:
raise error.PyAsn1Error('malformed time zone offset %s' % tz)
try:
minutes = int(tz[:2]) * 60 + int(tz[2:])
if plusminus == '-':
minutes *= -1
except ValueError:
raise error.PyAsn1Error('unknown time specification %s' % self)
tzinfo = TimeMixIn.FixedOffset(minutes, '?')
else:
tzinfo = None
if '.' in text or ',' in text:
if '.' in text:
text, _, ms = string.partition(text, '.')
else:
text, _, ms = string.partition(text, ',')
try:
ms = int(ms) * 1000
except ValueError:
raise error.PyAsn1Error('bad sub-second time specification %s' % self)
else:
ms = 0
if self._optionalMinutes and len(text) - self._yearsDigits == 6:
text += '0000'
elif len(text) - self._yearsDigits == 8:
text += '00'
try:
dt = dateandtime.strptime(text, self._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
except ValueError:
raise error.PyAsn1Error('malformed datetime format %s' % self)
return dt.replace(microsecond=ms, tzinfo=tzinfo) | 0.002038 |
def filter_unused_variable(line, previous_line=''):
"""Return line if used, otherwise return None."""
if re.match(EXCEPT_REGEX, line):
return re.sub(r' as \w+:$', ':', line, count=1)
elif multiline_statement(line, previous_line):
return line
elif line.count('=') == 1:
split_line = line.split('=')
assert len(split_line) == 2
value = split_line[1].lstrip()
if ',' in split_line[0]:
return line
if is_literal_or_name(value):
            # Rather than removing the line, replace it with "pass" to avoid
            # a possible hanging block with no body.
value = 'pass' + get_line_ending(line)
return get_indentation(line) + value
else:
return line | 0.001307 |
def _diffSchema(diskSchema, memorySchema):
"""
Format a schema mismatch for human consumption.
@param diskSchema: The on-disk schema.
@param memorySchema: The in-memory schema.
@rtype: L{bytes}
@return: A description of the schema differences.
"""
diskSchema = set(diskSchema)
memorySchema = set(memorySchema)
diskOnly = diskSchema - memorySchema
memoryOnly = memorySchema - diskSchema
diff = []
if diskOnly:
diff.append('Only on disk:')
diff.extend(map(repr, diskOnly))
if memoryOnly:
diff.append('Only in memory:')
diff.extend(map(repr, memoryOnly))
return '\n'.join(diff) | 0.001495 |
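A minimal usage sketch, assuming _diffSchema above is in scope; the CREATE TABLE strings are made up for illustration:
disk = ['CREATE TABLE a (x INTEGER)', 'CREATE TABLE b (y TEXT)']
memory = ['CREATE TABLE a (x INTEGER)', 'CREATE TABLE c (z TEXT)']
print(_diffSchema(disk, memory))
# Only on disk:
# 'CREATE TABLE b (y TEXT)'
# Only in memory:
# 'CREATE TABLE c (z TEXT)'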
def args(**kwargs):
""" allows us to temporarily override all the special keyword parameters in
a with context """
kwargs_str = ",".join(["%s=%r" % (k,v) for k,v in kwargs.items()])
raise DeprecationWarning("""
sh.args() has been deprecated because it was never thread safe. use the
following instead:
sh2 = sh({kwargs})
sh2.your_command()
or
sh2 = sh({kwargs})
from sh2 import your_command
your_command()
""".format(kwargs=kwargs_str)) | 0.00625 |
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
app_conf = dci_config.generate_conf()
url = app_conf['SQLALCHEMY_DATABASE_URI']
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True,
)
with context.begin_transaction():
context.run_migrations() | 0.0016 |
def etag(cls, request, *args, **kwargs):
'''Class method to generate an ETag for use with
conditional processing; calls :meth:`datastream_etag` with
class configuration.'''
pid = kwargs[cls.pid_url_kwarg]
date = kwargs.get(cls.as_of_date_url_kwarg, None)
return datastream_etag(
request, pid, cls.datastream_id,
type=cls.object_type, repo=cls.repository_class(request=request),
accept_range_request=cls.accept_range_request, as_of_date=date) | 0.00381 |
def add_timeout_arg(a_func, timeout, **kwargs):
"""Updates a_func so that it gets called with the timeout as its final arg.
This converts a callable, a_func, into another callable with an additional
positional arg.
Args:
a_func (callable): a callable to be updated
    timeout (int): to be added to the original callable as its final positional
        arg.
    kwargs: Additional arguments passed through to the callable.
Returns:
callable: the original callable updated to the timeout arg
"""
def inner(*args):
"""Updates args with the timeout."""
updated_args = args + (timeout,)
return a_func(*updated_args, **kwargs)
return inner | 0.002821 |
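A minimal usage sketch, assuming add_timeout_arg above is in scope; fetch is a made-up callable whose last positional argument is the timeout:
def fetch(query, timeout):
    return 'fetched {} (timeout={}s)'.format(query, timeout)

fetch_with_timeout = add_timeout_arg(fetch, 30)
print(fetch_with_timeout('users'))  # fetched users (timeout=30s)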
def get_view_mode_id(self, view_mode):
'''Attempts to return a view_mode_id for a given view_mode
        taking into account the current skin. If no view_mode_id can
        be found, None is returned. 'thumbnail' is currently the only
        supported view_mode.
'''
view_mode_ids = VIEW_MODES.get(view_mode.lower())
if view_mode_ids:
return view_mode_ids.get(xbmc.getSkinDir())
return None | 0.004474 |
def update_thumbnail(api_key, api_secret, video_key, position=7.0, **kwargs):
"""
Function which updates the thumbnail for an EXISTING video utilizing position parameter.
    This function is useful for selecting a new thumbnail from within the already existing video content.
    Instead of the position parameter, the user may opt to utilize the thumbnail_index parameter.
    Please see documentation for further information.
:param api_key: <string> JWPlatform api-key
:param api_secret: <string> JWPlatform shared-secret
:param video_key: <string> Video's object ID. Can be found within JWPlayer Dashboard.
:param position: <float> Represents seconds into the duration of a video, for thumbnail extraction.
:param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/thumbnails/update.html
:return: <dict> Dict which represents the JSON response.
"""
jwplatform_client = jwplatform.Client(api_key, api_secret)
logging.info("Updating video thumbnail.")
try:
response = jwplatform_client.videos.thumbnails.update(
video_key=video_key,
position=position, # Parameter which specifies seconds into video to extract thumbnail from.
**kwargs)
except jwplatform.errors.JWPlatformError as e:
logging.error("Encountered an error updating thumbnail.\n{}".format(e))
sys.exit(e.message)
return response | 0.005453 |
def train(self, train_file, dev_file, test_file, save_dir, pretrained_embeddings=None, min_occur_count=2,
lstm_layers=3, word_dims=100, tag_dims=100, dropout_emb=0.33, lstm_hiddens=400,
dropout_lstm_input=0.33, dropout_lstm_hidden=0.33, mlp_arc_size=500, mlp_rel_size=100,
dropout_mlp=0.33, learning_rate=2e-3, decay=.75, decay_steps=5000, beta_1=.9, beta_2=.9, epsilon=1e-12,
num_buckets_train=40,
num_buckets_valid=10, num_buckets_test=10, train_iters=50000, train_batch_size=5000,
test_batch_size=5000, validate_every=100, save_after=5000, debug=False):
"""Train a deep biaffine dependency parser
Parameters
----------
train_file : str
path to training set
dev_file : str
path to dev set
test_file : str
path to test set
save_dir : str
a directory for saving model and related meta-data
pretrained_embeddings : tuple
(embedding_name, source), used for gluonnlp.embedding.create(embedding_name, source)
min_occur_count : int
threshold of rare words, which will be replaced with UNKs,
lstm_layers : int
layers of lstm
word_dims : int
dimension of word embedding
tag_dims : int
dimension of tag embedding
dropout_emb : float
word dropout
lstm_hiddens : int
size of lstm hidden states
dropout_lstm_input : int
dropout on x in variational RNN
dropout_lstm_hidden : int
dropout on h in variational RNN
mlp_arc_size : int
output size of MLP for arc feature extraction
mlp_rel_size : int
output size of MLP for rel feature extraction
dropout_mlp : float
dropout on the output of LSTM
learning_rate : float
learning rate
decay : float
see ExponentialScheduler
decay_steps : int
see ExponentialScheduler
beta_1 : float
see ExponentialScheduler
beta_2 : float
see ExponentialScheduler
epsilon : float
see ExponentialScheduler
num_buckets_train : int
number of buckets for training data set
num_buckets_valid : int
number of buckets for dev data set
num_buckets_test : int
number of buckets for testing data set
train_iters : int
training iterations
train_batch_size : int
training batch size
test_batch_size : int
test batch size
validate_every : int
validate on dev set every such number of batches
save_after : int
skip saving model in early epochs
debug : bool
debug mode
Returns
-------
DepParser
parser itself
"""
logger = init_logger(save_dir)
config = _Config(train_file, dev_file, test_file, save_dir, pretrained_embeddings, min_occur_count,
lstm_layers, word_dims, tag_dims, dropout_emb, lstm_hiddens, dropout_lstm_input,
dropout_lstm_hidden, mlp_arc_size, mlp_rel_size, dropout_mlp, learning_rate, decay,
decay_steps,
beta_1, beta_2, epsilon, num_buckets_train, num_buckets_valid, num_buckets_test, train_iters,
train_batch_size, debug)
config.save()
self._vocab = vocab = ParserVocabulary(train_file,
pretrained_embeddings,
min_occur_count)
vocab.save(config.save_vocab_path)
vocab.log_info(logger)
with mx.Context(mxnet_prefer_gpu()):
self._parser = parser = BiaffineParser(vocab, word_dims, tag_dims,
dropout_emb,
lstm_layers,
lstm_hiddens, dropout_lstm_input,
dropout_lstm_hidden,
mlp_arc_size,
mlp_rel_size, dropout_mlp, debug)
parser.initialize()
scheduler = ExponentialScheduler(learning_rate, decay, decay_steps)
optimizer = mx.optimizer.Adam(learning_rate, beta_1, beta_2, epsilon,
lr_scheduler=scheduler)
trainer = gluon.Trainer(parser.collect_params(), optimizer=optimizer)
data_loader = DataLoader(train_file, num_buckets_train, vocab)
global_step = 0
best_UAS = 0.
batch_id = 0
epoch = 1
total_epoch = math.ceil(train_iters / validate_every)
logger.info("Epoch {} out of {}".format(epoch, total_epoch))
bar = Progbar(target=min(validate_every, data_loader.samples))
while global_step < train_iters:
for words, tags, arcs, rels in data_loader.get_batches(batch_size=train_batch_size,
shuffle=True):
with autograd.record():
arc_accuracy, rel_accuracy, overall_accuracy, loss = parser.forward(words, tags, arcs,
rels)
loss_value = loss.asscalar()
loss.backward()
trainer.step(train_batch_size)
batch_id += 1
try:
bar.update(batch_id,
exact=[("UAS", arc_accuracy, 2),
# ("LAS", rel_accuracy, 2),
# ("ALL", overall_accuracy, 2),
("loss", loss_value)])
except OverflowError:
pass # sometimes loss can be 0 or infinity, crashes the bar
global_step += 1
if global_step % validate_every == 0:
bar = Progbar(target=min(validate_every, train_iters - global_step))
batch_id = 0
UAS, LAS, speed = evaluate_official_script(parser, vocab, num_buckets_valid,
test_batch_size,
dev_file,
os.path.join(save_dir, 'valid_tmp'))
logger.info('Dev: UAS %.2f%% LAS %.2f%% %d sents/s' % (UAS, LAS, speed))
epoch += 1
if global_step < train_iters:
logger.info("Epoch {} out of {}".format(epoch, total_epoch))
if global_step > save_after and UAS > best_UAS:
logger.info('- new best score!')
best_UAS = UAS
parser.save(config.save_model_path)
# When validate_every is too big
if not os.path.isfile(config.save_model_path) or best_UAS != UAS:
parser.save(config.save_model_path)
return self | 0.004498 |
def ml_acr(tree, character, prediction_method, model, states, avg_br_len, num_nodes, num_tips, freqs=None, sf=None,
kappa=None, force_joint=True):
"""
Calculates ML states on the tree and stores them in the corresponding feature.
:param states: numpy array of possible states
:param prediction_method: str, MPPA (marginal approximation), MAP (max a posteriori) or JOINT
:param tree: ete3.Tree, the tree of interest
:param character: str, character for which the ML states are reconstructed
:param model: str, evolutionary model, F81 (Felsenstein 81-like), JC (Jukes-Cantor-like) or EFT (estimate from tips)
:param avg_br_len: float, average non-zero branch length of the tree.
:param freqs: numpy array of predefined frequencies (or None if they are to be estimated)
    :param sf: float, predefined scaling factor (or None if it is to be estimated)
    :param kappa: float, predefined kappa parameter for the HKY model (or None if it is to be estimated)
    :param num_nodes: int, number of nodes in the tree
    :param num_tips: int, number of tips in the tree
    :param force_joint: bool, passed through to the MPPA state selection (see choose_ancestral_states_mppa)
    :return: list of dicts, each mapping reconstruction parameters to values for one prediction method
"""
n = len(states)
state2index = dict(zip(states, range(n)))
missing_data = 0.
observed_frequencies = np.zeros(n, np.float64)
for _ in tree:
state = getattr(_, character, set())
if state:
num_node_states = len(state)
for _ in state:
observed_frequencies[state2index[_]] += 1. / num_node_states
else:
missing_data += 1
total_count = observed_frequencies.sum() + missing_data
observed_frequencies /= observed_frequencies.sum()
missing_data /= total_count
logger = logging.getLogger('pastml')
logger.debug('Observed frequencies for {}:{}{}.'
.format(character,
''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, observed_frequencies[
state2index[state]])
for state in states),
'\n\tfraction of missing data:\t{:.3f}'.format(
missing_data) if missing_data else ''))
if freqs is not None and model not in {F81, HKY}:
logging.warning('Some frequencies were specified in the parameter file, '
'but the selected model ({}) ignores them. '
'Use F81 (or HKY for nucleotide characters only) '
'for taking user-specified frequencies into account.'.format(model))
optimise_frequencies = model in {F81, HKY} and freqs is None
if JTT == model:
frequencies = JTT_FREQUENCIES
elif EFT == model:
frequencies = observed_frequencies
elif model in {F81, HKY} and freqs is not None:
frequencies = freqs
else:
frequencies = np.ones(n, dtype=np.float64) / n
initialize_allowed_states(tree, character, states)
alter_zero_tip_allowed_states(tree, character)
if sf:
optimise_sf = False
else:
sf = 1. / avg_br_len
optimise_sf = True
if HKY == model:
if kappa:
optimise_kappa = False
else:
optimise_kappa = True
kappa = 4.
else:
optimise_kappa = False
likelihood = get_bottom_up_likelihood(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa,
is_marginal=True, model=model)
if not optimise_sf and not optimise_frequencies and not optimise_kappa:
logger.debug('All the parameters are fixed for {}:{}{}{}{}.'
.format(character,
''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, frequencies[
state2index[state]])
for state in states),
'\n\tSF:\t{:.3f}, i.e. {:.3f} changes per avg branch'
.format(sf, sf * avg_br_len),
'\n\tkappa:\t{:.3f}'.format(kappa) if HKY == model else '',
'\n\tlog likelihood:\t{:.3f}'.format(likelihood)))
else:
logger.debug('Initial values for {} parameter optimisation:{}{}{}{}.'
.format(character,
''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, frequencies[
state2index[state]])
for state in states),
'\n\tSF:\t{:.3f}, i.e. {:.3f} changes per avg branch'
.format(sf, sf * avg_br_len),
'\n\tkappa:\t{:.3f}'.format(kappa) if HKY == model else '',
'\n\tlog likelihood:\t{:.3f}'.format(likelihood)))
if optimise_sf:
(_, sf, _), likelihood = optimize_likelihood_params(tree=tree, character=character, frequencies=frequencies,
sf=sf, kappa=kappa,
optimise_frequencies=False, optimise_sf=optimise_sf,
optimise_kappa=False, avg_br_len=avg_br_len,
model=model)
if optimise_frequencies or optimise_kappa:
logger.debug('Pre-optimised SF for {}:{}{}.'
.format(character,
'\n\tSF:\t{:.3f}, i.e. {:.3f} changes per avg branch'
.format(sf, sf * avg_br_len),
'\n\tlog likelihood:\t{:.3f}'.format(likelihood)))
if optimise_frequencies or optimise_kappa:
(frequencies, sf, kappa), likelihood = \
optimize_likelihood_params(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa,
optimise_frequencies=optimise_frequencies, optimise_sf=optimise_sf,
optimise_kappa=optimise_kappa, avg_br_len=avg_br_len, model=model)
logger.debug('Optimised {} values:{}{}{}{}'
.format(character,
''.join('\n\tfrequency of {}:\t{:.3f}'.format(state, frequencies[
state2index[state]])
for state in states) if optimise_frequencies else '',
'\n\tSF:\t{:.3f}, i.e. {:.3f} changes per avg branch'
.format(sf, sf * avg_br_len),
'\n\tkappa:\t{:.3f}'.format(kappa) if HKY == model else '',
'\n\tlog likelihood:\t{:.3f}'.format(likelihood)))
result = {LOG_LIKELIHOOD: likelihood, CHARACTER: character, METHOD: prediction_method, MODEL: model,
FREQUENCIES: frequencies, SCALING_FACTOR: sf, CHANGES_PER_AVG_BRANCH: sf * avg_br_len, STATES: states,
NUM_NODES: num_nodes, NUM_TIPS: num_tips}
if HKY == model:
result[KAPPA] = kappa
results = []
def process_reconstructed_states(method):
if method == prediction_method or is_meta_ml(prediction_method):
method_character = get_personalized_feature_name(character, method) \
if prediction_method != method else character
convert_allowed_states2feature(tree, character, states, method_character)
res = result.copy()
res[CHARACTER] = method_character
res[METHOD] = method
results.append(res)
def process_restricted_likelihood_and_states(method):
alter_zero_tip_allowed_states(tree, character)
restricted_likelihood = get_bottom_up_likelihood(tree=tree, character=character,
frequencies=frequencies, sf=sf, kappa=kappa,
is_marginal=True, model=model)
unalter_zero_tip_allowed_states(tree, character, state2index)
note_restricted_likelihood(method, restricted_likelihood)
process_reconstructed_states(method)
def note_restricted_likelihood(method, restricted_likelihood):
logger.debug('Log likelihood for {} after {} state selection:\t{:.3f}'
.format(character, method, restricted_likelihood))
result[RESTRICTED_LOG_LIKELIHOOD_FORMAT_STR.format(method)] = restricted_likelihood
if prediction_method != MAP:
# Calculate joint restricted likelihood
restricted_likelihood = get_bottom_up_likelihood(tree=tree, character=character,
frequencies=frequencies, sf=sf, kappa=kappa,
is_marginal=False, model=model)
note_restricted_likelihood(JOINT, restricted_likelihood)
unalter_zero_tip_joint_states(tree, character, state2index)
choose_ancestral_states_joint(tree, character, states, frequencies)
process_reconstructed_states(JOINT)
if is_marginal(prediction_method):
initialize_allowed_states(tree, character, states)
alter_zero_tip_allowed_states(tree, character)
get_bottom_up_likelihood(tree=tree, character=character, frequencies=frequencies, sf=sf, kappa=kappa,
is_marginal=True, model=model)
calculate_top_down_likelihood(tree, character, frequencies, sf, kappa=kappa, model=model)
unalter_zero_tip_allowed_states(tree, character, state2index)
calculate_marginal_likelihoods(tree, character, frequencies)
# check_marginal_likelihoods(tree, feature)
result[MARGINAL_PROBABILITIES] = convert_likelihoods_to_probabilities(tree, character, states)
choose_ancestral_states_map(tree, character, states)
process_restricted_likelihood_and_states(MAP)
if MPPA == prediction_method or is_meta_ml(prediction_method):
if ALL == prediction_method:
pars_acr_results = parsimonious_acr(tree, character, MP, states, num_nodes, num_tips)
results.extend(pars_acr_results)
for pars_acr_res in pars_acr_results:
_parsimonious_states2allowed_states(tree, pars_acr_res[CHARACTER], character, state2index)
alter_zero_tip_allowed_states(tree, character)
restricted_likelihood = get_bottom_up_likelihood(tree=tree, character=character,
frequencies=frequencies, sf=sf, kappa=kappa,
is_marginal=True, model=model)
note_restricted_likelihood(pars_acr_res[METHOD], restricted_likelihood)
result[NUM_SCENARIOS], result[NUM_UNRESOLVED_NODES], result[NUM_STATES_PER_NODE] = \
choose_ancestral_states_mppa(tree, character, states, force_joint=force_joint)
result[NUM_STATES_PER_NODE] /= num_nodes
result[PERC_UNRESOLVED] = result[NUM_UNRESOLVED_NODES] * 100 / num_nodes
logger.debug('{} node{} unresolved ({:.2f}%) for {} by {}, '
'i.e. {:.4f} state{} per node in average.'
.format(result[NUM_UNRESOLVED_NODES], 's are' if result[NUM_UNRESOLVED_NODES] != 1 else ' is',
result[PERC_UNRESOLVED], character, MPPA,
result[NUM_STATES_PER_NODE], 's' if result[NUM_STATES_PER_NODE] > 1 else ''))
process_restricted_likelihood_and_states(MPPA)
return results | 0.004728 |
def step_it_should_pass_with(context):
'''
EXAMPLE:
...
when I run "behave ..."
then it should pass with:
"""
TEXT
"""
'''
assert context.text is not None, "ENSURE: multiline text is provided."
step_command_output_should_contain(context)
assert_that(context.command_result.returncode, equal_to(0),
context.command_result.output) | 0.002336 |
def republish_module_trigger(plpy, td):
"""Trigger called from postgres database when republishing a module.
When a module is republished, the versions of the collections that it is
part of will need to be updated (a minor update).
e.g. there is a collection c1 v2.1, which contains module m1 v3
m1 is updated, we have a new row in the modules table with m1 v4
    this trigger will increment the minor version of c1, so we'll have
    c1 v2.2
    we need to create a collection tree for c1 v2.2 which is exactly the same
    as c1 v2.1, but with m1 v4 instead of m1 v3, and c1 v2.2 instead of c1 v2.1
"""
# Is this an insert from legacy? Legacy always supplies the version.
is_legacy_publication = td['new']['version'] is not None
if not is_legacy_publication:
# Bail out, because this trigger only applies to legacy publications.
return "OK"
plpy.log('Trigger fired on %s' % (td['new']['moduleid'],))
modified = republish_module(td, plpy)
plpy.log('modified: {}'.format(modified))
plpy.log('insert values:\n{}\n'.format('\n'.join([
'{}: {}'.format(key, value)
for key, value in td['new'].items()])))
return modified | 0.000819 |
def new(cls, access_token, environment='prod'):
'''Create a new storage service REST client.
Arguments:
environment: The service environment to be used for the client
access_token: The access token used to authenticate with the
service
Returns:
A storage_service.api.ApiClient instance
Example:
>>> storage_client = ApiClient.new(my_access_token)
'''
request = RequestBuilder \
.request(environment) \
.to_service(cls.SERVICE_NAME, cls.SERVICE_VERSION) \
.throw(
StorageForbiddenException,
lambda resp: 'You are forbidden to do this.'
if resp.status_code == 403 else None
) \
.throw(
StorageNotFoundException,
lambda resp: 'The entity is not found'
if resp.status_code == 404 else None
) \
.throw(
StorageException,
lambda resp: 'Server response: {0} - {1}'.format(resp.status_code, resp.text)
if not resp.ok else None
)
authenticated_request = request.with_token(access_token)
return cls(request, authenticated_request) | 0.002262 |
def setMode(self
,mode
,polarity
,den
,iovalue
,data_length
,reference
,input_range
,clock_enable
,burn_out
,channel):
'''
def setMode(self
,mode = self.AD7730_IDLE_MODE
,polarity = self.AD7730_UNIPOLAR_MODE
,den = self.AD7730_IODISABLE_MODE
,iovalue = 0b00
            ,data_length = self.AD7730_24bitDATA_MODE
,reference = self.AD7730_REFERENCE_5V
,input_range = self.AD7730_40mVIR_MODE
,clock_enable = self.AD7730_MCLK_ENABLE_MODE
,burn_out = self.AD7730_BURNOUT_DISABLE
,channel = self.AD7730_AIN1P_AIN1N
):
'''
mode_MSB = (mode << 5) + (polarity << 4) + (den << 3) + (iovalue << 1) + data_length
mode_LSB = (reference << 7) + (0b0 << 6) + (input_range << 4) + (clock_enable << 3) + (burn_out << 2) + channel
self.single_write(self.AD7730_MODE_REG, [mode_MSB, mode_LSB]) | 0.029009 |
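A self-contained sketch of the bit packing done above, using made-up field values in place of the AD7730_* constants:
mode, polarity, den, iovalue, data_length = 0b001, 0, 0, 0b00, 1
reference, input_range, clock_enable, burn_out, channel = 1, 0b01, 1, 0, 0b00

mode_MSB = (mode << 5) + (polarity << 4) + (den << 3) + (iovalue << 1) + data_length
mode_LSB = (reference << 7) + (0b0 << 6) + (input_range << 4) + (clock_enable << 3) + (burn_out << 2) + channel
print('{:08b} {:08b}'.format(mode_MSB, mode_LSB))  # 00100001 10011000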
def buid():
"""
Return a new random id that is exactly 22 characters long,
by encoding a UUID4 in URL-safe Base64. See
http://en.wikipedia.org/wiki/Base64#Variants_summary_table
>>> len(buid())
22
>>> buid() == buid()
False
>>> isinstance(buid(), six.text_type)
True
"""
if six.PY3: # pragma: no cover
return urlsafe_b64encode(uuid.uuid4().bytes).decode('utf-8').rstrip('=')
else: # pragma: no cover
return six.text_type(urlsafe_b64encode(uuid.uuid4().bytes).rstrip('=')) | 0.00369 |
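A self-contained sketch of the same idea on the Python 3 path only:
import uuid
from base64 import urlsafe_b64encode

token = urlsafe_b64encode(uuid.uuid4().bytes).decode('utf-8').rstrip('=')
print(len(token), token)  # prints 22 and a 22-character URL-safe token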
def list_nodes_full(**kwargs):
'''
Return all data on nodes
'''
nodes = _query('server/list')
ret = {}
for node in nodes:
name = nodes[node]['label']
ret[name] = nodes[node].copy()
ret[name]['id'] = node
ret[name]['image'] = nodes[node]['os']
ret[name]['size'] = nodes[node]['VPSPLANID']
ret[name]['state'] = nodes[node]['status']
ret[name]['private_ips'] = nodes[node]['internal_ip']
ret[name]['public_ips'] = nodes[node]['main_ip']
return ret | 0.001859 |
def _build_indexes(self):
"""Build indexes from data for fast filtering of data.
Building indexes of data when possible. This is only supported when dealing with a
List of Dictionaries with String values.
"""
if isinstance(self._data, list):
for d in self._data:
if not isinstance(d, dict):
err = u'Cannot build index for non Dict type.'
self._tcex.log.error(err)
raise RuntimeError(err)
data_obj = DataObj(d)
self._master_index.setdefault(id(data_obj), data_obj)
for key, value in d.items():
# bcs - update this
# if not isinstance(value, (types.StringType, float, int)):
# TODO: This is not Python 3 ready
if not isinstance(value, (float, int, str)):
# For comparison operators the value needs to be a StringType
                        self._tcex.log.debug(u'Can only build indexes for String types.')
continue
self._indexes.setdefault(key, {}).setdefault(value, []).append(data_obj)
else:
err = u'Only *List* data type is currently supported'
self._tcex.log.error(err)
raise RuntimeError(err) | 0.004402 |
def replace_fact(term, fact, author=''):
"""
Replaces an existing fact by removing it, then adding the new definition
"""
forget_fact(term)
add_fact(term, fact, author)
return random.choice(ACKS) | 0.004566 |
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with "
"a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(
algorithms.isin(self.values.ravel(),
values).reshape(self.shape), self.index,
self.columns) | 0.000559 |
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined | 0.000887 |
def _get_licences():
""" Lists all the licenses on command line """
licenses = _LICENSES
for license in licenses:
print("{license_name} [{license_code}]".format(
license_name=licenses[license], license_code=license)) | 0.016878 |
def _map_dict_keys_to_model_attributes(model_type, model_dict):
"""
    Maps a dict's keys to the provided model's attributes using its attribute_map
    attribute. This is (always?) the same as converting camelCase to snake_case.
    Note that the function will not influence nested objects' keys.
"""
new_dict = {}
for key, value in model_dict.items():
new_dict[_get_k8s_model_attribute(model_type, key)] = value
return new_dict | 0.006536 |
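A self-contained sketch of the camelCase-to-snake_case conversion that the attribute_map usually encodes; the regex here is an illustration, not the lookup used above:
import re

def camel_to_snake(name):
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

print(camel_to_snake('restartPolicy'))    # restart_policy
print(camel_to_snake('imagePullPolicy'))  # image_pull_policy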
def _compute_precision_recall(input_, labels, threshold,
per_example_weights):
"""Returns the numerator of both, the denominator of precision and recall."""
# To apply per_example_weights, we need to collapse each row to a scalar, but
# we really want the sum.
labels.get_shape().assert_is_compatible_with(input_.get_shape())
relevant = tf.to_float(tf.greater(labels, 0))
retrieved = tf.to_float(tf.greater(input_, threshold))
selected = relevant * retrieved
if per_example_weights is not None:
per_example_weights = _convert_and_assert_per_example_weights_compatible(
input_,
per_example_weights,
dtype=None)
per_example_weights = tf.to_float(tf.greater(per_example_weights, 0))
selected = functions.reduce_batch_sum(selected) * per_example_weights
relevant = functions.reduce_batch_sum(relevant) * per_example_weights
retrieved = functions.reduce_batch_sum(retrieved) * per_example_weights
sum_relevant = tf.reduce_sum(relevant)
sum_retrieved = tf.reduce_sum(retrieved)
selected = tf.reduce_sum(selected)
return selected, sum_retrieved, sum_relevant | 0.011295 |
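A plain-Python sketch of how the three returned sums combine into precision and recall (no TensorFlow, no per-example weights):
labels = [1, 0, 1, 1, 0]
scores = [0.9, 0.8, 0.2, 0.6, 0.1]
threshold = 0.5

relevant = [float(l > 0) for l in labels]
retrieved = [float(s > threshold) for s in scores]
selected = sum(r * t for r, t in zip(relevant, retrieved))

precision = selected / sum(retrieved)  # 2/3
recall = selected / sum(relevant)      # 2/3
print(precision, recall)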
def log_component_configuration(component, message):
"""Report something about component configuration that the user should better know."""
assert isinstance(component, basestring)
assert isinstance(message, basestring)
__component_logs.setdefault(component, []).append(message) | 0.006803 |
def getTypeFunc(self, data):
"""
Returns a callable that will encode C{data} to C{self.stream}. If
C{data} is unencodable, then C{None} is returned.
"""
if data is None:
return self.writeNull
t = type(data)
# try types that we know will work
if t is str or issubclass(t, str):
return self.writeBytes
if t is unicode or issubclass(t, unicode):
return self.writeString
elif t is bool:
return self.writeBoolean
elif t is float:
return self.writeNumber
elif t in python.int_types:
return self.writeNumber
elif t in (list, tuple):
return self.writeList
elif isinstance(data, (list, tuple)):
return self.writeSequence
elif t is types.GeneratorType:
return self.writeGenerator
elif t is pyamf.UndefinedType:
return self.writeUndefined
elif t in (datetime.date, datetime.datetime, datetime.time):
return self.writeDate
elif xml.is_xml(data):
return self.writeXML
# check for any overridden types
for type_, func in pyamf.TYPE_MAP.iteritems():
try:
if isinstance(data, type_):
return _CustomTypeFunc(self, func)
except TypeError:
if python.callable(type_) and type_(data):
return _CustomTypeFunc(self, func)
# now try some types that won't encode
if t in python.class_types:
# can't encode classes
return None
elif isinstance(data, python.func_types):
# can't encode code objects
return None
elif isinstance(t, types.ModuleType):
# cannot encode module objects
return None
# well, we tried ..
return self.writeObject | 0.001553 |
def query(self, *args):
"""
This method retrieve an iterable object that implements the method
__iter__. The arguments given will compose the parameters in the
request url.
args: strings (String)
return: iterable object of Members metadata
Example:
>>> from crossref.restful import Members
>>> members = Members().query('Korean Association')
            >>> members.url
            'https://api.crossref.org/members?query=Korean+Association'
>>> next(iter(members.query('Korean Association')))
{'prefix': [{'value': '10.20433', 'public-references': False,
'name': 'The New Korean Philosophical Association'}], 'counts': {'total-dois': 0, 'backfile-dois': 0,
'current-dois': 0}, 'coverage': {'references-backfile': 0, 'references-current': 0,
'abstracts-current': 0, 'update-policies-backfile': 0, 'orcids-current': 0, 'orcids-backfile': 0,
'licenses-current': 0, 'affiliations-backfile': 0, 'licenses-backfile': 0, 'update-policies-current': 0,
'resource-links-current': 0, 'resource-links-backfile': 0, 'award-numbers-backfile': 0,
'abstracts-backfile': 0, 'funders-current': 0, 'funders-backfile': 0, 'affiliations-current': 0,
'award-numbers-current': 0}, 'flags': {'deposits-orcids-backfile': False,
'deposits-references-backfile': False, 'deposits-licenses-current': False, 'deposits': False,
'deposits-abstracts-current': False, 'deposits-award-numbers-current': False, 'deposits-articles': False,
'deposits-resource-links-backfile': False, 'deposits-funders-current': False,
'deposits-award-numbers-backfile': False, 'deposits-references-current': False,
'deposits-abstracts-backfile': False, 'deposits-funders-backfile': False,
'deposits-update-policies-current': False, 'deposits-orcids-current': False,
'deposits-licenses-backfile': False, 'deposits-affiliations-backfile': False,
'deposits-update-policies-backfile': False, 'deposits-resource-links-current': False,
'deposits-affiliations-current': False}, 'names': ['The New Korean Philosophical Association'],
'breakdowns': {'dois-by-issued-year': []}, 'location': 'Dongsin Tower, 4th Floor 5, Mullae-dong 6-ga,
Mullae-dong 6-ga Seoul 150-096 South Korea', 'prefixes': ['10.20433'],
'last-status-check-time': 1496034177684, 'id': 8334, 'tokens': ['the', 'new', 'korean', 'philosophical',
'association'], 'primary-name': 'The New Korean Philosophical Association'}
"""
context = str(self.context)
request_url = build_url_endpoint(self.ENDPOINT)
request_params = dict(self.request_params)
if args:
request_params['query'] = ' '.join([str(i) for i in args])
return self.__class__(request_url, request_params, context, self.etiquette) | 0.007573 |
def auto_orient(self):
"""Set the orientation for the image to a reasonable default."""
image = self.get_image()
if image is None:
return
invert_y = not isinstance(image, AstroImage.AstroImage)
# Check for various things to set based on metadata
header = image.get_header()
if header:
# Auto-orientation
orient = header.get('Orientation', None)
if orient is None:
orient = header.get('Image Orientation', None)
if orient is not None:
self.logger.debug("orientation [%s]" % orient)
try:
orient = int(str(orient))
self.logger.info(
"setting orientation from metadata [%d]" % (orient))
flip_x, flip_y, swap_xy = self.orient_map[orient]
self.transform(flip_x, flip_y, swap_xy)
invert_y = False
except Exception as e:
# problems figuring out orientation--let it be
self.logger.error("orientation error: %s" % str(e))
if invert_y:
flip_x, flip_y, swap_xy = self.get_transforms()
#flip_y = not flip_y
flip_y = True
self.transform(flip_x, flip_y, swap_xy) | 0.002221 |
def retcode(plugin, args='', key_name=None):
'''
Run one nagios plugin and return retcode of the execution
'''
data = {}
# Remove all the spaces, the key must not have any space
if key_name is None:
key_name = _format_dict_key(args, plugin)
data[key_name] = {}
status = _execute_cmd(plugin, args, 'cmd.retcode')
data[key_name]['status'] = status
return data | 0.002445 |