def _on_dynamodb_exception(self, error):
"""Dynamically handle DynamoDB exceptions, returning HTTP error
responses.
:param exceptions.DynamoDBException error:
"""
if isinstance(error, exceptions.ConditionalCheckFailedException):
raise web.HTTPError(409, reason='Condition Check Failure')
elif isinstance(error, exceptions.NoCredentialsError):
if _no_creds_should_return_429():
raise web.HTTPError(429, reason='Instance Credentials Failure')
elif isinstance(error, (exceptions.ThroughputExceeded,
exceptions.ThrottlingException)):
raise web.HTTPError(429, reason='Too Many Requests')
if hasattr(self, 'logger'):
self.logger.error('DynamoDB Error: %s', error)
raise web.HTTPError(500, reason=str(error))
def moveToReplayContext(self, r):
'set the sheet/row/col to the values in the replay row. return sheet'
if not r.sheet:
# assert not r.col and not r.row
return self # any old sheet should do, row/column don't matter
try:
sheetidx = int(r.sheet)
vs = vd().sheets[sheetidx]
except ValueError:
vs = vd().getSheet(r.sheet) or error('no sheet named %s' % r.sheet)
if r.row:
try:
rowidx = int(r.row)
except ValueError:
rowidx = indexMatch(vs.rows, lambda r,vs=vs,k=r.row: keystr(vs.rowkey(r)) == k)
if rowidx is None:
error('no "%s" row' % r.row)
if options.replay_movement:
while vs.cursorRowIndex != rowidx:
vs.cursorRowIndex += 1 if (rowidx - vs.cursorRowIndex) > 0 else -1
while not self.delay(0.5):
pass
else:
vs.cursorRowIndex = rowidx
if r.col:
try:
vcolidx = int(r.col)
except ValueError:
vcolidx = indexMatch(vs.visibleCols, lambda c,name=r.col: name == c.name)
if vcolidx is None:
error('no "%s" column' % r.col)
if options.replay_movement:
while vs.cursorVisibleColIndex != vcolidx:
vs.cursorVisibleColIndex += 1 if (vcolidx - vs.cursorVisibleColIndex) > 0 else -1
while not self.delay(0.5):
pass
assert vs.cursorVisibleColIndex == vcolidx
else:
vs.cursorVisibleColIndex = vcolidx
return vs
def overdrive(self, gain_db=20.0, colour=20.0):
'''Apply non-linear distortion.
Parameters
----------
gain_db : float, default=20
Controls the amount of distortion (dB).
colour : float, default=20
Controls the amount of even harmonic content in the output (dB).
'''
if not is_number(gain_db):
raise ValueError('gain_db must be a number.')
if not is_number(colour):
raise ValueError('colour must be a number.')
effect_args = [
'overdrive',
'{:f}'.format(gain_db),
'{:f}'.format(colour)
]
self.effects.extend(effect_args)
self.effects_log.append('overdrive')
return self
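# A minimal usage sketch (assuming this is the pysox Transformer.overdrive method; the
# file names and values below are illustrative, not from the source):
# tfm = sox.Transformer()
# tfm.overdrive(gain_db=25.0, colour=15.0)   # appends ['overdrive', '25.000000', '15.000000']
# tfm.build('input.wav', 'distorted.wav')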
def recursive_map(func, data):
"""
Apply func to data, and any collection items inside data (using map_collection).
Define func so that it only applies to the type of value that you want it to apply to.
"""
def recurse(item):
return recursive_map(func, item)
items_mapped = map_collection(recurse, data)
return func(items_mapped)
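# A minimal sketch of intended usage. map_collection is assumed to map a callable over the
# items of lists/tuples/dicts and to return non-collection values unchanged:
# def double_ints(x):
#     return x * 2 if isinstance(x, int) else x
# recursive_map(double_ints, {'a': [1, 2], 'b': 3})   # -> {'a': [2, 4], 'b': 6}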
async def _heartbeat_callback(self):
"""如果设置了心跳,则调用这个协程."""
query = {
"MPRPC": self.VERSION,
"HEARTBEAT": "ping"
}
queryb = self.encoder(query)
while True:
await asyncio.sleep(self.heart_beat)
self.writer.write(queryb)
if self.debug is True:
print("ping") | 如果设置了心跳,则调用这个协程. |
def container(
state, host, name,
present=True, image='ubuntu:16.04',
):
'''
Add/remove LXD containers.
Note: does not check if an existing container is based on the specified
image.
+ name: name of the container
+ image: image to base the container on
+ present: whether the container should be present or absent
'''
container = get_container_named(name, host.fact.lxd_containers)
# Container exists and we don't want it
if container and not present:
if container['status'] == 'Running':
yield 'lxc stop {0}'.format(name)
# Command to remove the container:
yield 'lxc delete {0}'.format(name)
# Container doesn't exist and we want it
if not container and present:
# Command to create the container:
yield 'lxc launch {image} {name}'.format(name=name, image=image)
def system_call(cmd, **kwargs):
"""Call cmd and return (stdout, stderr, return_value).
Parameters
----------
cmd: str
Can be either a string containing the command to be run, or a sequence
of strings that are the tokens of the command.
kwargs : dict, optional
Ignored. Available so that this function is compatible with
_redis_wrap.
Notes
-----
This function is ported from QIIME (http://www.qiime.org), previously
named qiime_system_call. QIIME is a GPL project, but we obtained permission
from the authors of this function to port it to pyqi (and keep it under
pyqi's BSD license).
"""
proc = Popen(cmd,
universal_newlines=True,
shell=True,
stdout=PIPE,
stderr=PIPE)
# communicate pulls all stdout/stderr from the PIPEs to
# avoid blocking -- don't remove this line!
stdout, stderr = proc.communicate()
return_value = proc.returncode
if return_value != 0:
raise ValueError("Failed to execute: %s\nstdout: %s\nstderr: %s" %
(cmd, stdout, stderr))
return stdout, stderr, return_value
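# A minimal usage sketch (the function assumes `from subprocess import Popen, PIPE` at
# module level; the commands below are illustrative):
# stdout, stderr, rv = system_call('echo hello')
# stdout.strip()          # -> 'hello', rv == 0
# system_call('exit 1')   # non-zero exit status raises ValueError with the captured output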
def geom_symm_match(g, atwts, ax, theta, do_refl):
""" [Revised match factor calculation]
.. todo:: Complete geom_symm_match docstring
"""
# Imports
import numpy as np
from scipy import linalg as spla
# Convert g and atwts to n-D vectors
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
atwts = make_nd_vec(atwts, nd=None, t=np.float64, norm=False)
# Ensure proper dimensionality
if not g.shape[0] == 3 * atwts.shape[0]:
raise ValueError("Size of 'g' is not 3*size of 'atwts'")
## end if
# Calculate transformed geometry
gx = symm_op(g, ax, theta, do_refl)
# Push g to a column vector
g = g.reshape((g.shape[0],1))
# Augment g and gx with imaginary atomic weights
ex_wts = atwts.repeat(3,axis=0).T.reshape((atwts.shape[0]*3,1)) * 1.j
g = np.add(g, ex_wts)
gx = np.add(gx, ex_wts)
## # Define calc as the outer product of the augmented vectors
## calc = np.dot(g.reshape((g.shape[0],1)), \
## np.reciprocal(gx.reshape((1,gx.shape[0]))))
##
## # Calculate the complex magnitude of each element and take log10,
## # then abs again
## calc = np.abs(np.log10(np.abs(calc)))
# Expand g and gx as column vectors of coordinates
calc_g = g.reshape((g.shape[0] // 3, 3))
calc_gx = gx.reshape((gx.shape[0] // 3, 3))
##
## # Expand each into a square matrix of identical column vectors
## calc_g = calc_g.repeat(g.shape[0], axis=1)
## calc_gx = gx.repeat(gx.shape[0], axis=1)
# Calc is the absolute distance between the calc-ed values,
# scaled by the maximum of the individual atom distances or unity.
# Calculate the unscaled distances
calc = [[spla.norm(np.subtract(calc_g[i,:], calc_gx[j,:])) \
for j in range(calc_gx.shape[0])] \
for i in range(calc_g.shape[0])]
# Calculate the scale factors
scale_g = np.array([spla.norm(calc_g[i,:]) for i in \
range(calc_g.shape[0])]).reshape((calc_g.shape[0],1)) \
.repeat(calc_g.shape[0], axis=1)
scale_gx = np.array([spla.norm(calc_gx[j,:]) for j in \
range(calc_g.shape[0])]).reshape((1,calc_gx.shape[0])) \
.repeat(calc_gx.shape[0], axis=0)
scale = np.maximum(np.maximum(scale_g, scale_gx),
np.ones_like(scale_g, dtype=np.float64))
# Scale calc
calc = np.divide(calc, scale)
# Take the minimum of each row
mins = np.min(calc, axis=1)
# Take the maximum of the minima for the final factor
fac = np.max(mins)
# Using the atomic weights for checking matching can result in 'fac'
# being greater than unity. Return the minimum of fac and unity.
fac = min(fac, 1.0)
return fac
def query_transactions(self, initial_date, final_date,
page=None,
max_results=None):
""" query transaction by date range """
last_page = False
results = []
while last_page is False:
search_result = self._consume_query_transactions(
initial_date, final_date, page, max_results)
results.extend(search_result.transactions)
if search_result.current_page is None or \
search_result.total_pages is None or \
search_result.current_page == search_result.total_pages:
last_page = True
else:
page = search_result.current_page + 1
return results
def mouseMoveEvent(self, event):
"""Determines if a drag is taking place, and initiates it"""
if (event.pos() - self.dragStartPosition).manhattanLength() < 10:
return
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
factory = self.factoryclass()
mimeData = QtCore.QMimeData()
try:
mimeData.setData("application/x-protocol", factory.serialize())
except:
mimeData.setData("application/x-protocol", cPickle.dumps(factory))
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
pixmap = QtGui.QPixmap()
pixmap = pixmap.grabWidget(self, self.frameRect())
# below makes the pixmap half transparent
# painter = QtGui.QPainter(pixmap)
# painter.setCompositionMode(painter.CompositionMode_DestinationIn)
# painter.fillRect(pixmap.rect(), QtGui.QColor(0, 0, 0, 127))
# painter.end()
drag.setPixmap(pixmap)
drag.setHotSpot(QtCore.QPoint(pixmap.width()/2, pixmap.height()/2))
drag.setPixmap(pixmap)
self.dragActive.emit(True)
result = drag.exec_(QtCore.Qt.MoveAction)
QtGui.QApplication.restoreOverrideCursor()
def get_file_path(self, name, relative_path):
"""
Return the path to a resource file.
"""
dist = self.get_distribution(name)
if dist is None:
raise LookupError('no distribution named %r found' % name)
return dist.get_resource_path(relative_path)
def create_model(self):
"""Create model from reader.
Returns:
:class:`psamm.datasource.native.NativeModel`.
"""
properties = {
'name': self.name,
'default_flux_limit': 1000
}
# Load objective as biomass reaction
objective = self.get_active_objective()
if objective is not None:
reactions = dict(objective.reactions)
if len(reactions) == 1:
reaction, value = next(iteritems(reactions))
if ((value < 0 and objective.type == 'minimize') or
(value > 0 and objective.type == 'maximize')):
properties['biomass'] = reaction
model = NativeModel(properties)
# Load compartments into model
for compartment in self.compartments:
model.compartments.add_entry(compartment)
# Load compounds into model
for compound in self.species:
model.compounds.add_entry(compound)
# Load reactions into model
for reaction in self.reactions:
model.reactions.add_entry(reaction)
# Create model reaction set
for reaction in model.reactions:
model.model[reaction.id] = None
# Convert reaction limits properties to proper limits
for reaction in model.reactions:
props = reaction.properties
if 'lower_flux' in props or 'upper_flux' in props:
lower = props.get('lower_flux')
upper = props.get('upper_flux')
model.limits[reaction.id] = reaction.id, lower, upper
# Load model limits from FBC V1 bounds if present, i.e. if FBC V1 is
# used instead of V2.
limits_lower = {}
limits_upper = {}
for bounds in self.flux_bounds:
reaction = bounds.reaction
if reaction in model.limits:
continue
if bounds.operation == SBMLFluxBoundEntry.LESS_EQUAL:
if reaction not in limits_upper:
limits_upper[reaction] = bounds.value
else:
raise ParseError(
'Conflicting bounds for {}'.format(reaction))
elif bounds.operation == SBMLFluxBoundEntry.GREATER_EQUAL:
if reaction not in limits_lower:
limits_lower[reaction] = bounds.value
else:
raise ParseError(
'Conflicting bounds for {}'.format(reaction))
elif bounds.operation == SBMLFluxBoundEntry.EQUAL:
if (reaction not in limits_lower and
reaction not in limits_upper):
limits_lower[reaction] = bounds.value
limits_upper[reaction] = bounds.value
else:
raise ParseError(
'Conflicting bounds for {}'.format(reaction))
for reaction in model.reactions:
if reaction.id in limits_lower or reaction.id in limits_upper:
lower = limits_lower.get(reaction.id, None)
upper = limits_upper.get(reaction.id, None)
model.limits[reaction.id] = reaction.id, lower, upper
return model
def read_cstring(self, terminator=b'\x00'):
"""Reads a single null termianted string
:return: string without bytes
:rtype: :class:`bytes`
"""
null_index = self.data.find(terminator, self.offset)
if null_index == -1:
raise RuntimeError("Reached end of buffer")
result = self.data[self.offset:null_index] # bytes without the terminator
self.offset = null_index + len(terminator) # advance offset past terminator
return result
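# A minimal sketch of the buffer walk above (hypothetical reader holding `data`/`offset`):
# reader.data, reader.offset = b'hello\x00world\x00', 0
# reader.read_cstring()   # -> b'hello', offset advances to 6
# reader.read_cstring()   # -> b'world', offset advances to 12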
def _calc_recip(self):
"""
Perform the reciprocal space summation. Calculates the quantity
E_recip = 1/(2PiV) sum_{G < Gmax} exp(-(G.G/4/eta))/(G.G) S(G)S(-G)
where
S(G) = sum_{k=1,N} q_k exp(-i G.r_k)
S(G)S(-G) = |S(G)|**2
This method is heavily vectorized to utilize numpy's C backend for
speed.
"""
numsites = self._s.num_sites
prefactor = 2 * pi / self._vol
erecip = np.zeros((numsites, numsites), dtype=np.float)
forces = np.zeros((numsites, 3), dtype=np.float)
coords = self._coords
rcp_latt = self._s.lattice.reciprocal_lattice
recip_nn = rcp_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0],
self._gmax)
frac_coords = [fcoords for (fcoords, dist, i, img) in recip_nn if dist != 0]
gs = rcp_latt.get_cartesian_coords(frac_coords)
g2s = np.sum(gs ** 2, 1)
expvals = np.exp(-g2s / (4 * self._eta))
grs = np.sum(gs[:, None] * coords[None, :], 2)
oxistates = np.array(self._oxi_states)
# create array where q_2[i,j] is qi * qj
qiqj = oxistates[None, :] * oxistates[:, None]
# calculate the structure factor
sreals = np.sum(oxistates[None, :] * np.cos(grs), 1)
simags = np.sum(oxistates[None, :] * np.sin(grs), 1)
for g, g2, gr, expval, sreal, simag in zip(gs, g2s, grs, expvals,
sreals, simags):
# Uses the identity sin(x)+cos(x) = 2**0.5 sin(x + pi/4)
m = (gr[None, :] + pi / 4) - gr[:, None]
np.sin(m, m)
m *= expval / g2
erecip += m
if self._compute_forces:
pref = 2 * expval / g2 * oxistates
factor = prefactor * pref * (
sreal * np.sin(gr) - simag * np.cos(gr))
forces += factor[:, None] * g[None, :]
forces *= EwaldSummation.CONV_FACT
erecip *= prefactor * EwaldSummation.CONV_FACT * qiqj * 2 ** 0.5
return erecip, forces
def run(self):
"""
Run the given command and yield each line(s) one by one.
.. note::
The difference between this method and :code:`self.execute()`
is that :code:`self.execute()` waits for the process to end
in order to return its output.
"""
with Popen(self.command, stdout=PIPE, shell=True) as process:
# We initiate a process and pass the command to it.
while True:
# We loop infinitely because we want to get the output
# until there is none.
# We get the current line from the process stdout.
#
# Note: we use rstrip() because we are paranoid :-)
current_line = process.stdout.readline().rstrip()
if not current_line:
# The current line is empty or equal to None.
# We break the loop.
break
# The line is not empty nor equal to None.
# We encode and yield the current line
yield self._decode_output(current_line)
def get_interval(ticker, session) -> Session:
"""
Get interval from defined session
Args:
ticker: ticker
session: session
Returns:
Session of start_time and end_time
Examples:
>>> get_interval('005490 KS Equity', 'day_open_30')
Session(start_time='09:00', end_time='09:30')
>>> get_interval('005490 KS Equity', 'day_normal_30_20')
Session(start_time='09:31', end_time='15:00')
>>> get_interval('005490 KS Equity', 'day_close_20')
Session(start_time='15:01', end_time='15:20')
>>> get_interval('700 HK Equity', 'am_open_30')
Session(start_time='09:30', end_time='10:00')
>>> get_interval('700 HK Equity', 'am_normal_30_30')
Session(start_time='10:01', end_time='11:30')
>>> get_interval('700 HK Equity', 'am_close_30')
Session(start_time='11:31', end_time='12:00')
>>> get_interval('ES1 Index', 'day_exact_2130_2230')
Session(start_time=None, end_time=None)
>>> get_interval('ES1 Index', 'allday_exact_2130_2230')
Session(start_time='21:30', end_time='22:30')
>>> get_interval('ES1 Index', 'allday_exact_2130_0230')
Session(start_time='21:30', end_time='02:30')
>>> get_interval('AMLP US', 'day_open_30')
Session(start_time=None, end_time=None)
>>> get_interval('7974 JP Equity', 'day_normal_180_300') is SessNA
True
>>> get_interval('Z 1 Index', 'allday_normal_30_30')
Session(start_time='01:31', end_time='20:30')
>>> get_interval('GBP Curncy', 'day')
Session(start_time='17:02', end_time='17:00')
"""
if '_' not in session:
session = f'{session}_normal_0_0'
interval = Intervals(ticker=ticker)
ss_info = session.split('_')
return getattr(interval, f'market_{ss_info.pop(1)}')(*ss_info)
def writes(nb, format, **kwargs):
"""Write a notebook to a string in a given format in the current nbformat version.
This function always writes the notebook in the current nbformat version.
Parameters
----------
nb : NotebookNode
The notebook to write.
format : (u'json', u'ipynb', u'py')
The format to write the notebook in.
Returns
-------
s : unicode
The notebook string.
"""
format = unicode(format)
if format == u'json' or format == u'ipynb':
return writes_json(nb, **kwargs)
elif format == u'py':
return writes_py(nb, **kwargs)
else:
raise NBFormatError('Unsupported format: %s' % format)
def applet_run(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /applet-xxxx/run API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Frun
"""
input_params_cp = Nonce.update_nonce(input_params)
return DXHTTPRequest('/%s/run' % object_id, input_params_cp, always_retry=always_retry, **kwargs)
def ensureBones(self,data):
"""
Helper method ensuring per-entity bone data has been properly initialized.
Should be called at the start of every method accessing per-entity data.
``data`` is the entity to check in dictionary form.
"""
if "_bones" not in data:
data["_bones"]={}
if self.name not in data["_bones"]:
data["_bones"][self.name]={"rot":self.start_rot[:],"length":self.blength} | Helper method ensuring per-entity bone data has been properly initialized.
Should be called at the start of every method accessing per-entity data.
``data`` is the entity to check in dictionary form. |
def format_search(q, **kwargs):
'''Formats the results of a search'''
m = search(q, **kwargs)
count = m['count']
if not count:
raise DapiCommError('Could not find any DAP packages for your query.')
for mdap in m['results']:
mdap = mdap['content_object']
return _format_dap_with_description(mdap)
def update_column(self, header, column):
"""Update a column named `header` in the table.
If length of column is smaller than number of rows, lets say
`k`, only the first `k` values in the column is updated.
Parameters
----------
header : str
Header of the column
column : iterable
Any iterable of appropriate length.
Raises
------
TypeError:
If length of `column` is shorter than number of rows.
ValueError:
If no column exists with title `header`.
"""
if not isinstance(header, basestring):
    raise TypeError("header must be of type str")
index = self.get_column_index(header)
for row, new_item in zip(self._table, column):
row[index] = new_item
def insert(self, item, safe=None): # pragma: nocover
''' [DEPRECATED] Please use save() instead. This actually calls
the underlying save function, so the name is confusing.
Insert an item into the work queue and flushes.'''
warnings.warn('Insert will be deprecated soon and removed in 1.0. Please use save()',
PendingDeprecationWarning)
self.add(item, safe=safe)
def _detect_encoding(data=None):
"""Return the default system encoding. If data is passed, try
to decode the data with the default system encoding or from a short
list of encoding types to test.
Args:
data - list of lists
Returns:
enc - system encoding
"""
import locale
enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2',
'utf-16', 'cp720']
code = locale.getpreferredencoding(False)
if data is None:
return code
if code.lower() not in enc_list:
enc_list.insert(0, code.lower())
for c in enc_list:
try:
for line in data:
line.decode(c)
except (UnicodeDecodeError, UnicodeError, AttributeError):
continue
return c
print("Encoding not detected. Please pass encoding value manually") | Return the default system encoding. If data is passed, try
to decode the data with the default system encoding or from a short
list of encoding types to test.
Args:
data - list of lists
Returns:
enc - system encoding |
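# A minimal usage sketch (illustrative values; the no-data result depends on the locale):
# _detect_encoding()                    # -> locale.getpreferredencoding(), e.g. 'utf-8'
# _detect_encoding([b'caf\xc3\xa9'])    # -> first listed encoding that decodes every line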
def hash(self):
"""Hash value based on file name and .ini file content"""
if self._hash is None:
# Only hash _camera.ini and _para.ini
fsh = [self.path.with_name(self._mid + "_camera.ini"),
self.path.with_name(self._mid + "_para.ini")]
tohash = [hashfile(f) for f in fsh]
tohash.append(self.path.name)
# Hash a maximum of ~1MB of the tdms file
tohash.append(hashfile(self.path, blocksize=65536, count=20))
self._hash = hashobj(tohash)
return self._hash
def _on_closed(self):
"""Invoked by connections when they are closed."""
self._connected.clear()
if not self._closing:
if self._on_close_callback:
self._on_close_callback()
else:
raise exceptions.ConnectionError('closed')
def t_STRING(t):
r"'([^'\\]+|\\'|\\\\)*'"
t.value = t.value.replace(r'\\', chr(92)).replace(r"\'", r"'")[1:-1]
return t
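# A worked example of the unescaping above (PLY lexer rule; the matched token text keeps
# the surrounding quotes): for a token whose raw text is
#     'it\'s a \\ path'
# the first replace turns \\ into a single backslash, the second turns \' into ', and the
# final [1:-1] strips the quotes, leaving:  it's a \ path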
def trk50(msg):
"""True track angle, BDS 5,0 message
Args:
msg (String): 28 bytes hexadecimal message (BDS50) string
Returns:
float: angle in degrees to true north (from 0 to 360)
"""
d = hex2bin(data(msg))
if d[11] == '0':
return None
sign = int(d[12]) # 1 -> west
value = bin2int(d[13:23])
if sign:
value = value - 1024
trk = value * 90.0 / 512.0
# convert from [-180, 180] to [0, 360]
if trk < 0:
trk = 360 + trk
return round(trk, 3)
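# A worked sketch of the decoding above (hypothetical bit values, not a real message):
# sign = 1, value = 900  ->  value - 1024 = -124      (10-bit two's complement, west)
# trk  = -124 * 90.0 / 512.0 = -21.796875
# negative, so map [-180, 180] onto [0, 360]: 360 - 21.796875 = 338.203 (rounded to 3 dp)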
def list_build_configurations_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""):
"""
List all BuildConfigurations associated with the given Project.
"""
data = list_build_configurations_for_project_raw(id, name, page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def rgb2rgba(rgb):
"""Take a row of RGB bytes, and convert to a row of RGBA bytes."""
rgba = []
for i in range(0, len(rgb), 3):
rgba += rgb[i:i+3]
rgba.append(255)
return rgba
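# Example: a row of two RGB pixels gains an opaque alpha byte after each pixel.
# rgb2rgba([255, 0, 0, 0, 255, 0])   # -> [255, 0, 0, 255, 0, 255, 0, 255]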
def _labels_cost(Xnum, Xcat, centroids, num_dissim, cat_dissim, gamma, membship=None):
"""Calculate labels and cost function given a matrix of points and
a list of centroids for the k-prototypes algorithm.
"""
n_points = Xnum.shape[0]
Xnum = check_array(Xnum)
cost = 0.
labels = np.empty(n_points, dtype=np.uint16)
for ipoint in range(n_points):
# Numerical cost = sum of Euclidean distances
num_costs = num_dissim(centroids[0], Xnum[ipoint])
cat_costs = cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)
# Gamma relates the categorical cost to the numerical cost.
tot_costs = num_costs + gamma * cat_costs
clust = np.argmin(tot_costs)
labels[ipoint] = clust
cost += tot_costs[clust]
return labels, cost
def set_cookie(self, name: str, value: str, *,
expires: Optional[str]=None,
domain: Optional[str]=None,
max_age: Optional[Union[int, str]]=None,
path: str='/',
secure: Optional[str]=None,
httponly: Optional[str]=None,
version: Optional[str]=None) -> None:
"""Set or update response cookie.
Sets new cookie or updates existent with new value.
Also updates only those params which are not None.
"""
old = self._cookies.get(name)
if old is not None and old.coded_value == '':
# deleted cookie
self._cookies.pop(name, None)
self._cookies[name] = value
c = self._cookies[name]
if expires is not None:
c['expires'] = expires
elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
del c['expires']
if domain is not None:
c['domain'] = domain
if max_age is not None:
c['max-age'] = str(max_age)
elif 'max-age' in c:
del c['max-age']
c['path'] = path
if secure is not None:
c['secure'] = secure
if httponly is not None:
c['httponly'] = httponly
if version is not None:
c['version'] = version
def getReadNoise(self, exten):
"""
Method for returning the readnoise of a detector (in counts).
Returns
-------
readnoise : float
The readnoise of the detector in **units of counts/electrons**.
"""
rn = self._image[exten]._rdnoise
if self.proc_unit == 'native':
rn = rn / self.getGain(exten)
return rn
def remove_license_requests(cursor, uuid_, uids):
"""Given a ``uuid`` and list of ``uids`` (user identifiers)
remove the identified users' license acceptance entries.
"""
if not isinstance(uids, (list, set, tuple,)):
raise TypeError("``uids`` is an invalid type: {}".format(type(uids)))
acceptors = list(set(uids))
# Remove the entries.
cursor.execute("""\
DELETE FROM license_acceptances
WHERE uuid = %s AND user_id = ANY(%s::text[])""", (uuid_, acceptors,))
def ip_address_delete(session, ifname, ifaddr):
"""
Deletes an IP address from interface record identified with the given
"ifname".
The arguments are similar to "ip address delete" command of iproute2.
:param session: Session instance connecting to database.
:param ifname: Name of interface.
:param ifaddr: IPv4 or IPv6 address.
:return: Instance of record or "None" if failed.
"""
def _remove_inet_addr(intf_inet, addr):
addr_list = intf_inet.split(',')
if addr not in addr_list:
LOG.debug(
'Interface "%s" does not have "ifaddr": %s',
intf.ifname, addr)
return intf_inet
else:
addr_list.remove(addr)
return ','.join(addr_list)
intf = ip_link_show(session, ifname=ifname)
if not intf:
LOG.debug('Interface "%s" does not exist', ifname)
return None
if ip.valid_ipv4(ifaddr):
intf.inet = _remove_inet_addr(intf.inet, ifaddr)
elif ip.valid_ipv6(ifaddr):
intf.inet6 = _remove_inet_addr(intf.inet6, ifaddr)
else:
LOG.debug('Invalid IP address for "ifaddr": %s', ifaddr)
return None
return intf
def update(self, iterable):
"""
Update bag with all elements in iterable.
>>> s = pbag([1])
>>> s.update([1, 2])
pbag([1, 1, 2])
"""
if iterable:
return PBag(reduce(_add_to_counters, iterable, self._counts))
return self
def getBlockParams(ws):
""" Auxiliary method to obtain ``ref_block_num`` and
``ref_block_prefix``. Requires a websocket connection to a
witness node!
"""
dynBCParams = ws.get_dynamic_global_properties()
ref_block_num = dynBCParams["head_block_number"] & 0xFFFF
ref_block_prefix = struct.unpack_from(
"<I", unhexlify(dynBCParams["head_block_id"]), 4
)[0]
return ref_block_num, ref_block_prefix
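# A worked sketch of the bit handling above (hypothetical block values; the real call needs
# a websocket connection, but the number handling can be checked offline):
import struct
from binascii import unhexlify
head_block_number = 0x01A2B3C4
ref_block_num = head_block_number & 0xFFFF          # keep the low 16 bits -> 0xB3C4 == 46020
head_block_id = "01a2b3c4" + "0011223344556677" + "8899aabbccddeeff"   # 20-byte id as hex
ref_block_prefix = struct.unpack_from("<I", unhexlify(head_block_id), 4)[0]
# bytes 4..7 are 00 11 22 33, read little-endian -> 0x33221100 == 857870592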
def instance_default(self, obj):
''' Get the default value that will be used for a specific instance.
Args:
obj (HasProps) : The instance to get the default value for.
Returns:
object
'''
return self.property.themed_default(obj.__class__, self.name, obj.themed_values())
def predict(self, x, batch_size=None, verbose=None, is_distributed=False):
"""Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array or list of Numpy arrays for local mode,
or as an RDD[Sample] for distributed mode
is_distributed: whether to run locally or on a cluster; the default value is False
# Returns
A Numpy array or RDD[Sample] of predictions.
"""
if batch_size or verbose:
raise Exception("we don't support batch_size or verbose for now")
if is_distributed:
if isinstance(x, np.ndarray):
input = to_sample_rdd(x, np.zeros([x.shape[0]]))
# np.asarray(self.bmodel.predict(x_rdd).collect())
elif isinstance(x, RDD):
input = x
return self.bmodel.predict(input)
else:
if isinstance(x, np.ndarray):
return self.bmodel.predict_local(x)
raise Exception("not supported type: %s" % x) | Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array or list of Numpy array for local mode.
as RDD[Sample] for distributed mode
is_distributed: used to control run in local or cluster. the default value is False
# Returns
A Numpy array or RDD[Sample] of predictions. |
def parse_alignment_summary_metrics(fn):
"""
Parse the output from Picard's CollectAlignmentSummaryMetrics and return as
a pandas DataFrame.
Parameters
----------
filename : str of filename or file handle
Filename of the Picard output you want to parse.
Returns
-------
df : pandas.DataFrame
Data from output file.
"""
df = pd.read_table(fn, index_col=0, skiprows=range(6) + [10, 11]).T
return df
def download_results(self, savedir=None, raw=True, calib=False, index=None):
"""Download the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide a different savedir here. It will be handed to PathManager.
"""
obsids = self.obsids if index is None else [self.obsids[index]]
for obsid in obsids:
pm = io.PathManager(obsid.img_id, savedir=savedir)
pm.basepath.mkdir(exist_ok=True)
to_download = []
if raw is True:
to_download.extend(obsid.raw_urls)
if calib is True:
to_download.extend(obsid.calib_urls)
for url in to_download:
basename = Path(url).name
print("Downloading", basename)
store_path = str(pm.basepath / basename)
try:
urlretrieve(url, store_path)
except Exception as e:
urlretrieve(url.replace("https", "http"), store_path)
return str(pm.basepath)
def get_or_create_exh_obj(full_cname=False, exclude=None, callables_fname=None):
r"""
Return global exception handler if set, otherwise create a new one and return it.
:param full_cname: Flag that indicates whether fully qualified
function/method/class property names are obtained for
functions/methods/class properties that use the
exception manager (True) or not (False).
There is a performance penalty if the flag is True as
the call stack needs to be traced. This argument is
only relevant if the global exception handler is not
set and a new one is created
:type full_cname: boolean
:param exclude: Module exclusion list. A particular callable in an
otherwise fully qualified name is omitted if it belongs
to a module in this list. If None all callables are
included
:type exclude: list of strings or None
:param callables_fname: File name that contains traced modules information.
File can be produced by either the
:py:meth:`pexdoc.pinspect.Callables.save` or
:py:meth:`pexdoc.ExHandle.save_callables`
methods
:type callables_fname: :ref:`FileNameExists` or None
:rtype: :py:class:`pexdoc.ExHandle`
:raises:
* OSError (File *[callables_fname]* could not be found)
* RuntimeError (Argument \\`exclude\\` is not valid)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
* RuntimeError (Argument \\`full_cname\\` is not valid)
"""
if not hasattr(__builtin__, "_EXH"):
set_exh_obj(
ExHandle(
full_cname=full_cname, exclude=exclude, callables_fname=callables_fname
)
)
return get_exh_obj()
def user_exists(self, username):
"""
Returns whether a user with username ``username`` exists.
:param str username: username of user
:return: whether a user with the specified username exists
:rtype: bool
:raises NetworkFailure: if there is an error communicating with the server
:return:
"""
path = "/users/{}".format(username)
return self._get(path).ok
def p_configure_sentence(self, t):
"""configure_sentence : CONFIGURE VAR
| CONFIGURE VAR LPAREN RECIPE_BEGIN recipe RECIPE_END RPAREN"""
if len(t) == 3:
t[0] = configure(t[2], reference=True, line=t.lineno(1))
else:
t[0] = configure(t[2], t[5], line=t.lineno(1))
def get_cached(self):
""" Get items from feed cache while trying to emulate
how API handles offset/since/before parameters
"""
def id_in_list(list, id):
if id:
if [i for i in list if i.id == id]:
return True
else:
raise PyPumpException("id %r not in feed." % self._since)
tmp = []
if self._before is not None:
# return list based on before param
if not id_in_list(self.feed._items, self._before):
return tmp
if isinstance(self._before, six.string_types):
found = False
for i in self.feed._items:
if not found:
if i.id == self._before:
found = True
continue
else:
tmp.append(i)
self._before = False
return tmp
if self._since is not None:
# return list based on since param
if not id_in_list(self.feed._items, self._since):
return tmp
if isinstance(self._since, six.string_types):
found = False
for i in self.feed._items:
if i.id == self._since:
found = True
break
else:
tmp.append(i)
self._since = False
return reversed(tmp)
if not hasattr(self, 'usedcache'):
self.usedcache = True # invalidate cache
if isinstance(self._offset, int):
# return list based on offset
return self.feed._items[self._offset:]
return self.feed._items
else:
return tmp
def _wrapper(self, q, start):
"""
_wrapper checks return status of Probe.fnc and provides the result for process managing
:param q: Queue for function results
:param start: Time of function run (used for logging)
:return: Return value or Exception
"""
try:
func_name = self.fnc.__name__
except AttributeError:
func_name = str(self.fnc)
logger.debug("Running \"%s\" with parameters: \"%s\":\t%s/%s"
% (func_name, str(self.kwargs), round(time.time() - start), self.timeout))
try:
result = self.fnc(**self.kwargs)
# let's log only first 50 characters of the response
logger.debug("callback result = %s", str(result)[:50])
q.put(result)
except self.expected_exceptions as ex:
logger.debug("expected exception was caught: %s", ex)
q.put(False)
except Exception as ex:
logger.debug("adding exception %s to queue", ex)
q.put(ex)
def draw(self, tree, bar_desc=None, save_cursor=True, flush=True):
"""Draw ``tree`` to the terminal
:type tree: dict
:param tree: ``tree`` should be a tree representing a hierarchy; each
key should be a string describing that hierarchy level and value
should also be ``dict`` except for leaves which should be
``BarDescriptors``. See ``BarDescriptor`` for a tree example.
:type bar_desc: BarDescriptor|NoneType
:param bar_desc: For describing non-leaf bars in that will be
drawn from ``tree``; certain attributes such as ``value``
and ``kwargs["max_value"]`` will of course be overridden
if provided.
:type flush: bool
:param flush: If this is set, output written will be flushed
:type save_cursor: bool
:param save_cursor: If this is set, cursor location will be saved before
drawing; this will OVERWRITE a previous save, so be sure to set
this accordingly (to your needs).
"""
if save_cursor:
self.cursor.save()
tree = deepcopy(tree)
# TODO: Automatically collapse hierarchy so something
# will always be displayable (well, unless the top-level)
# contains too many to display
lines_required = self.lines_required(tree)
ensure(lines_required <= self.cursor.term.height,
LengthOverflowError,
"Terminal is not long ({} rows) enough to fit all bars "
"({} rows).".format(self.cursor.term.height, lines_required))
bar_desc = BarDescriptor(type=Bar) if not bar_desc else bar_desc
self._calculate_values(tree, bar_desc)
self._draw(tree)
if flush:
self.cursor.flush()
def get_reports():
"""
Returns energy data from 1960 to 2014 across various factors.
"""
if False:
# If there was a Test version of this method, it would go here. But alas.
pass
else:
rows = _Constants._DATABASE.execute("SELECT data FROM energy".format(
hardware=_Constants._HARDWARE))
data = [r[0] for r in rows]
data = [_Auxiliary._byteify(_json.loads(r)) for r in data]
return _Auxiliary._byteify(data)
def _execute(self, workdir, with_mpirun=False, exec_args=None):
"""
Execute the executable in a subprocess inside workdir.
Some executables fail if we try to launch them with mpirun.
Use with_mpirun=False to run the binary without it.
"""
qadapter = self.manager.qadapter
if not with_mpirun: qadapter.name = None
if self.verbose:
print("Working in:", workdir)
script = qadapter.get_script_str(
job_name=self.name,
launch_dir=workdir,
executable=self.executable,
qout_path="qout_file.path",
qerr_path="qerr_file.path",
stdin=self.stdin_fname,
stdout=self.stdout_fname,
stderr=self.stderr_fname,
exec_args=exec_args
)
# Write the script.
script_file = os.path.join(workdir, "run" + self.name + ".sh")
with open(script_file, "w") as fh:
fh.write(script)
os.chmod(script_file, 0o740)
qjob, process = qadapter.submit_to_queue(script_file)
self.stdout_data, self.stderr_data = process.communicate()
self.returncode = process.returncode
#raise self.Error("%s returned %s\n cmd_str: %s" % (self, self.returncode, self.cmd_str))
return self.returncode
def _handleClassAttr(self):
'''
_handleClassAttr - Hack to ensure "class" and "style" show up in attributes when classes are set,
and doesn't when no classes are present on associated tag.
TODO: I don't like this hack.
'''
if len(self.tag._classNames) > 0:
dict.__setitem__(self, "class", self.tag.className)
else:
try:
dict.__delitem__(self, "class")
except:
pass
styleAttr = self.tag.style
if styleAttr.isEmpty() is False:
dict.__setitem__(self, "style", styleAttr)
else:
try:
dict.__delitem__(self, "style")
except:
pass
def commit(self) -> None:
"""
Attempt to commit all changes to LDAP database. i.e. forget all
rollbacks. However stay inside transaction management.
"""
if len(self._transactions) == 0:
raise RuntimeError("commit called outside transaction")
# If we have nested transactions, we don't actually commit, but push
# rollbacks up to previous transaction.
if len(self._transactions) > 1:
for on_rollback in reversed(self._transactions[-1]):
self._transactions[-2].insert(0, on_rollback)
_debug("commit")
self.reset()
def Many2ManyThroughModel(field):
'''Create a Many2Many through model with two foreign key fields and a
CompositeFieldId depending on the two foreign keys.'''
from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField
name_model = field.model._meta.name
name_relmodel = field.relmodel._meta.name
# The two models are the same.
if name_model == name_relmodel:
name_relmodel += '2'
through = field.through
# Create the through model
if through is None:
name = '{0}_{1}'.format(name_model, name_relmodel)
class Meta:
app_label = field.model._meta.app_label
through = ModelType(name, (StdModel,), {'Meta': Meta})
field.through = through
# The first field
field1 = ForeignKey(field.model,
related_name=field.name,
related_manager_class=makeMany2ManyRelatedManager(
field.relmodel,
name_model,
name_relmodel)
)
field1.register_with_model(name_model, through)
# The second field
field2 = ForeignKey(field.relmodel,
related_name=field.related_name,
related_manager_class=makeMany2ManyRelatedManager(
field.model,
name_relmodel,
name_model)
)
field2.register_with_model(name_relmodel, through)
pk = CompositeIdField(name_model, name_relmodel)
pk.register_with_model('id', through)
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
"""
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
"""
if entry in self.stable_entries:
return {entry: 1}, 0
comp = entry.composition
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
return decomp, ehull
raise ValueError("No valid decomp found!") | Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}. |
def error(self, message, *args, **kwargs):
"""Log error event.
Compatible with logging.error signature.
"""
self.system.error(message, *args, **kwargs)
def _get_dump_item_context(self, index, name, opts):
"""
Return a formatted dict context
"""
c = {
'item_no': index,
'label': name,
'name': name,
'models': ' '.join(opts['models']),
'natural_key': '',
}
if opts.get('use_natural_key', False):
c['natural_key'] = ' -n'
c.update(self.get_global_context())
return c
def link(self):
"""str: full path of the linked file entry."""
if not self.IsLink():
return ''
location = getattr(self.path_spec, 'location', None)
if location is None:
return ''
return self._file_system.GetDataByPath(location)
def set_motor_position(self, motor_name, position):
""" Sets the motor target position. """
self.call_remote_api('simxSetJointTargetPosition',
self.get_object_handle(motor_name),
position,
sending=True)
def _get_possible_circular_ref_contigs(self, nucmer_hits, log_fh=None, log_outprefix=None):
'''Returns a dict ref name => tuple(hit at start, hit at end) for each ref sequence in the hash nucmer_hits (each value is a list of nucmer hits)'''
writing_log_file = None not in [log_fh, log_outprefix]
maybe_circular = {}
all_nucmer_hits = []
for l in nucmer_hits.values():
all_nucmer_hits.extend(l)
nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits)
for ref_name, list_of_hits in nucmer_hits.items():
if writing_log_file:
print(log_outprefix, ref_name, 'Checking ' + str(len(list_of_hits)) + ' nucmer hits', sep='\t', file=log_fh)
longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits)
longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits)
if longest_start_hit == longest_end_hit:
second_longest_start_hit = self._get_longest_hit_at_ref_start(list_of_hits, hits_to_exclude={longest_start_hit})
second_longest_end_hit = self._get_longest_hit_at_ref_end(list_of_hits, hits_to_exclude={longest_end_hit})
if second_longest_start_hit is not None:
longest_start_hit = self._get_hit_nearest_ref_start([longest_start_hit, second_longest_start_hit])
if second_longest_end_hit is not None:
longest_end_hit = self._get_hit_nearest_ref_end([longest_end_hit, second_longest_end_hit])
if (
longest_start_hit is not None
and longest_end_hit is not None
and longest_start_hit != longest_end_hit
and self._hits_have_same_query(longest_start_hit, longest_end_hit)
):
if writing_log_file:
print(log_outprefix, ref_name, 'potential pair of nucmer hits for circularization:', sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_start_hit, sep='\t', file=log_fh)
print(log_outprefix, ref_name, '', longest_end_hit, sep='\t', file=log_fh)
shortest_hit_length = self._min_qry_hit_length([longest_start_hit, longest_end_hit])
has_longer_hit = self._has_qry_hit_longer_than(
nucmer_hits_by_qry[longest_start_hit.qry_name],
shortest_hit_length,
hits_to_exclude={longest_start_hit, longest_end_hit}
)
if writing_log_file and has_longer_hit:
print(log_outprefix, ref_name, 'cannot use this pair because longer match was found', sep='\t', file=log_fh)
can_circularise = self._can_circularise(longest_start_hit, longest_end_hit)
if writing_log_file and not can_circularise:
print(log_outprefix, ref_name, 'cannot use this pair because positions/orientations of matches no good', sep='\t', file=log_fh)
if (not has_longer_hit) and can_circularise:
    if writing_log_file:
        print(log_outprefix, ref_name, 'can use this pair of hits', sep='\t', file=log_fh)
    maybe_circular[ref_name] = (longest_start_hit, longest_end_hit)
return maybe_circular
def AD(frame, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
"""Chaikin A/D Line"""
return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.AD)
def parse_navigation_html_to_tree(html, id):
"""Parse the given ``html`` (an etree object) to a tree.
The ``id`` is required in order to assign the top-level tree id value.
"""
def xpath(x):
return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)
try:
value = xpath('//*[@data-type="binding"]/@data-value')[0]
is_translucent = value == 'translucent'
except IndexError:
is_translucent = False
if is_translucent:
id = TRANSLUCENT_BINDER_ID
tree = {'id': id,
'title': xpath('//*[@data-type="document-title"]/text()')[0],
'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])]
}
return tree
def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = set(self.funcs.keys())
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods |= set(self.instance._listMethods())
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods |= set(list_public_methods(self.instance))
return sorted(methods)
def source_title_header_element(feature, parent):
"""Retrieve source title header string from definitions."""
_ = feature, parent # NOQA
header = source_title_header['string_format']
return header.capitalize()
def get_cache_item(self):
'''Gets the cached item. Raises AttributeError if it hasn't been set.'''
if settings.DEBUG:
raise AttributeError('Caching disabled in DEBUG mode')
return getattr(self.template, self.options['template_cache_key'])
def as_obj(func):
""" A decorator used to return a JSON response with a dict
representation of the model instance. It expects the decorated function
to return a Model instance. It then converts the instance to dicts
and serializes it into a json response
Examples:
>>> @app.route('/api/shipments/<id>')
... @as_obj
... def get_shipment(id):
... return Shipment.get(id)
"""
@wraps(func)
def wrapper(*args, **kwargs):
response = func(*args, **kwargs)
return render_json_obj_with_requested_structure(response)
return wrapper
def AllBalancesZeroOrLess(self):
"""
Flag indicating if all balances are 0 or less.
Returns:
bool: True if all balances are <= 0. False, otherwise.
"""
for key, fixed8 in self.Balances.items():
if fixed8.value > 0:
return False
return True
def wait_for_lock(self, lockname, locktime=60, auto_renewal=False):
''' Gets a lock or waits until it is able to get it '''
pid = os.getpid()
caller = inspect.stack()[0][3]
try:
# rl = redlock.Redlock([{"host": settings.REDIS_SERVERS['std_redis']['host'], "port": settings.REDIS_SERVERS['std_redis']['port'], "db": settings.REDIS_SERVERS['std_redis']['db']}, ])
rl = redis_lock.Lock(self, lockname, expire=locktime, auto_renewal=auto_renewal)
except AssertionError:
if self.logger:
self.logger.error('Process {0} ({1}) could not get lock {2}. Going ahead without locking!!! {3}'.format(pid, caller, lockname, traceback.format_exc()))
return False
cont = 1
t0 = time.time()
lock = None
while not lock:
time.sleep(.05)
cont += 1
if cont % 20 == 0:
if self.logger:
self.logger.debug('Process {0} ({1}) waiting for lock {2}. {3} seconds elapsed.'.format(pid, caller, lockname, time.time() - t0))
# lock = rl.lock(lockname, locktime_ms)
try:
lock = rl.acquire()
except RedisError:
pass
if self.logger:
self.logger.debug('Process {0} ({1}) got lock {2} for {3} seconds'.format(pid, caller, lockname, locktime))
return rl | Gets a lock or waits until it is able to get it |
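A hedged usage sketch: `client` is assumed to be an instance of the Redis subclass carrying this method (optionally with a `logger`), and `run_nightly_report` is a hypothetical critical section.
lock = client.wait_for_lock('nightly-report', locktime=120, auto_renewal=True)
if lock:
    try:
        run_nightly_report()          # work done while holding the lock
    finally:
        lock.release()                # redis_lock.Lock exposes release()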
def _log_sum_sq(x, axis=None):
"""Computes log(sum(x**2))."""
return tf.reduce_logsumexp(
input_tensor=2. * tf.math.log(tf.abs(x)), axis=axis) | Computes log(sum(x**2)). |
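The identity behind this one-liner: logsumexp(2*log|x_i|) = log(sum(exp(2*log|x_i|))) = log(sum(x_i**2)), so the reduction never has to square tiny values first. A NumPy check (illustrative only, not the TF code path):
import numpy as np

x = np.array([1e-200, 3e-200, 4e-200])
stable = np.logaddexp.reduce(2.0 * np.log(np.abs(x)))   # log-sum-exp of 2*log|x|
naive = np.log(np.sum(x ** 2))                          # x**2 underflows to 0.0 -> -inf
print(stable, naive)                                    # approx -917.78 vs -inf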
def send(self, config, log, obs_id, beam_id):
"""
Send the pulsar data to the ftp server
Args:
config (dict): Dictionary of settings
log (logging.Logger): Python logging object
obs_id: observation id
beam_id: beam id
"""
log.info('Starting Pulsar Data Transfer...')
socket = self._ftp.transfercmd('STOR {0}_{1}'.format(obs_id, beam_id))
socket.send(json.dumps(config).encode())
socket.send(bytearray(1000 * 1000))
# Overwrites the metadata name in the config dict
# and re-sends the data to the receiver.
config['metadata']['name'] = 'candidate_two'
socket.send(json.dumps(config).encode())
socket.send(bytearray(1000 * 1000))
socket.close()
log.info('Pulsar Data Transfer Completed...') | Send the pulsar data to the ftp server
Args:
config (dict): Dictionary of settings
log (logging.Logger): Python logging object
obs_id: observation id
beam_id: beam id |
def _find_plugin_dir(module_type):
'''Find the directory containing the plugin definition for the given type.
Do this by searching all the paths where plugins can live for a dir that
matches the type name.'''
for install_dir in _get_plugin_install_dirs():
candidate = os.path.join(install_dir, module_type)
if os.path.isdir(candidate):
return candidate
else:
raise PluginCandidateError(
'No plugin found for `{}` module in paths:\n{}'.format(
module_type, '\n'.join(_get_plugin_install_dirs()))) | Find the directory containing the plugin definition for the given type.
Do this by searching all the paths where plugins can live for a dir that
matches the type name. |
def read_vensim(mdl_file):
"""
Construct a model from Vensim `.mdl` file.
Parameters
----------
mdl_file : <string>
The relative path filename for a raw Vensim `.mdl` file
Returns
-------
model: a PySD class object
Elements from the python model are loaded into the PySD class and ready to run
Examples
--------
>>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
"""
from .py_backend.vensim.vensim2py import translate_vensim
from .py_backend import functions
py_model_file = translate_vensim(mdl_file)
model = functions.Model(py_model_file)
model.mdl_file = mdl_file
return model | Construct a model from Vensim `.mdl` file.
Parameters
----------
mdl_file : <string>
The relative path filename for a raw Vensim `.mdl` file
Returns
-------
model: a PySD class object
Elements from the python model are loaded into the PySD class and ready to run
Examples
--------
>>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') |
def _update_conda_devel():
"""Update to the latest development conda package.
"""
conda_bin = _get_conda_bin()
channels = _get_conda_channels(conda_bin)
assert conda_bin, "Could not find anaconda distribution for upgrading bcbio"
subprocess.check_call([conda_bin, "install", "--quiet", "--yes"] + channels +
["bcbio-nextgen>=%s" % version.__version__.replace("a0", "a")])
return os.path.dirname(os.path.dirname(conda_bin)) | Update to the latest development conda package. |
def from_json_str(cls, json_str):
"""Convert json string representation into class instance.
Args:
json_str: json representation as string.
Returns:
New instance of the class with data loaded from json string.
"""
return cls.from_json(json.loads(json_str, cls=JsonDecoder)) | Convert json string representation into class instance.
Args:
json_str: json representation as string.
Returns:
New instance of the class with data loaded from json string. |
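A hedged usage sketch; `JsonMixin` stands for whatever base class in the surrounding module provides this method together with `JsonDecoder`, and the subclass only has to supply the matching `from_json` classmethod.
class Point(JsonMixin):                    # hypothetical subclass
    def __init__(self, x, y):
        self.x, self.y = x, y

    @classmethod
    def from_json(cls, json_dict):
        return cls(json_dict['x'], json_dict['y'])

p = Point.from_json_str('{"x": 1, "y": 2}')
print(p.x, p.y)                            # 1 2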
def allocate_IPv6(
self,
name,
id_network_type,
id_environment,
description,
id_environment_vip=None):
"""Inserts a new VLAN.
:param name: Name of Vlan. String with a maximum of 50 characters.
:param id_network_type: Identifier of the Network Type. Integer value and greater than zero.
:param id_environment: Identifier of the Environment. Integer value and greater than zero.
:param description: Description of Vlan. String with a maximum of 200 characters.
:param id_environment_vip: Identifier of the Environment Vip. Integer value and greater than zero.
:return: Following dictionary:
::
{'vlan': {'id': < id_vlan >,
'nome': < nome_vlan >,
'num_vlan': < num_vlan >,
'id_tipo_rede': < id_tipo_rede >,
'id_ambiente': < id_ambiente >,
'bloco1': < bloco1 >,
'bloco2': < bloco2 >,
'bloco3': < bloco3 >,
'bloco4': < bloco4 >,
'bloco5': < bloco5 >,
'bloco6': < bloco6 >,
'bloco7': < bloco7 >,
'bloco8': < bloco8 >,
'bloco': < bloco >,
'mask_bloco1': < mask_bloco1 >,
'mask_bloco2': < mask_bloco2 >,
'mask_bloco3': < mask_bloco3 >,
'mask_bloco4': < mask_bloco4 >,
'mask_bloco5': < mask_bloco5 >,
'mask_bloco6': < mask_bloco6 >,
'mask_bloco7': < mask_bloco7 >,
'mask_bloco8': < mask_bloco8 >,
'descricao': < descricao >,
'acl_file_name': < acl_file_name >,
'acl_valida': < acl_valida >,
'acl_file_name_v6': < acl_file_name_v6 >,
'acl_valida_v6': < acl_valida_v6 >,
'ativada': < ativada >}}
:raise VlanError: VLAN name already exists, DC division of the environment is invalid, or there is no VLAN number available.
:raise VlanNaoExisteError: VLAN not found.
:raise TipoRedeNaoExisteError: Network Type not registered.
:raise AmbienteNaoExisteError: Environment not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise InvalidParameterError: Name of Vlan and/or the identifier of the Environment is null or invalid.
:raise IPNaoDisponivelError: There is no network address available to create the VLAN.
:raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
vlan_map = dict()
vlan_map['name'] = name
vlan_map['id_network_type'] = id_network_type
vlan_map['id_environment'] = id_environment
vlan_map['description'] = description
vlan_map['id_environment_vip'] = id_environment_vip
code, xml = self.submit({'vlan': vlan_map}, 'POST', 'vlan/ipv6/')
return self.response(code, xml) | Inserts a new VLAN.
:param name: Name of Vlan. String with a maximum of 50 characters.
:param id_network_type: Identifier of the Network Type. Integer value and greater than zero.
:param id_environment: Identifier of the Environment. Integer value and greater than zero.
:param description: Description of Vlan. String with a maximum of 200 characters.
:param id_environment_vip: Identifier of the Environment Vip. Integer value and greater than zero.
:return: Following dictionary:
::
{'vlan': {'id': < id_vlan >,
'nome': < nome_vlan >,
'num_vlan': < num_vlan >,
'id_tipo_rede': < id_tipo_rede >,
'id_ambiente': < id_ambiente >,
'bloco1': < bloco1 >,
'bloco2': < bloco2 >,
'bloco3': < bloco3 >,
'bloco4': < bloco4 >,
'bloco5': < bloco5 >,
'bloco6': < bloco6 >,
'bloco7': < bloco7 >,
'bloco8': < bloco8 >,
'bloco': < bloco >,
'mask_bloco1': < mask_bloco1 >,
'mask_bloco2': < mask_bloco2 >,
'mask_bloco3': < mask_bloco3 >,
'mask_bloco4': < mask_bloco4 >,
'mask_bloco5': < mask_bloco5 >,
'mask_bloco6': < mask_bloco6 >,
'mask_bloco7': < mask_bloco7 >,
'mask_bloco8': < mask_bloco8 >,
'descricao': < descricao >,
'acl_file_name': < acl_file_name >,
'acl_valida': < acl_valida >,
'acl_file_name_v6': < acl_file_name_v6 >,
'acl_valida_v6': < acl_valida_v6 >,
'ativada': < ativada >}}
:raise VlanError: VLAN name already exists, DC division of the environment is invalid, or there is no VLAN number available.
:raise VlanNaoExisteError: VLAN not found.
:raise TipoRedeNaoExisteError: Network Type not registered.
:raise AmbienteNaoExisteError: Environment not registered.
:raise EnvironmentVipNotFoundError: Environment VIP not registered.
:raise InvalidParameterError: Name of Vlan and/or the identifier of the Environment is null or invalid.
:raise IPNaoDisponivelError: There is no network address available to create the VLAN.
:raise ConfigEnvironmentInvalidError: Invalid Environment Configuration or not registered
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. |
def _parse_remote_response(self, response):
"""
Parse JWKS from the HTTP response.
Should be overridden by subclasses to add support for e.g. signed
JWKS.
:param response: HTTP response from the 'jwks_uri' endpoint
:return: response parsed as JSON
"""
# Check if the content type is the right one.
try:
if response.headers["Content-Type"] != 'application/json':
logger.warning('Wrong Content_type ({})'.format(
response.headers["Content-Type"]))
except KeyError:
pass
logger.debug("Loaded JWKS: %s from %s" % (response.text, self.source))
try:
return json.loads(response.text)
except ValueError:
return None | Parse JWKS from the HTTP response.
Should be overridden by subclasses to add support for e.g. signed
JWKS.
:param response: HTTP response from the 'jwks_uri' endpoint
:return: response parsed as JSON |
def order_events(events, d=False):
"""
Group events that occur on the same day, then sort them alphabetically
by title, then sort by day. Returns a list of tuples that looks like
[(day: [events])], where day is the day of the event(s), and [events]
is an alphabetically sorted list of the events for the day.
"""
ordered_events = {}
for event in events:
try:
for occ in event.occurrence:
try:
ordered_events[occ].append(event)
except Exception:
ordered_events[occ] = [event]
except AttributeError: # no occurrence for this event
# This shouldn't happen, since an event w/o an occurrence
# shouldn't get this far, but if it does, just skip it since
# it shouldn't be displayed on the calendar anyway.
pass
if d:
# return as a dict without sorting by date
return ordered_events
else:
# return ordered_events as a list tuples sorted by date
return sorted(ordered_events.items()) | Group events that occur on the same day, then sort them alphabetically
by title, then sort by day. Returns a list of tuples that looks like
[(day: [events])], where day is the day of the event(s), and [events]
is an alphabetically sorted list of the events for the day. |
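A hedged sketch with stand-in event objects; real callers pass model instances whose `occurrence` attribute lists the days they fall on.
from collections import namedtuple
import datetime

Event = namedtuple('Event', ['title', 'occurrence'])      # stand-in for the real model
d1, d2 = datetime.date(2024, 5, 1), datetime.date(2024, 5, 2)
events = [Event('Picnic', [d1]), Event('AGM', [d1, d2])]

for day, day_events in order_events(events):
    print(day, [e.title for e in day_events])
# 2024-05-01 ['Picnic', 'AGM']
# 2024-05-02 ['AGM']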
def softDeactivate(rh):
"""
Deactivate a virtual machine by first shutting down Linux and
then log it off.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'SOFTOFF'
userid - userid of the virtual machine
parms['maxQueries'] - Maximum number of queries to issue.
Optional.
parms['maxWait'] - Maximum time to wait in seconds.
Optional,
unless 'maxQueries' is specified.
parms['poll'] - Polling interval in seconds. Optional,
unless 'maxQueries' is specified.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error
"""
rh.printSysLog("Enter powerVM.softDeactivate, userid: " +
rh.userid)
strCmd = "echo 'ping'"
iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
if iucvResults['overallRC'] == 0:
# We could talk to the machine, tell it to shutdown nicely.
strCmd = "shutdown -h now"
iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)
if iucvResults['overallRC'] == 0:
time.sleep(15)
else:
# Shutdown failed. Let CP take down the system
# after we log the results.
rh.printSysLog("powerVM.softDeactivate " + rh.userid +
" is unreachable. Treating it as already shutdown.")
else:
# Could not ping the machine. Treat it as a success
# after we log the results.
rh.printSysLog("powerVM.softDeactivate " + rh.userid +
" is unreachable. Treating it as already shutdown.")
# Tell z/VM to log off the system.
parms = ["-T", rh.userid]
smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms)
if smcliResults['overallRC'] == 0:
pass
elif (smcliResults['overallRC'] == 8 and smcliResults['rc'] == 200 and
(smcliResults['rs'] == 12 or smcliResults['rs'] == 16)):
# Tolerable error.
# Machine is already logged off or is logging off.
rh.printLn("N", rh.userid + " is already logged off.")
else:
# SMAPI API failed.
rh.printLn("ES", smcliResults['response'])
rh.updateResults(smcliResults) # Use results from invokeSMCLI
if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
# Wait for the system to log off.
waitResults = waitForVMState(
rh,
rh.userid,
'off',
maxQueries=rh.parms['maxQueries'],
sleepSecs=rh.parms['poll'])
if waitResults['overallRC'] == 0:
rh.printLn("N", "Userid '" + rh.userid +
" is in the desired state: off")
else:
rh.updateResults(waitResults)
rh.printSysLog("Exit powerVM.softDeactivate, rc: " +
str(rh.results['overallRC']))
return rh.results['overallRC'] | Deactivate a virtual machine by first shutting down Linux and
then log it off.
Input:
Request Handle with the following properties:
function - 'POWERVM'
subfunction - 'SOFTOFF'
userid - userid of the virtual machine
parms['maxQueries'] - Maximum number of queries to issue.
Optional.
parms['maxWait'] - Maximum time to wait in seconds.
Optional,
unless 'maxQueries' is specified.
parms['poll'] - Polling interval in seconds. Optional,
unless 'maxQueries' is specified.
Output:
Request Handle updated with the results.
Return code - 0: ok, non-zero: error |
def _completion_move(self, p_step, p_size):
"""
Visually selects completion specified by p_step (positive numbers
forwards, negative numbers backwards) and inserts it into edit_text.
If p_step results in value out of range of currently evaluated
completion candidates, the list is rewound to the start (if cycling
forwards) or to the end (if cycling backwards).
"""
current_position = self.completion_box.focus_position
try:
self.completion_box.set_focus(current_position + p_step)
except IndexError:
position = 0 if p_step > 0 else len(self.completion_box) - 1
self.completion_box.set_focus(position)
maxcols, = p_size
size = (maxcols, self.completion_box.height)
self.completion_box.calculate_visible(size)
candidate = self.completion_box.focus.original_widget.text
self.insert_completion(candidate) | Visually selects completion specified by p_step (positive numbers
forwards, negative numbers backwards) and inserts it into edit_text.
If p_step results in value out of range of currently evaluated
completion candidates, the list is rewound to the start (if cycling
forwards) or to the end (if cycling backwards). |
def optimize(thumbnail_file, jpg_command=None, png_command=None,
gif_command=None):
"""
A post processing function to optimize file size. Accepts commands
to optimize JPG, PNG and GIF images as arguments. Example:
THUMBNAILS = {
# Other options...
'POST_PROCESSORS': [
{
'processor': 'thumbnails.post_processors.optimize',
'png_command': 'optipng -force -o3 "%(filename)s"',
'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
},
],
}
Note: using output redirection in commands may cause unpredictable results.
For example 'optipng -force -o3 "%(filename)s" &> /dev/null' may cause
optimize command to fail on some systems.
"""
temp_dir = get_or_create_temp_dir()
thumbnail_filename = os.path.join(temp_dir, "%s" % shortuuid.uuid())
f = open(thumbnail_filename, 'wb')
f.write(thumbnail_file.read())
f.close()
# Detect filetype
filetype = imghdr.what(thumbnail_filename)
# Construct command to optimize image based on filetype
command = None
if filetype == "jpg" or filetype == "jpeg":
command = jpg_command
elif filetype == "png":
command = png_command
elif filetype == "gif":
command = gif_command
# Run Command
if command:
command = command % {'filename': thumbnail_filename}
call(command, shell=True)
optimized_file = File(open(thumbnail_filename, 'rb'))
os.remove(thumbnail_filename)
return optimized_file | A post processing function to optimize file size. Accepts commands
to optimize JPG, PNG and GIF images as arguments. Example:
THUMBNAILS = {
# Other options...
'POST_PROCESSORS': [
{
'processor': 'thumbnails.post_processors.optimize',
'png_command': 'optipng -force -o3 "%(filename)s"',
'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
},
],
}
Note: using output redirection in commands may cause unpredictable results.
For example 'optipng -force -o3 "%(filename)s" &> /dev/null' may cause
optimize command to fail on some systems. |
def wait_any(futures, timeout=None):
'''Wait for the completion of any (the first) one of multiple futures
:param list futures: A list of :class:`Future`\s
:param timeout:
The maximum time to wait. With ``None``, will block indefinitely.
:type timeout: float or None
:returns:
One of the futures from the provided list -- the first one to become
complete (or any of the ones that were already complete).
:raises WaitTimeout: if a timeout is provided and hit
'''
for fut in futures:
if fut.complete:
return fut
wait = _Wait(futures)
for fut in futures:
fut._waits.add(wait)
if wait.done.wait(timeout):
raise errors.WaitTimeout()
return wait.completed_future | Wait for the completion of any (the first) one of multiple futures
:param list futures: A list of :class:`Future`\s
:param timeout:
The maximum time to wait. With ``None``, will block indefinitely.
:type timeout: float or None
:returns:
One of the futures from the provided list -- the first one to become
complete (or any of the ones that were already complete).
:raises WaitTimeout: if a timeout is provided and hit |
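A hedged usage sketch; `client.fetch` and the `.value` attribute are assumptions standing in for whatever produces and exposes these Future objects.
futures = [client.fetch(url) for url in urls]       # hypothetical calls returning Futures
try:
    first = wait_any(futures, timeout=5.0)
    print('first to finish:', first.value)          # result attribute name assumed
except errors.WaitTimeout:
    print('nothing completed within 5 seconds')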
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
logger.info('Binding to rabbit', exchange=self._exchange, queue=self._queue)
self._channel.queue_bind(self.on_bindok, self._queue, self._exchange) | Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame |
def f_add_result_group(self, *args, **kwargs):
"""Adds an empty result group under the current node.
Adds the full name of the current node as prefix to the name of the group.
If current node is a single run (root) adds the prefix `'results.runs.run_%08d'` to the
full name where `'%08d'` is replaced by the index of the current run.
The `name` can also contain subgroups separated via colons, for example:
`name=subgroup1.subgroup2.subgroup3`. These other parent groups will automatically
be created.
"""
return self._nn_interface._add_generic(self, type_name=RESULT_GROUP,
group_type_name=RESULT_GROUP,
args=args, kwargs=kwargs) | Adds an empty result group under the current node.
Adds the full name of the current node as prefix to the name of the group.
If current node is a single run (root) adds the prefix `'results.runs.run_%08d'` to the
full name where `'%08d'` is replaced by the index of the current run.
The `name` can also contain subgroups separated via colons, for example:
`name=subgroup1.subgroup2.subgroup3`. These other parent groups will automatically
be created. |
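A hedged usage sketch assuming `traj` is a pypet Trajectory (or the single-run view of one); the group path is illustrative.
traj.f_add_result_group('analysis.filters', comment='Filtered signals')   # creates both levels
traj.f_add_result('analysis.filters.cutoff_hz', 40.0)                     # result placed inside the new group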
def render_chart_to_file(self, template_name: str, chart: Any, path: str):
"""
Render a chart or page to local html files.
:param chart: A Chart or Page object
:param path: The destination file which the html code write to
:param template_name: The name of template file.
"""
tpl = self.env.get_template(template_name)
html = tpl.render(chart=self.generate_js_link(chart))
write_utf8_html_file(path, self._reg_replace(html)) | Render a chart or page to local html files.
:param chart: A Chart or Page object
:param path: The destination file which the html code write to
:param template_name: The name of template file. |
def _CreateIndexIfNotExists(self, index_name, mappings):
"""Creates an Elasticsearch index if it does not exist.
Args:
index_name (str): name of the index.
mappings (dict[str, object]): mappings of the index.
Raises:
RuntimeError: if the Elasticsearch index cannot be created.
"""
try:
if not self._client.indices.exists(index_name):
self._client.indices.create(
body={'mappings': mappings}, index=index_name)
except elasticsearch.exceptions.ConnectionError as exception:
raise RuntimeError(
'Unable to create Elasticsearch index with error: {0!s}'.format(
exception)) | Creates an Elasticsearch index if it does not exist.
Args:
index_name (str): name of the index.
mappings (dict[str, object]): mappings of the index.
Raises:
RuntimeError: if the Elasticsearch index cannot be created. |
def get_client_cache_key(request_or_attempt: Union[HttpRequest, Any], credentials: dict = None) -> str:
"""
Build cache key name from request or AccessAttempt object.
:param request_or_attempt: HttpRequest or AccessAttempt object
:param credentials: credentials containing user information
:return cache_key: Hash key that is usable for Django cache backends
"""
if isinstance(request_or_attempt, HttpRequest):
username = get_client_username(request_or_attempt, credentials)
ip_address = get_client_ip_address(request_or_attempt)
user_agent = get_client_user_agent(request_or_attempt)
else:
username = request_or_attempt.username
ip_address = request_or_attempt.ip_address
user_agent = request_or_attempt.user_agent
filter_kwargs = get_client_parameters(username, ip_address, user_agent)
cache_key_components = ''.join(filter_kwargs.values())
cache_key_digest = md5(cache_key_components.encode()).hexdigest()
cache_key = f'axes-{cache_key_digest}'
return cache_key | Build cache key name from request or AccessAttempt object.
:param request_or_attempt: HttpRequest or AccessAttempt object
:param credentials: credentials containing user information
:return cache_key: Hash key that is usable for Django cache backends |
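To illustrate the key shape, the snippet below mirrors the construction above on fixed values; in real use the username, IP address and user agent come from the request, and the active AXES settings decide which of them end up in `filter_kwargs`.
from hashlib import md5

components = ''.join(['alice', '10.0.0.1', 'Mozilla/5.0'])
print('axes-' + md5(components.encode()).hexdigest())        # axes-<32-char md5 hexdigest>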
def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ):
"""
Encrypt a stream of data for a set of keys.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True} on success
Return {'error': ...} on error
"""
if config_dir is None:
config_dir = get_config_dir()
# ingest keys
tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir )
for key_info in recipient_key_infos:
res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to stash key %s' % key_info['key_id']}
# copy over our key
try:
sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir )
except Exception, e:
log.exception(e)
shutil.rmtree(tmpdir)
return {'error': 'No such private key'}
res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir )
if res is None:
shutil.rmtree(tmpdir)
return {'error': 'Failed to load sender private key'}
recipient_key_ids = [r['key_id'] for r in recipient_key_infos]
# do the encryption
gpg = gnupg.GPG( homedir=tmpdir )
res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True )
shutil.rmtree(tmpdir)
if res.status != 'encryption ok':
log.debug("encrypt_file error: %s" % res.__dict__)
log.debug("recipients: %s" % recipient_key_ids)
log.debug("signer: %s" % sender_key_info['key_id'])
return {'error': 'Failed to encrypt data'}
return {'status': True} | Encrypt a stream of data for a set of keys.
@sender_key_info should be a dict with
{
'key_id': ...
'key_data': ...
'app_name': ...
}
Return {'status': True} on success
Return {'error': ...} on error |
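A hedged calling sketch; the key ids and the ASCII-armored key blocks (`sender_pubkey_block`, `recipient_pubkey_block`) are placeholders, and the sender key is assumed to have been stashed earlier under the given app name.
sender = {'key_id': 'A1B2C3D4E5F6A7B8',
          'key_data': sender_pubkey_block,
          'app_name': 'files'}
recipients = [{'key_id': 'C0FFEE00C0FFEE00', 'key_data': recipient_pubkey_block}]

with open('plaintext.bin', 'rb') as fd_in:
    res = gpg_encrypt(fd_in, 'ciphertext.gpg', sender, recipients, passphrase='hunter2')
if 'error' in res:
    raise RuntimeError(res['error'])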
def _wordAfterCursor(self):
"""Get word, which is located before cursor
"""
cursor = self._qpart.textCursor()
textAfterCursor = cursor.block().text()[cursor.positionInBlock():]
match = _wordAtStartRegExp.search(textAfterCursor)
if match:
return match.group(0)
else:
return '' | Get word, which is located after cursor
def enable_reporting(self):
"""Call this method to explicitly enable reporting.
The current report will be uploaded, plus the previously recorded ones,
and the configuration will be updated so that future runs also upload
automatically.
"""
if self.status == Stats.ENABLED:
return
if not self.enableable:
logger.critical("Can't enable reporting")
return
self.status = Stats.ENABLED
self.write_config(self.status) | Call this method to explicitly enable reporting.
The current report will be uploaded, plus the previously recorded ones,
and the configuration will be updated so that future runs also upload
automatically. |
def get_content(self, url, params=None, limit=0, place_holder=None,
root_field='data', thing_field='children',
after_field='after', object_filter=None, **kwargs):
"""A generator method to return reddit content from a URL.
Starts at the initial url, and fetches content using the `after`
JSON data until `limit` entries have been fetched, or the
`place_holder` has been reached.
:param url: the url to start fetching content from
:param params: dictionary containing extra GET data to put in the url
:param limit: the number of content entries to fetch. If limit <= 0,
fetch the default for your account (25 for unauthenticated
users). If limit is None, then fetch as many entries as possible
(reddit returns at most 100 per request, however, PRAW will
automatically make additional requests as necessary).
:param place_holder: if not None, the method will fetch `limit`
content, stopping if it finds content with `id` equal to
`place_holder`. The place_holder item is the last item to be
yielded from this generator. Note that the use of `place_holder` is
not 100% reliable as the place holder item may no longer exist due
to being removed or deleted.
:param root_field: indicates the field in the json response that holds
the data. Most objects use 'data', however some (flairlist) don't
have the 'data' object. Use None for the root object.
:param thing_field: indicates the field under the root_field which
contains the list of things. Most objects use 'children'.
:param after_field: indicates the field which holds the after item
element
:param object_filter: if set to an integer value, fetch content from
the corresponding list index in the JSON response. For example
the JSON response for submission duplicates is a list of objects,
and the object we want to fetch from is at index 1. So we set
object_filter=1 to filter out the other useless list elements.
:type place_holder: a string corresponding to a reddit base36 id
without prefix, e.g. 'asdfasdf'
:returns: a list of reddit content, of type Subreddit, Comment,
Submission or user flair.
"""
_use_oauth = kwargs.get('_use_oauth', self.is_oauth_session())
objects_found = 0
params = params or {}
fetch_all = fetch_once = False
if limit is None:
fetch_all = True
params['limit'] = 1024 # Just use a big number
elif limit > 0:
params['limit'] = limit
else:
fetch_once = True
if hasattr(self, '_url_update'):
url = self._url_update(url) # pylint: disable=E1101
# While we still need to fetch more content to reach our limit, do so.
while fetch_once or fetch_all or objects_found < limit:
if _use_oauth: # Set the necessary _use_oauth value
assert self._use_oauth is False
self._use_oauth = _use_oauth
try:
page_data = self.request_json(url, params=params)
if object_filter:
page_data = page_data[object_filter]
finally: # Restore _use_oauth value
if _use_oauth:
self._use_oauth = False
fetch_once = False
root = page_data.get(root_field, page_data)
for thing in root[thing_field]:
yield thing
objects_found += 1
# Terminate when we've reached the limit, or place holder
if objects_found == limit or (place_holder and
thing.id == place_holder):
return
# Set/update the 'after' parameter for the next iteration
if root.get(after_field):
# We use `root.get` to also test if the value evaluates to True
params['after'] = root[after_field]
else:
return | A generator method to return reddit content from a URL.
Starts at the initial url, and fetches content using the `after`
JSON data until `limit` entries have been fetched, or the
`place_holder` has been reached.
:param url: the url to start fetching content from
:param params: dictionary containing extra GET data to put in the url
:param limit: the number of content entries to fetch. If limit <= 0,
fetch the default for your account (25 for unauthenticated
users). If limit is None, then fetch as many entries as possible
(reddit returns at most 100 per request, however, PRAW will
automatically make additional requests as necessary).
:param place_holder: if not None, the method will fetch `limit`
content, stopping if it finds content with `id` equal to
`place_holder`. The place_holder item is the last item to be
yielded from this generator. Note that the use of `place_holder` is
not 100% reliable as the place holder item may no longer exist due
to being removed or deleted.
:param root_field: indicates the field in the json response that holds
the data. Most objects use 'data', however some (flairlist) don't
have the 'data' object. Use None for the root object.
:param thing_field: indicates the field under the root_field which
contains the list of things. Most objects use 'children'.
:param after_field: indicates the field which holds the after item
element
:param object_filter: if set to an integer value, fetch content from
the corresponding list index in the JSON response. For example
the JSON response for submission duplicates is a list of objects,
and the object we want to fetch from is at index 1. So we set
object_filter=1 to filter out the other useless list elements.
:type place_holder: a string corresponding to a reddit base36 id
without prefix, e.g. 'asdfasdf'
:returns: a list of reddit content, of type Subreddit, Comment,
Submission or user flair. |
def delete_split(self, split_name):
""" Delete a split of the dataset.
Parameters
----------
split_name : str
name of the split to delete
"""
if self.has_split(split_name):
shutil.rmtree(os.path.join(self.split_dir, split_name)) | Delete a split of the dataset.
Parameters
----------
split_name : str
name of the split to delete |
def readline(self, prompt='', use_raw=None):
"""Read a line of input. Prompt and use_raw exist to be
compatible with other input routines and are ignored.
EOFError will be raised on EOF.
"""
line = self.input.readline()
if not line: raise EOFError
return line.rstrip("\n") | Read a line of input. Prompt and use_raw exist to be
compatible with other input routines and are ignored.
EOFError will be raised on EOF. |
def ReadFile(self, filename):
"""Reads artifact definitions from a file.
Args:
filename (str): name of the file to read from.
Yields:
ArtifactDefinition: an artifact definition.
"""
with io.open(filename, 'r', encoding='utf-8') as file_object:
for artifact_definition in self.ReadFileObject(file_object):
yield artifact_definition | Reads artifact definitions from a file.
Args:
filename (str): name of the file to read from.
Yields:
ArtifactDefinition: an artifact definition. |
def add_to_message(data, indent_level=0) -> list:
"""Adds data to the message object"""
message = []
if isinstance(data, str):
message.append(indent(
dedent(data.strip('\n')).strip(),
indent_level * ' '
))
return message
for line in data:
offset = 0 if isinstance(line, str) else 1
message += add_to_message(line, indent_level + offset)
return message | Adds data to the message object |
def saml_name_id_format_to_hash_type(name_format):
"""
Translate pySAML2 name format to satosa format
:type name_format: str
:rtype: satosa.internal_data.UserIdHashType
:param name_format: SAML2 name format
:return: satosa format
"""
msg = "saml_name_id_format_to_hash_type is deprecated and will be removed."
_warnings.warn(msg, DeprecationWarning)
name_id_format_to_hash_type = {
NAMEID_FORMAT_TRANSIENT: UserIdHashType.transient,
NAMEID_FORMAT_PERSISTENT: UserIdHashType.persistent,
NAMEID_FORMAT_EMAILADDRESS: UserIdHashType.emailaddress,
NAMEID_FORMAT_UNSPECIFIED: UserIdHashType.unspecified,
}
return name_id_format_to_hash_type.get(
name_format, UserIdHashType.transient
) | Translate pySAML2 name format to satosa format
:type name_format: str
:rtype: satosa.internal_data.UserIdHashType
:param name_format: SAML2 name format
:return: satosa format |
def grant_user_access(self, user, db_names, strict=True):
"""
Gives access to the databases listed in `db_names` to the user.
"""
return self._user_manager.grant_user_access(user, db_names,
strict=strict) | Gives access to the databases listed in `db_names` to the user. |
def get_exchanges(self, vhost=None):
"""
:returns: A list of dicts
:param string vhost: A vhost to query for exchanges, or None (default),
which triggers a query for all exchanges in all vhosts.
"""
if vhost:
vhost = quote(vhost, '')
path = Client.urls['exchanges_by_vhost'] % vhost
else:
path = Client.urls['all_exchanges']
exchanges = self._call(path, 'GET')
return exchanges | :returns: A list of dicts
:param string vhost: A vhost to query for exchanges, or None (default),
which triggers a query for all exchanges in all vhosts. |
async def rollback(self):
"""Roll back this transaction."""
if not self._parent._is_active:
return
await self._do_rollback()
self._is_active = False | Roll back this transaction. |
def _get_initial_args(objective_function,
initial_population,
initial_position,
population_size,
population_stddev,
max_iterations,
func_tolerance,
position_tolerance,
differential_weight,
crossover_prob,
seed):
"""Processes initial args."""
was_iterable = False
if initial_position is not None:
initial_position, was_iterable = _ensure_list(initial_position)
if initial_population is not None:
initial_population, was_iterable = _ensure_list(initial_population)
population = _get_starting_population(initial_population,
initial_position,
population_size,
population_stddev,
seed=seed)
differential_weight = tf.convert_to_tensor(
value=differential_weight, dtype=population[0].dtype.base_dtype)
crossover_prob = tf.convert_to_tensor(value=crossover_prob)
population_values = objective_function(*population)
if max_iterations is not None:
max_iterations = tf.convert_to_tensor(value=max_iterations)
func_tolerance = tf.convert_to_tensor(
value=func_tolerance, dtype=population_values.dtype.base_dtype)
position_tolerance = tf.convert_to_tensor(
value=position_tolerance, dtype=population[0].dtype.base_dtype)
return (was_iterable,
population,
population_values,
max_iterations,
func_tolerance,
position_tolerance,
differential_weight,
crossover_prob) | Processes initial args. |
def _generate_ffmpeg_cmd(
self,
cmd: List[str],
input_source: Optional[str],
output: Optional[str],
extra_cmd: Optional[str] = None,
) -> None:
"""Generate ffmpeg command line."""
self._argv = [self._ffmpeg]
# start command init
if input_source is not None:
self._put_input(input_source)
self._argv.extend(cmd)
# exists a extra cmd from customer
if extra_cmd is not None:
self._argv.extend(shlex.split(extra_cmd))
self._merge_filters()
self._put_output(output) | Generate ffmpeg command line. |
def sc_cuts_alg(self, viewer, event, msg=True):
"""Adjust cuts algorithm interactively.
"""
if self.cancut:
direction = self.get_direction(event.direction)
self._cycle_cuts_alg(viewer, msg, direction=direction)
return True | Adjust cuts algorithm interactively. |
def end_experience_collection_timer(self):
"""
Inform Metrics class that experience collection is done.
"""
if self.time_start_experience_collection:
curr_delta = time() - self.time_start_experience_collection
if self.delta_last_experience_collection is None:
self.delta_last_experience_collection = curr_delta
else:
self.delta_last_experience_collection += curr_delta
self.time_start_experience_collection = None | Inform Metrics class that experience collection is done. |
def register_hooked(self,
hooks, # type: Union[Type[Hook], Sequence[Type[Hook]]]
func, # type: Hooked
args_gen=None # type: Optional[ArgsGen]
):
# type: (Type[Hook], Callable, Optional[Callable]) -> None
"""Register func to be run when any of the hooks are run by parent
Args:
hooks: A Hook class or list of Hook classes of interest
func: The callable that should be run on that Hook
args_gen: Optionally specify the argument names that should be
passed to func. If not given then use func.call_types.keys
"""
if self.hooked is None:
self.hooked = {}
if args_gen is None:
args_gen = getattr(func, "call_types", {}).keys
if not isinstance(hooks, Sequence):
hooks = [hooks]
for hook_cls in hooks:
self.hooked[hook_cls] = (func, args_gen) | Register func to be run when any of the hooks are run by parent
Args:
hooks: A Hook class or list of Hook classes of interest
func: The callable that should be run on that Hook
args_gen: Optionally specify the argument names that should be
passed to func. If not given then use func.call_types.keys |