def add_with_properties(self, model, name=None, update_dict=None, bulk=True, **kwargs):
"""
Add a part and update its properties in one go.
In order to prevent the backend from updating the frontend you may add `suppress_kevents=True` as
    additional keyword=value argument to this method. This improves backend performance, at the
    cost that someone looking at the frontend will not see any changes until the page
    is refreshed.
    :param model: model of the part for which to add a new instance; should follow the model tree in KE-chain
:type model: :class:`Part`
    :param name: (optional) name for the new instance as a string; if None, the name of the model is used
:type name: basestring or None
:param update_dict: dictionary with keys being property names (str) or property_id (from the property models)
and values being property values
:type update_dict: dict or None
    :param bulk: True to use the bulk_update_properties API endpoint for KE-chain versions later than 2.1.0b
:type bulk: boolean or None
:param kwargs: (optional) additional keyword arguments that will be passed inside the update request
:type kwargs: dict or None
:return: the newly created :class:`Part`
:raises NotFoundError: when the property name is not a valid property of this part
:raises APIError: in case an Error occurs
Examples
--------
>>> bike = client.scope('Bike Project').part('Bike')
>>> wheel_model = client.scope('Bike Project').model('Wheel')
>>> bike.add_with_properties(wheel_model, 'Wooden Wheel', {'Spokes': 11, 'Material': 'Wood'})
"""
if self.category != Category.INSTANCE:
raise APIError("Part should be of category INSTANCE")
name = name or model.name
action = 'new_instance_with_properties'
properties_update_dict = dict()
for prop_name_or_id, property_value in update_dict.items():
if is_uuid(prop_name_or_id):
properties_update_dict[prop_name_or_id] = property_value
else:
properties_update_dict[model.property(prop_name_or_id).id] = property_value
if bulk:
r = self._client._request('POST', self._client._build_url('parts'),
data=dict(
name=name,
model=model.id,
parent=self.id,
properties=json.dumps(properties_update_dict),
**kwargs
),
params=dict(select_action=action))
if r.status_code != requests.codes.created: # pragma: no cover
raise APIError('{}: {}'.format(str(r), r.content))
return Part(r.json()['results'][0], client=self._client)
else: # do the old way
new_part = self.add(model, name=name) # type: Part
new_part.update(update_dict=update_dict, bulk=bulk)
        return new_part
def get_event(self, client, check):
"""
Returns an event for a given client & check name.
"""
data = self._request('GET', '/events/{}/{}'.format(client, check))
    return data.json()
def stop_server(self):
"""
Stop serving. Also stops the thread.
"""
if self.rpc_server is not None:
try:
self.rpc_server.socket.shutdown(socket.SHUT_RDWR)
except:
log.warning("Failed to shut down server socket")
        self.rpc_server.shutdown()
def _any_bound_condition_fails_criterion(agent, criterion):
"""Returns True if any bound condition fails to meet the specified
criterion.
Parameters
----------
agent: Agent
The agent whose bound conditions we evaluate
criterion: function
Evaluates criterion(a) for each a in a bound condition and returns True
if any agents fail to meet the criterion.
Returns
-------
any_meets: bool
True if and only if any of the agents in a bound condition fail to match
the specified criteria
"""
bc_agents = [bc.agent for bc in agent.bound_conditions]
for b in bc_agents:
if not criterion(b):
return True
    return False
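A minimal, self-contained sketch of how this helper can be exercised. The stub classes below are stand-ins of my own (the real Agent/BoundCondition classes come from the surrounding package); they only mimic the two attributes the function actually touches.

class StubBoundCondition:
    def __init__(self, agent):
        self.agent = agent

class StubAgent:
    def __init__(self, name, bound_conditions=()):
        self.name = name
        self.bound_conditions = list(bound_conditions)

# Criterion: every bound agent must have a non-empty name.
egfr = StubAgent('EGFR', [StubBoundCondition(StubAgent('GRB2')),
                          StubBoundCondition(StubAgent(''))])
print(_any_bound_condition_fails_criterion(egfr, lambda a: bool(a.name)))  # True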
def delete(self, request, bot_id, hook_id, id, format=None):
"""
Delete an existing telegram recipient
---
responseMessages:
- code: 401
message: Not authenticated
"""
bot = self.get_bot(bot_id, request.user)
hook = self.get_hook(hook_id, bot, request.user)
recipient = self.get_recipient(id, hook, request.user)
recipient.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)
def from_json(cls, service_dict):
"""Create a service object from a JSON string."""
sd = service_dict.copy()
service_endpoint = sd.get(cls.SERVICE_ENDPOINT)
if not service_endpoint:
logger.error(
'Service definition in DDO document is missing the "serviceEndpoint" key/value.')
raise IndexError
_type = sd.get('type')
if not _type:
logger.error('Service definition in DDO document is missing the "type" key/value.')
raise IndexError
sd.pop(cls.SERVICE_ENDPOINT)
sd.pop('type')
return cls(
service_endpoint,
_type,
sd
    )
def clone(self, document):
''' Serialize a document, remove its _id, and deserialize as a new
object '''
wrapped = document.wrap()
if '_id' in wrapped:
del wrapped['_id']
    return type(document).unwrap(wrapped, session=self)
def tt_qr(X, left_to_right=True):
"""
Orthogonalizes a TT tensor from left to right or
from right to left.
    :param: X - tensor to orthogonalise
    :param: left_to_right - True for left-to-right orthogonalization,
        False for right-to-left orthogonalization
:return: X_orth, R - orthogonal tensor and right (left)
upper (lower) triangular matrix
>>> import tt, numpy as np
>>> x = tt.rand(np.array([2, 3, 4, 5]), d=4)
>>> x_q, r = tt_qr(x, left_to_right=True)
    >>> np.allclose((r[0][0]*x_q).norm(), x.norm())
True
>>> x_u, l = tt_qr(x, left_to_right=False)
>>> np.allclose((l[0][0]*x_u).norm(), x.norm())
True
"""
# Get rid of redundant ranks (they cause technical difficulties).
X = X.round(eps=0)
numDims = X.d
coresX = tt.tensor.to_list(X)
if left_to_right:
# Left to right orthogonalization of the X cores.
for dim in xrange(0, numDims-1):
coresX = cores_orthogonalization_step(
coresX, dim, left_to_right=left_to_right)
last_core = coresX[numDims-1]
r1, n, r2 = last_core.shape
last_core, rr = np.linalg.qr(reshape(last_core, (-1, r2)))
coresX[numDims-1] = reshape(last_core, (r1, n, -1))
else:
# Right to left orthogonalization of X cores
for dim in xrange(numDims-1, 0, -1):
coresX = cores_orthogonalization_step(
coresX, dim, left_to_right=left_to_right)
last_core = coresX[0]
r1, n, r2 = last_core.shape
last_core, rr = np.linalg.qr(
np.transpose(reshape(last_core, (r1, -1)))
)
coresX[0] = reshape(
np.transpose(last_core),
(-1, n, r2))
rr = np.transpose(rr)
    return tt.tensor.from_list(coresX), rr
def remove_breakpoint(self, event_type, bp=None, filter_func=None):
"""
Removes a breakpoint.
:param bp: The breakpoint to remove.
:param filter_func: A filter function to specify whether each breakpoint should be removed or not.
"""
if bp is None and filter_func is None:
raise ValueError('remove_breakpoint(): You must specify either "bp" or "filter".')
try:
if bp is not None:
self._breakpoints[event_type].remove(bp)
else:
self._breakpoints[event_type] = [ b for b in self._breakpoints[event_type] if not filter_func(b) ]
except ValueError:
# the breakpoint is not found
        l.error('remove_breakpoint(): Breakpoint %s (type %s) is not found.', bp, event_type)
def add(self, album, objects, object_type=None, **kwds):
"""
Endpoint: /album/<id>/<type>/add.json
Add objects (eg. Photos) to an album.
The objects are a list of either IDs or Trovebox objects.
If Trovebox objects are used, the object type is inferred
automatically.
Returns the updated album object.
"""
return self._add_remove("add", album, objects, object_type,
                            **kwds)
def docs(root_url, path):
"""Generate URL for path in the Taskcluster docs."""
root_url = root_url.rstrip('/')
path = path.lstrip('/')
if root_url == OLD_ROOT_URL:
return 'https://docs.taskcluster.net/{}'.format(path)
else:
        return '{}/docs/{}'.format(root_url, path)
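A short usage sketch. OLD_ROOT_URL is not defined in the snippet above; the second call assumes it is the legacy 'https://taskcluster.net' root, which should be treated as an assumption rather than a confirmed value.

# New-style deployment root: the path is appended under /docs/.
print(docs('https://tc.example.com/', 'reference/platform/queue'))
# -> 'https://tc.example.com/docs/reference/platform/queue'

# Legacy root (assuming OLD_ROOT_URL == 'https://taskcluster.net').
print(docs('https://taskcluster.net', 'reference/platform/queue'))
# -> 'https://docs.taskcluster.net/reference/platform/queue'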
def normal(self):
'''
:return: Line
Returns a Line normal (perpendicular) to this Line.
'''
d = self.B - self.A
    return Line([-d.y, d.x], [d.y, -d.x])
def write(url, content, **args):
"""Put the object/collection into a file URL."""
with HTTPResource(url, **args) as resource:
        resource.write(content)
def resort_client_actions(portal):
"""Resorts client action views
"""
sorted_actions = [
"edit",
"contacts",
"view", # this redirects to analysisrequests
"analysisrequests",
"batches",
"samplepoints",
"profiles",
"templates",
"specs",
"orders",
"reports_listing"
]
type_info = portal.portal_types.getTypeInfo("Client")
actions = filter(lambda act: act.id in sorted_actions, type_info._actions)
missing = filter(lambda act: act.id not in sorted_actions, type_info._actions)
# Sort the actions
actions = sorted(actions, key=lambda act: sorted_actions.index(act.id))
if missing:
        # Move the actions not explicitly sorted to the end
actions.extend(missing)
# Reset the actions to type info
    type_info._actions = actions
def multiCall(*commands, dependent=True, bundle=False,
print_result=False, print_commands=False):
"""
Calls the function 'call' multiple times, given sets of commands
"""
results = []
dependent_failed = False
for command in commands:
if not dependent_failed:
response = call(command, print_result=print_result,
print_commands=print_commands)
# TODO Will an error ever return a code other than '1'?
if (response.returncode == 1) and dependent:
dependent_failed = True
else:
response = None
results.append(response)
if bundle:
result = Result()
for response in results:
if not response:
continue
elif response.returncode == 1:
result.returncode = 1
result.extendInformation(response)
processed_response = result
else:
processed_response = results
    return processed_response
def ensure_mingw_drive(win32_path):
r""" replaces windows drives with mingw style drives
Args:
win32_path (str):
CommandLine:
python -m utool.util_path --test-ensure_mingw_drive
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> win32_path = r'C:/Program Files/Foobar'
>>> result = ensure_mingw_drive(win32_path)
>>> print(result)
/c/Program Files/Foobar
"""
win32_drive, _path = splitdrive(win32_path)
mingw_drive = '/' + win32_drive[:-1].lower()
mingw_path = mingw_drive + _path
    return mingw_path
def getFactors(self, aLocation, axisOnly=False, allFactors=False):
"""
Return a list of all factors and math items at aLocation.
factor, mathItem, deltaName
    allFactors = True: include factors that are zero or near-zero
"""
deltas = []
aLocation.expand(self.getAxisNames())
limits = getLimits(self._allLocations(), aLocation)
for deltaLocationTuple, (mathItem, deltaName) in sorted(self.items()):
deltaLocation = Location(deltaLocationTuple)
deltaLocation.expand( self.getAxisNames())
factor = self._accumulateFactors(aLocation, deltaLocation, limits, axisOnly)
if not (factor-_EPSILON < 0 < factor+_EPSILON) or allFactors:
# only add non-zero deltas.
deltas.append((factor, mathItem, deltaName))
deltas = sorted(deltas, key=itemgetter(0), reverse=True)
    return deltas
def reprString(self, string, length):
"""
Output a string of length tokens in the original form.
If string is an integer, it is considered as an offset in the text.
Otherwise string is considered as a sequence of ids (see voc and
tokId).
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString(0, 3)
'mis'
>>> SA=SuffixArray('mississippi', UNIT_BYTE)
>>> SA.reprString([1, 4, 1, 3, 3, 2], 5)
'isipp'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString(0, 3)
'missi ssi ppi'
>>> SA=SuffixArray('missi ssi ppi', UNIT_WORD)
>>> SA.reprString([1, 3, 2], 3)
'missi ssi ppi'
"""
if isinstance(string, int):
length = min(length, self.length - string)
string = self.string[string:string + length]
voc = self.voc
res = self.tokSep.join((voc[id] for id in string[:length]))
if self.unit == UNIT_WORD:
res = res.replace(" \n", "\n")
res = res.replace("\n ", "\n")
if self.unit == UNIT_CHARACTER:
res = res.encode(self.encoding)
    return res
def clear_caches(delete_all=False):
"""Fortpy caches many things, that should be completed after each completion
finishes.
:param delete_all: Deletes also the cache that is normally not deleted,
like parser cache, which is important for faster parsing.
"""
global _time_caches
if delete_all:
_time_caches = []
_parser = { "default": CodeParser() }
else:
# normally just kill the expired entries, not all
for tc in _time_caches:
# check time_cache for expired entries
for key, (t, value) in list(tc.items()):
if t < time.time():
# delete expired entries
                    del tc[key]
def save_model(self, request, obj, form, change):
"""
Saves the message for the recipient and looks in the form instance
    for other possible recipients. Prevents duplication by excluding the
original recipient from the list of optional recipients.
When changing an existing message and choosing optional recipients,
the message is effectively resent to those users.
"""
obj.save()
if notification:
# Getting the appropriate notice labels for the sender and recipients.
if obj.parent_msg is None:
sender_label = 'messages_sent'
recipients_label = 'messages_received'
else:
sender_label = 'messages_replied'
recipients_label = 'messages_reply_received'
# Notification for the sender.
notification.send([obj.sender], sender_label, {'message': obj,})
if form.cleaned_data['group'] == 'all':
# send to all users
recipients = User.objects.exclude(pk=obj.recipient.pk)
else:
# send to a group of users
recipients = []
group = form.cleaned_data['group']
if group:
group = Group.objects.get(pk=group)
recipients.extend(
list(group.user_set.exclude(pk=obj.recipient.pk)))
# create messages for all found recipients
for user in recipients:
obj.pk = None
obj.recipient = user
obj.save()
if notification:
# Notification for the recipient.
                notification.send([user], recipients_label, {'message' : obj,})
def add_requirements(self, metadata_path):
"""Add additional requirements from setup.cfg to file metadata_path"""
additional = list(self.setupcfg_requirements())
if not additional: return
pkg_info = read_pkg_info(metadata_path)
if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
warnings.warn('setup.cfg requirements overwrite values from setup.py')
del pkg_info['Provides-Extra']
del pkg_info['Requires-Dist']
for k, v in additional:
pkg_info[k] = v
    write_pkg_info(metadata_path, pkg_info)
def copy(self):
"""
Creates copy of the digest CTX to allow to compute digest
while being able to hash more data
"""
new_digest = Digest(self.digest_type)
libcrypto.EVP_MD_CTX_copy(new_digest.ctx, self.ctx)
    return new_digest
def fill_triangle(setter, x0, y0, x1, y1, x2, y2, color=None, aa=False):
"""Draw solid triangle with points x0,y0 - x1,y1 - x2,y2"""
a = b = y = last = 0
if y0 > y1:
y0, y1 = y1, y0
x0, x1 = x1, x0
if y1 > y2:
y2, y1 = y1, y2
x2, x1 = x1, x2
if y0 > y1:
y0, y1 = y1, y0
x0, x1 = x1, x0
if y0 == y2: # Handle awkward all-on-same-line case as its own thing
a = b = x0
if x1 < a:
a = x1
elif x1 > b:
b = x1
if x2 < a:
a = x2
elif x2 > b:
b = x2
_draw_fast_hline(setter, a, y0, b - a + 1, color, aa)
dx01 = x1 - x0
dy01 = y1 - y0
dx02 = x2 - x0
dy02 = y2 - y0
dx12 = x2 - x1
dy12 = y2 - y1
sa = 0
sb = 0
# For upper part of triangle, find scanline crossings for segments
# 0-1 and 0-2. If y1=y2 (flat-bottomed triangle), the scanline y1
# is included here (and second loop will be skipped, avoiding a /0
# error there), otherwise scanline y1 is skipped here and handled
# in the second loop...which also avoids a /0 error here if y0=y1
# (flat-topped triangle).
if y1 == y2:
last = y1 # include y1 scanline
else:
last = y1 - 1 # skip it
for y in range(y, last + 1):
a = x0 + sa / dy01
b = x0 + sb / dy02
sa += dx01
sb += dx02
if a > b:
a, b = b, a
_draw_fast_hline(setter, a, y, b - a + 1, color, aa)
# For lower part of triangle, find scanline crossings for segments
# 0-2 and 1-2. This loop is skipped if y1=y2.
sa = dx12 * (y - y1)
sb = dx02 * (y - y0)
for y in range(y, y2 + 1):
a = x1 + sa / dy12
b = x0 + sb / dy02
sa += dx12
sb += dx02
if a > b:
a, b = b, a
        _draw_fast_hline(setter, a, y, b - a + 1, color, aa)
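A hedged usage sketch. _draw_fast_hline is assumed to live in the same module and to invoke setter(x, y, color) once per pixel; that callback signature is an assumption, not something shown above.

pixels = {}

def setter(x, y, color):
    # Record each pixel the triangle rasteriser touches (assumed callback shape).
    pixels[(int(x), int(y))] = color

fill_triangle(setter, 0, 0, 10, 0, 5, 8, color=(255, 0, 0))
print(len(pixels), "pixels filled")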
def is_all_field_none(self):
"""
:rtype: bool
"""
if self._id_ is not None:
return False
if self._created is not None:
return False
if self._updated is not None:
return False
if self._type_ is not None:
return False
if self._cvc2 is not None:
return False
if self._status is not None:
return False
if self._expiry_time is not None:
return False
    return True
def key_81_CosSin_2009():
r"""Key 81 pt CosSin filter, as published in [Key09]_.
Taken from file ``FilterModules.f90`` provided with 1DCSEM_.
License: `Apache License, Version 2.0,
<http://www.apache.org/licenses/LICENSE-2.0>`_.
"""
dlf = DigitalFilter('Key 81 CosSin (2009)', 'key_81_CosSin_2009')
dlf.base = np.array([
3.354626279025119e-04, 4.097349789797864e-04, 5.004514334406104e-04,
6.112527611295723e-04, 7.465858083766792e-04, 9.118819655545162e-04,
1.113775147844802e-03, 1.360368037547893e-03, 1.661557273173934e-03,
2.029430636295734e-03, 2.478752176666358e-03, 3.027554745375813e-03,
3.697863716482929e-03, 4.516580942612666e-03, 5.516564420760772e-03,
6.737946999085467e-03, 8.229747049020023e-03, 1.005183574463358e-02,
1.227733990306844e-02, 1.499557682047770e-02, 1.831563888873418e-02,
2.237077185616559e-02, 2.732372244729256e-02, 3.337326996032607e-02,
4.076220397836620e-02, 4.978706836786394e-02, 6.081006262521795e-02,
7.427357821433388e-02, 9.071795328941247e-02, 1.108031583623339e-01,
1.353352832366127e-01, 1.652988882215865e-01, 2.018965179946554e-01,
2.465969639416064e-01, 3.011942119122020e-01, 3.678794411714423e-01,
4.493289641172216e-01, 5.488116360940264e-01, 6.703200460356393e-01,
8.187307530779818e-01, 1e0, 1.221402758160170e+00,
1.491824697641270e+00, 1.822118800390509e+00, 2.225540928492468e+00,
2.718281828459046e+00, 3.320116922736548e+00, 4.055199966844675e+00,
4.953032424395115e+00, 6.049647464412947e+00, 7.389056098930650e+00,
9.025013499434122e+00, 1.102317638064160e+01, 1.346373803500169e+01,
1.644464677109706e+01, 2.008553692318767e+01, 2.453253019710935e+01,
2.996410004739703e+01, 3.659823444367799e+01, 4.470118449330084e+01,
5.459815003314424e+01, 6.668633104092515e+01, 8.145086866496814e+01,
9.948431564193386e+01, 1.215104175187350e+02, 1.484131591025766e+02,
1.812722418751512e+02, 2.214064162041872e+02, 2.704264074261528e+02,
3.302995599096489e+02, 4.034287934927351e+02, 4.927490410932563e+02,
6.018450378720822e+02, 7.350951892419732e+02, 8.978472916504184e+02,
1.096633158428459e+03, 1.339430764394418e+03, 1.635984429995927e+03,
1.998195895104119e+03, 2.440601977624501e+03, 2.980957987041728e+03])
dlf.factor = np.array([1.2214027581601701])
dlf.cos = np.array([
1.746412733678043e-02, -7.658725022064888e-02, 1.761673907472465e-01,
-2.840940679113589e-01, 3.680388960144733e-01, -4.115498161707958e-01,
4.181209762362728e-01, -3.967204599348831e-01, 3.608829691008270e-01,
-3.171870084102961e-01, 2.744932842186247e-01, -2.324673650676961e-01,
1.971144816936984e-01, -1.634915360178986e-01, 1.381406405905393e-01,
-1.125728533897677e-01, 9.619580319372194e-02, -7.640431432353632e-02,
6.748891657821673e-02, -5.097864570224415e-02, 4.853609305288441e-02,
-3.293272689265632e-02, 3.677175984620380e-02, -1.969323595300588e-02,
3.053726798991684e-02, -9.301135480582538e-03, 2.895215492109734e-02,
-1.875526095801418e-04, 3.181452657662026e-02, 9.025726238227111e-03,
3.955376604096631e-02, 1.966766645672513e-02, 5.318782805621459e-02,
3.300575875620110e-02, 7.409212944640006e-02, 4.972863917303501e-02,
1.029344264288086e-01, 6.776855697600163e-02, 1.357865756912759e-01,
7.511614666518443e-02, 1.522218287240260e-01, 3.034571997381229e-02,
8.802563675323094e-02, -1.689255322598353e-01, -1.756581788680092e-01,
-6.123863775740898e-01, -5.098359641153184e-01, -6.736869803920745e-01,
4.599561125225532e-01, 8.907010262082216e-01, 1.039153770711999e+00,
-2.178135931072732e+00, 8.040971159674268e-01, 5.659848584656202e-01,
-9.349050336534268e-01, 8.006099486213468e-01, -5.944960111930493e-01,
4.369614304892440e-01, -3.292566347310282e-01, 2.547426420681868e-01,
-2.010899026277397e-01, 1.609467208423519e-01, -1.299975550484158e-01,
1.056082501090365e-01, -8.608337452556068e-02, 7.027252107999236e-02,
-5.735742622053085e-02, 4.673270108060494e-02, -3.793635725863799e-02,
3.060786160620013e-02, -2.446220554726340e-02, 1.927399223200865e-02,
-1.486843016804444e-02, 1.111747692371507e-02, -7.939442960305236e-03,
5.298852472637883e-03, -3.200104589830043e-03, 1.665382777953919e-03,
-6.913074254614758e-04, 1.999065225130592e-04,
-2.955159288961187e-05])
dlf.sin = np.array([
7.478326513505658e-07, -2.572850425065560e-06, 5.225955618519281e-06,
-7.352539610140040e-06, 8.768819961093828e-06, -8.560004370841340e-06,
8.101932279460349e-06, -5.983552716117552e-06, 5.036792825138655e-06,
-1.584355068233649e-06, 1.426050228179462e-06, 3.972863429067356e-06,
-1.903788077376088e-06, 1.144652944379527e-05, -4.327773998196030e-06,
2.297298998355334e-05, -4.391227697686659e-06, 4.291202395830839e-05,
1.760279032167125e-06, 8.017887907026914e-05, 2.364651853689879e-05,
1.535031685829202e-04, 8.375427119939347e-05, 3.030115685600468e-04,
2.339455351760637e-04, 6.157392107422657e-04, 5.921808556382737e-04,
1.281873037121434e-03, 1.424276189020714e-03, 2.718506171172064e-03,
3.324504626808429e-03, 5.839859904586436e-03, 7.608663600764702e-03,
1.263571470998938e-02, 1.714199295539484e-02, 2.735013970005427e-02,
3.794840483226463e-02, 5.858519896601026e-02, 8.166914231915734e-02,
1.215508018998907e-01, 1.658946642767184e-01, 2.324389477118542e-01,
2.938956625118840e-01, 3.572525844816433e-01, 3.479235360502319e-01,
2.294314115090992e-01, -1.250412450354792e-01, -6.340986743027450e-01,
-9.703404081656508e-01, -2.734109755210948e-01, 1.321852608494946e+00,
6.762199721133603e-01, -2.093257651144232e+00, 1.707842350925794e+00,
-8.844618831465598e-01, 3.720792781726873e-01, -1.481509947473694e-01,
6.124339615448667e-02, -2.726194382687923e-02, 1.307668436907975e-02,
-6.682101544475918e-03, 3.599101395415812e-03, -2.030735143712865e-03,
1.197624324158372e-03, -7.382202519234128e-04, 4.756906961407787e-04,
-3.199977708080284e-04, 2.238628518300115e-04, -1.618377502708346e-04,
1.199233854156409e-04, -9.025345928219504e-05, 6.830860296946832e-05,
-5.143409372298764e-05, 3.804574823200909e-05, -2.720604959632104e-05,
1.839913059679674e-05, -1.140157702141663e-05, 6.172802138985788e-06,
-2.706562852604888e-06, 8.403636781016683e-07,
-1.356300450956746e-07])
    return dlf
def _close(self, args):
"""Request a connection close
This method indicates that the sender wants to close the
connection. This may be due to internal conditions (e.g. a
forced shut-down) or due to an error handling a specific
method, i.e. an exception. When a close is due to an
exception, the sender provides the class and method id of the
method which caused the exception.
RULE:
After sending this method any received method except the
Close-OK method MUST be discarded.
RULE:
The peer sending this method MAY use a counter or timeout
to detect failure of the other peer to respond correctly
with the Close-OK method.
RULE:
When a server receives the Close method from a client it
MUST delete all server-side resources associated with the
client's context. A client CANNOT reconnect to a context
after sending or receiving a Close method.
PARAMETERS:
reply_code: short
The reply code. The AMQ reply codes are defined in AMQ
RFC 011.
reply_text: shortstr
The localised reply text. This text can be logged as an
aid to resolving issues.
class_id: short
failing method class
When the close is provoked by a method exception, this
is the class of the method.
method_id: short
failing method ID
When the close is provoked by a method exception, this
is the ID of the method.
"""
reply_code = args.read_short()
reply_text = args.read_shortstr()
class_id = args.read_short()
method_id = args.read_short()
self._x_close_ok()
raise error_for_code(reply_code, reply_text,
                         (class_id, method_id), ConnectionError)
def add_line(self, line='', *, empty=False):
"""Adds a line to the current page.
If the line exceeds the :attr:`max_size` then an exception
is raised.
Parameters
-----------
line: :class:`str`
The line to add.
empty: :class:`bool`
Indicates if another empty line should be added.
Raises
------
RuntimeError
The line was too big for the current :attr:`max_size`.
"""
max_page_size = self.max_size - self._prefix_len - 2
if len(line) > max_page_size:
raise RuntimeError('Line exceeds maximum page size %s' % (max_page_size))
if self._count + len(line) + 1 > self.max_size:
self.close_page()
self._count += len(line) + 1
self._current_page.append(line)
if empty:
self._current_page.append('')
        self._count += 1
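If this add_line belongs to a discord.py-style commands.Paginator (an assumption based on the attribute names used above), typical usage looks like the sketch below.

from discord.ext import commands

p = commands.Paginator(max_size=2000)   # default prefix/suffix wrap each page in a code block
for i in range(5):
    p.add_line('item %d' % i)
for page in p.pages:  # each page is one string wrapped in the prefix/suffix
    print(len(page))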
def _create_arg_dict(self, tenant_id, data, in_sub, out_sub):
"""Create the argument dictionary. """
in_seg, in_vlan = self.get_in_seg_vlan(tenant_id)
out_seg, out_vlan = self.get_out_seg_vlan(tenant_id)
in_ip_dict = self.get_in_ip_addr(tenant_id)
out_ip_dict = self.get_out_ip_addr(tenant_id)
excl_list = [in_ip_dict.get('subnet'), out_ip_dict.get('subnet')]
arg_dict = {'tenant_id': tenant_id,
'tenant_name': data.get('tenant_name'),
'in_seg': in_seg, 'in_vlan': in_vlan,
'out_seg': out_seg, 'out_vlan': out_vlan,
'router_id': data.get('router_id'),
'in_sub': in_sub, 'out_sub': out_sub,
'in_gw': in_ip_dict.get('gateway'),
'out_gw': out_ip_dict.get('gateway'),
'excl_list': excl_list}
    return arg_dict
def fetch_json(self, uri_path, http_method='GET', query_params=None,
body=None, headers=None):
'''
Make a call to Trello API and capture JSON response. Raises an error
when it fails.
Returns:
dict: Dictionary with the JSON data
'''
query_params = query_params or {}
headers = headers or {}
query_params = self.add_authorisation(query_params)
uri = self.build_uri(uri_path, query_params)
allowed_methods = ("POST", "PUT", "DELETE")
if http_method in allowed_methods and 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
response, content = self.client.request(
uri=uri,
method=http_method,
body=body,
headers=headers
)
self.check_errors(uri, response)
    return json.loads(content.decode('utf-8'))
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources'] # reload ok
    return do_download()
def infer_format(filename:str) -> str:
"""Return extension identifying format of given filename"""
_, ext = os.path.splitext(filename)
    return ext
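A tiny illustration of the behaviour, based only on the function body above (os.path.splitext keeps the leading dot and only the last suffix).

print(infer_format('results/table.csv'))   # -> '.csv'
print(infer_format('archive.tar.gz'))      # -> '.gz' (only the last suffix)
print(infer_format('README'))              # -> ''   (no extension)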
def client_for(service, service_module, thrift_service_name=None):
"""Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
        the same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
        A Thrift-like class, ready to be instantiated and used
with TChannelSyncClient.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def init(
self,
tchannel,
hostport=None,
trace=False,
protocol_headers=None,
):
self.async_thrift = self.__async_client_class__(
tchannel=tchannel,
hostport=hostport,
trace=trace,
protocol_headers=protocol_headers,
)
self.threadloop = tchannel._threadloop
init.__name__ = '__init__'
methods = {
'__init__': init,
'__async_client_class__': async_client_for(
service=service,
service_module=service_module,
thrift_service_name=thrift_service_name,
)
}
methods.update({
method_name: generate_method(method_name)
for method_name in method_names
})
    return type(thrift_service_name + 'Client', (object,), methods)
def _bse_cli_list_ref_formats(args):
'''Handles the list-ref-formats subcommand'''
all_refformats = api.get_reference_formats()
if args.no_description:
liststr = all_refformats.keys()
else:
liststr = format_columns(all_refformats.items())
    return '\n'.join(liststr)
async def notifications(dev: Device, notification: str, listen_all: bool):
"""List available notifications and listen to them.
Using --listen-all [notification] allows to listen to all notifications
from the given subsystem.
    If the subsystem is omitted, notifications from all subsystems are
requested.
"""
notifications = await dev.get_notifications()
async def handle_notification(x):
click.echo("got notification: %s" % x)
if listen_all:
if notification is not None:
await dev.services[notification].listen_all_notifications(
handle_notification
)
else:
click.echo("Listening to all possible notifications")
await dev.listen_notifications(fallback_callback=handle_notification)
elif notification:
click.echo("Subscribing to notification %s" % notification)
for notif in notifications:
if notif.name == notification:
                await notif.activate(handle_notification)
                return
        click.echo("Unable to find notification %s" % notification)
else:
click.echo(click.style("Available notifications", bold=True))
for notification in notifications:
            click.echo("* %s" % notification)
def get(self, request):
"""Handle HTTP GET request.
Returns template and context from generate_page_title and
generate_sections to populate template.
"""
sections_list = self.generate_sections()
p = Paginator(sections_list, 25)
page = request.GET.get('page')
try:
sections = p.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
sections = p.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), return last page of results.
sections = p.page(p.num_pages)
context = {
'sections': sections,
'page_title': self.generate_page_title(),
'browse_type': self.browse_type
}
return render(
request,
self.template_path,
context
    )
def __check_prefix_conflict(self, existing_ni_or_ns_uri, incoming_prefix):
"""If existing_ni_or_ns_uri is a _NamespaceInfo object (which must
be in this set), then caller wants to map incoming_prefix to that
namespace. This function verifies that the prefix isn't already mapped
to a different namespace URI. If it is, an exception is raised.
Otherwise, existing_ni_or_ns_uri is treated as a string namespace URI
which must not already exist in this set. Caller wants to map
incoming_prefix to that URI. If incoming_prefix maps to anything
already, that represents a prefix conflict and an exception is raised.
"""
if incoming_prefix not in self.__prefix_map:
return
# Prefix found in the prefix map. Check that there are no conflicts
prefix_check_ni = self.__prefix_map[incoming_prefix]
if isinstance(existing_ni_or_ns_uri, _NamespaceInfo):
existing_ni = existing_ni_or_ns_uri # makes following code clearer?
if prefix_check_ni is not existing_ni:
# A different obj implies a different namespace URI is
# already assigned to the prefix.
raise DuplicatePrefixError(incoming_prefix, prefix_check_ni.uri, existing_ni.uri)
else:
ns_uri = existing_ni_or_ns_uri # makes following code clearer?
assert not self.contains_namespace(ns_uri) # TODO (bworrell): Should this be a raise?
        raise DuplicatePrefixError(incoming_prefix, prefix_check_ni.uri, ns_uri)
def encode(self, payload):
"""
Returns an encoded token for the given payload dictionary.
"""
token = jwt.encode(payload, self.signing_key, algorithm=self.algorithm)
    return token.decode('utf-8')
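For context, a hedged sketch of what this wraps using PyJWT directly. Note that the .decode('utf-8') above assumes PyJWT 1.x, where jwt.encode returns bytes; PyJWT 2.x already returns a str.

import jwt  # PyJWT

token = jwt.encode({'user_id': 42}, 'signing-key', algorithm='HS256')
# PyJWT 1.x returns bytes, 2.x returns str; normalise for printing.
print(token.decode('utf-8') if isinstance(token, bytes) else token)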
def pkg(pkg_path,
pkg_sum,
hash_type,
test=None,
**kwargs):
'''
Execute a packaged state run, the packaged state run will exist in a
tarball available locally. This packaged state
can be generated using salt-ssh.
CLI Example:
.. code-block:: bash
salt '*' state.pkg /tmp/salt_state.tgz 760a9353810e36f6d81416366fc426dc md5
'''
# TODO - Add ability to download from salt master or other source
popts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
if not os.path.isfile(pkg_path):
return {}
if not salt.utils.hashutils.get_hash(pkg_path, hash_type) == pkg_sum:
return {}
root = tempfile.mkdtemp()
s_pkg = tarfile.open(pkg_path, 'r:gz')
# Verify that the tarball does not extract outside of the intended root
members = s_pkg.getmembers()
for member in members:
if salt.utils.stringutils.to_unicode(member.path).startswith((os.sep, '..{0}'.format(os.sep))):
return {}
elif '..{0}'.format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
return {}
s_pkg.extractall(root)
s_pkg.close()
lowstate_json = os.path.join(root, 'lowstate.json')
with salt.utils.files.fopen(lowstate_json, 'r') as fp_:
lowstate = salt.utils.json.load(fp_)
# Check for errors in the lowstate
for chunk in lowstate:
if not isinstance(chunk, dict):
return lowstate
pillar_json = os.path.join(root, 'pillar.json')
if os.path.isfile(pillar_json):
with salt.utils.files.fopen(pillar_json, 'r') as fp_:
pillar_override = salt.utils.json.load(fp_)
else:
pillar_override = None
roster_grains_json = os.path.join(root, 'roster_grains.json')
if os.path.isfile(roster_grains_json):
with salt.utils.files.fopen(roster_grains_json, 'r') as fp_:
roster_grains = salt.utils.json.load(fp_)
if os.path.isfile(roster_grains_json):
popts['grains'] = roster_grains
popts['fileclient'] = 'local'
popts['file_roots'] = {}
popts['test'] = _get_test_value(test, **kwargs)
envs = os.listdir(root)
for fn_ in envs:
full = os.path.join(root, fn_)
if not os.path.isdir(full):
continue
popts['file_roots'][fn_] = [full]
st_ = salt.state.State(popts, pillar_override=pillar_override)
snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy'))
ret = st_.call_chunks(lowstate)
ret = st_.call_listen(lowstate, ret)
try:
shutil.rmtree(root)
except (IOError, OSError):
pass
_set_retcode(ret)
_snapper_post(popts, kwargs.get('__pub_jid', 'called localy'), snapper_pre)
    return ret
def exclude(prop):
'''Don't replicate property that is normally replicated: ordering column,
many-to-one relation that is marked for replication from other side.'''
if isinstance(prop, QueryableAttribute):
prop = prop.property
assert isinstance(prop, (Column, ColumnProperty, RelationshipProperty))
_excluded.add(prop)
if isinstance(prop, RelationshipProperty):
# Also exclude columns that participate in this relationship
for local in prop.local_columns:
            _excluded.add(local)
def apply_dict_of_variables_vfunc(
func, *args, signature, join='inner', fill_value=None
):
"""Apply a variable level function over dicts of DataArray, DataArray,
Variable and ndarray objects.
"""
args = [_as_variables_or_variable(arg) for arg in args]
names = join_dict_keys(args, how=join)
grouped_by_name = collect_dict_values(args, names, fill_value)
result_vars = OrderedDict()
for name, variable_args in zip(names, grouped_by_name):
result_vars[name] = func(*variable_args)
if signature.num_outputs > 1:
return _unpack_dict_tuples(result_vars, signature.num_outputs)
else:
        return result_vars
def create_transient(self, input_stream, original_name, length=None):
'''Create TransientFile and file on FS from given input stream and
original file name.'''
ext = os.path.splitext(original_name)[1]
transient = self.new_transient(ext)
if not os.path.isdir(self.transient_root):
os.makedirs(self.transient_root)
self._copy_file(input_stream, transient.path, length=length)
    return transient
def LEA(cpu, dest, src):
"""
Loads effective address.
Computes the effective address of the second operand (the source operand) and stores it in the first operand
(destination operand). The source operand is a memory address (offset part) specified with one of the processors
addressing modes; the destination operand is a general-purpose register. The address-size and operand-size
attributes affect the action performed by this instruction. The operand-size
attribute of the instruction is determined by the chosen register; the address-size attribute is determined by the
attribute of the code segment.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
"""
    dest.write(Operators.EXTRACT(src.address(), 0, dest.size))
def analyze_beam_spot(scan_base, combine_n_readouts=1000, chunk_size=10000000, plot_occupancy_hists=False, output_pdf=None, output_file=None):
    ''' Determines the mean x and y beam spot position as a function of time. For this, the data of a fixed number of read outs are combined ('combine_n_readouts'). The occupancy is determined
for the given combined events and stored into a pdf file. At the end the beam x and y is plotted into a scatter plot with absolute positions in um.
Parameters
----------
scan_base: list of str
scan base names (e.g.: ['//data//SCC_50_fei4_self_trigger_scan_390', ]
combine_n_readouts: int
the number of read outs to combine (e.g. 1000)
    chunk_size: int
the maximum chunk size used during read, if too big memory error occurs, if too small analysis takes longer
output_pdf: PdfPages
PdfPages file object, if none the plot is printed to screen
'''
time_stamp = []
x = []
y = []
for data_file in scan_base:
with tb.open_file(data_file + '_interpreted.h5', mode="r+") as in_hit_file_h5:
# get data and data pointer
meta_data_array = in_hit_file_h5.root.meta_data[:]
hit_table = in_hit_file_h5.root.Hits
# determine the event ranges to analyze (timestamp_start, start_event_number, stop_event_number)
parameter_ranges = np.column_stack((analysis_utils.get_ranges_from_array(meta_data_array['timestamp_start'][::combine_n_readouts]), analysis_utils.get_ranges_from_array(meta_data_array['event_number'][::combine_n_readouts])))
# create a event_numer index (important)
analysis_utils.index_event_number(hit_table)
# initialize the analysis and set settings
analyze_data = AnalyzeRawData()
analyze_data.create_tot_hist = False
analyze_data.create_bcid_hist = False
analyze_data.histogram.set_no_scan_parameter()
# variables for read speed up
index = 0 # index where to start the read out, 0 at the beginning, increased during looping
best_chunk_size = chunk_size
progress_bar = progressbar.ProgressBar(widgets=['', progressbar.Percentage(), ' ', progressbar.Bar(marker='*', left='|', right='|'), ' ', progressbar.AdaptiveETA()], maxval=hit_table.shape[0], term_width=80)
progress_bar.start()
# loop over the selected events
for parameter_index, parameter_range in enumerate(parameter_ranges):
logging.debug('Analyze time stamp ' + str(parameter_range[0]) + ' and data from events = [' + str(parameter_range[2]) + ',' + str(parameter_range[3]) + '[ ' + str(int(float(float(parameter_index) / float(len(parameter_ranges)) * 100.0))) + '%')
analyze_data.reset() # resets the data of the last analysis
# loop over the hits in the actual selected events with optimizations: determine best chunk size, start word index given
readout_hit_len = 0 # variable to calculate a optimal chunk size value from the number of hits for speed up
for hits, index in analysis_utils.data_aligned_at_events(hit_table, start_event_number=parameter_range[2], stop_event_number=parameter_range[3], start_index=index, chunk_size=best_chunk_size):
analyze_data.analyze_hits(hits) # analyze the selected hits in chunks
readout_hit_len += hits.shape[0]
progress_bar.update(index)
best_chunk_size = int(1.5 * readout_hit_len) if int(1.05 * readout_hit_len) < chunk_size else chunk_size # to increase the readout speed, estimated the number of hits for one read instruction
# get and store results
occupancy_array = analyze_data.histogram.get_occupancy()
projection_x = np.sum(occupancy_array, axis=0).ravel()
projection_y = np.sum(occupancy_array, axis=1).ravel()
x.append(analysis_utils.get_mean_from_histogram(projection_x, bin_positions=range(0, 80)))
y.append(analysis_utils.get_mean_from_histogram(projection_y, bin_positions=range(0, 336)))
time_stamp.append(parameter_range[0])
if plot_occupancy_hists:
plotting.plot_occupancy(occupancy_array[:, :, 0], title='Occupancy for events between ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[0])) + ' and ' + time.strftime('%H:%M:%S', time.localtime(parameter_range[1])), filename=output_pdf)
progress_bar.finish()
plotting.plot_scatter([i * 250 for i in x], [i * 50 for i in y], title='Mean beam position', x_label='x [um]', y_label='y [um]', marker_style='-o', filename=output_pdf)
if output_file:
with tb.open_file(output_file, mode="a") as out_file_h5:
rec_array = np.array(zip(time_stamp, x, y), dtype=[('time_stamp', float), ('x', float), ('y', float)])
try:
beam_spot_table = out_file_h5.create_table(out_file_h5.root, name='Beamspot', description=rec_array, title='Beam spot position', filters=tb.Filters(complib='blosc', complevel=5, fletcher32=False))
beam_spot_table[:] = rec_array
except tb.exceptions.NodeError:
logging.warning(output_file + ' has already a Beamspot note, do not overwrite existing.')
    return time_stamp, x, y
def update_project(id, **kwargs):
"""
Update an existing Project with new information
"""
content = update_project_raw(id, **kwargs)
if content:
        return utils.format_json(content)
def clean(args):
"""
%prog clean
Removes all symlinks from current folder
"""
p = OptionParser(clean.__doc__)
opts, args = p.parse_args(args)
for link_name in os.listdir(os.getcwd()):
if not op.islink(link_name):
continue
logging.debug("remove symlink `{0}`".format(link_name))
        os.unlink(link_name)
def mailto_to_envelope(mailto_str):
"""
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
"""
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
    return Envelope(bodytext=body, headers=headers)
def procrustes(anchors, X, scale=True, print_out=False):
""" Fit X to anchors by applying optimal translation, rotation and reflection.
Given m >= d anchor nodes (anchors in R^(m x d)), return transformation
of coordinates X (output of EDM algorithm) optimally matching anchors in least squares sense.
:param anchors: Matrix of shape m x d, where m is number of anchors, d is dimension of setup.
:param X: Matrix of shape N x d, where the last m points will be used to find fit with the anchors.
:param scale: set to True if the point set should be scaled to match the anchors.
:return: the transformed vector X, the rotation matrix, translation vector, and scaling factor.
"""
def centralize(X):
n = X.shape[0]
ones = np.ones((n, 1))
return X - np.multiply(1 / n * np.dot(ones.T, X), ones)
m = anchors.shape[0]
N, d = X.shape
assert m >= d, 'Have to give at least d anchor nodes.'
X_m = X[N - m:, :]
ones = np.ones((m, 1))
mux = 1 / m * np.dot(ones.T, X_m)
muy = 1 / m * np.dot(ones.T, anchors)
sigmax = 1 / m * np.linalg.norm(X_m - mux)**2
sigmaxy = 1 / m * np.dot((anchors - muy).T, X_m - mux)
try:
U, D, VT = np.linalg.svd(sigmaxy)
    except np.linalg.LinAlgError:
print('strange things are happening...')
print(sigmaxy)
print(np.linalg.matrix_rank(sigmaxy))
#this doesn't work and doesn't seem to be necessary! (why?)
# S = np.eye(D.shape[0])
# if (np.linalg.det(U)*np.linalg.det(VT.T) < 0):
# print('switching')
# S[-1,-1] = -1.0
# else:
# print('not switching')
# c = np.trace(np.dot(np.diag(D),S))/sigmax
# R = np.dot(U, np.dot(S,VT))
if (scale):
c = np.trace(np.diag(D)) / sigmax
else:
c = np.trace(np.diag(D)) / sigmax
if (print_out):
print('Optimal scale would be: {}. Setting it to 1 now.'.format(c))
c = 1.0
R = np.dot(U, VT)
t = muy.T - c * np.dot(R, mux.T)
X_transformed = (c * np.dot(R, (X - mux).T) + muy.T).T
return X_transformed, R, t, c | Fit X to anchors by applying optimal translation, rotation and reflection.
Given m >= d anchor nodes (anchors in R^(m x d)), return transformation
of coordinates X (output of EDM algorithm) optimally matching anchors in least squares sense.
:param anchors: Matrix of shape m x d, where m is number of anchors, d is dimension of setup.
:param X: Matrix of shape N x d, where the last m points will be used to find fit with the anchors.
:param scale: set to True if the point set should be scaled to match the anchors.
:return: the transformed vector X, the rotation matrix, translation vector, and scaling factor. |
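A minimal usage sketch for procrustes (not part of the original entry), assuming numpy is available and the function above is in scope; the point counts, rotation and translation below are illustrative only:
import numpy as np

rng = np.random.default_rng(0)
d, m, N = 2, 4, 10
X_true = rng.normal(size=(N, d))      # ground-truth coordinates
anchors = X_true[-m:, :]              # the last m points act as anchors
theta = np.pi / 5                     # arbitrary rotation angle
R0 = np.array([[np.cos(theta), -np.sin(theta)],
               [np.sin(theta),  np.cos(theta)]])
X_est = X_true @ R0.T + np.array([1.0, -2.0])   # rotated and translated estimate
X_fit, R_hat, t_hat, c_hat = procrustes(anchors, X_est, scale=False)
print(np.allclose(X_fit, X_true))     # expected: True for an exact rigid transform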
def infer_type(self, in_type):
"""infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states.
"""
return in_type, [in_type[0]]*len(self.list_outputs()), \
[in_type[0]]*len(self.list_auxiliary_states()) | infer_type interface. override to create new operators
Parameters
----------
in_type : list of np.dtype
list of argument types in the same order as
declared in list_arguments.
Returns
-------
in_type : list
list of argument types. Can be modified from in_type.
out_type : list
list of output types calculated from in_type,
in the same order as declared in list_outputs.
aux_type : Optional, list
list of aux types calculated from in_type,
in the same order as declared in list_auxiliary_states. |
def animate(self, **kwargs):
""" Animates the surface.
This function only animates the triangulated surface. There will be no other elements, such as control points
grid or bounding box.
Keyword arguments:
* ``colormap``: applies colormap to the surface
Colormaps are a visualization feature of Matplotlib. They can be used for several types of surface plots via
the following import statement: ``from matplotlib import cm``
The following link displays the list of Matplotlib colormaps and some examples of colormaps:
https://matplotlib.org/tutorials/colors/colormaps.html
"""
# Calling parent render function
super(VisSurface, self).render(**kwargs)
# Colormaps
surf_cmaps = kwargs.get('colormap', None)
# Initialize variables
tri_idxs = []
vert_coords = []
trisurf_params = []
frames = []
frames_tris = []
num_vertices = 0
# Start plotting of the surface and the control points grid
fig = plt.figure(figsize=self.vconf.figure_size, dpi=self.vconf.figure_dpi)
ax = Axes3D(fig)
# Start plotting
surf_count = 0
for plot in self._plots:
# Plot evaluated points
if plot['type'] == 'evalpts' and self.vconf.display_evalpts:
# Use internal triangulation algorithm instead of Qhull (MPL default)
verts = plot['ptsarr'][0]
tris = plot['ptsarr'][1]
# Extract zero-indexed vertex number list
tri_idxs += [[ti + num_vertices for ti in tri.data] for tri in tris]
# Extract vertex coordinates
vert_coords += [vert.data for vert in verts]
# Update number of vertices
num_vertices = len(vert_coords)
# Determine the color or the colormap of the triangulated plot
params = {}
if surf_cmaps:
try:
params['cmap'] = surf_cmaps[surf_count]
surf_count += 1
except IndexError:
params['color'] = plot['color']
else:
params['color'] = plot['color']
trisurf_params += [params for _ in range(len(tris))]
# Pre-processing for the animation
pts = np.array(vert_coords, dtype=self.vconf.dtype)
# Create the frames (Artists)
for tidx, pidx in zip(tri_idxs, trisurf_params):
frames_tris.append(tidx)
# Create MPL Triangulation object
triangulation = mpltri.Triangulation(pts[:, 0], pts[:, 1], triangles=frames_tris)
# Use custom Triangulation object and the choice of color/colormap to plot the surface
p3df = ax.plot_trisurf(triangulation, pts[:, 2], alpha=self.vconf.alpha, **pidx)
# Add to frames list
frames.append([p3df])
# Create MPL ArtistAnimation
ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True, repeat_delay=1000)
# Remove axes
if not self.vconf.display_axes:
plt.axis('off')
# Set axes equal
if self.vconf.axes_equal:
self.vconf.set_axes_equal(ax)
# Axis labels
if self.vconf.display_labels:
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Process keyword arguments
fig_filename = kwargs.get('fig_save_as', None)
fig_display = kwargs.get('display_plot', True)
# Display the plot
if fig_display:
plt.show()
else:
fig_filename = self.vconf.figure_image_filename if fig_filename is None else fig_filename
# Save the figure
self.vconf.save_figure_as(fig, fig_filename)
# Return the figure object
return fig | Animates the surface.
This function only animates the triangulated surface. There will be no other elements, such as control points
grid or bounding box.
Keyword arguments:
* ``colormap``: applies colormap to the surface
Colormaps are a visualization feature of Matplotlib. They can be used for several types of surface plots via
the following import statement: ``from matplotlib import cm``
The following link displays the list of Matplotlib colormaps and some examples of colormaps:
https://matplotlib.org/tutorials/colors/colormaps.html |
def get_failed_jobs(self, fail_running=False, fail_pending=False):
"""Return a dictionary with the subset of jobs that are marked as failed
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
failed_jobs : dict
Dictionary mapping from job key to `JobDetails` for the failed jobs.
"""
failed_jobs = {}
for job_key, job_details in self.jobs.items():
if job_details.status == JobStatus.failed:
failed_jobs[job_key] = job_details
elif job_details.status == JobStatus.partial_failed:
failed_jobs[job_key] = job_details
elif fail_running and job_details.status == JobStatus.running:
failed_jobs[job_key] = job_details
elif fail_pending and job_details.status <= JobStatus.pending:
failed_jobs[job_key] = job_details
return failed_jobs | Return a dictionary with the subset of jobs that are marked as failed
Parameters
----------
fail_running : `bool`
If True, consider running jobs as failed
fail_pending : `bool`
If True, consider pending jobs as failed
Returns
-------
failed_jobs : dict
Dictionary mapping from job key to `JobDetails` for the failed jobs. |
def set_subcommands(func, parser):
"""
Set subcommands.
"""
if hasattr(func, '__subcommands__') and func.__subcommands__:
sub_parser = parser.add_subparsers(
title=SUBCOMMANDS_LIST_TITLE, dest='subcommand',
description=SUBCOMMANDS_LIST_DESCRIPTION.format(
func.__cmd_name__),
help=func.__doc__)
for sub_func in func.__subcommands__.values():
parser = get_parser(sub_func, sub_parser)
for args, kwargs in get_shared(sub_func):
parser.add_argument(*args, **kwargs)
else:
for args, kwargs in get_shared(func):
parser.add_argument(*args, **kwargs) | Set subcommands. |
def set_memory(self, total=None, static=None):
"""
Set the maximum allowed memory.
Args:
total: The total memory. Integer. Unit: MBytes. If set to None,
this parameter will be neglected.
static: The static memory. Integer. Unit: MBytes. If set to None,
this parameter will be neglected.
"""
if total:
self.params["rem"]["mem_total"] = total
if static:
self.params["rem"]["mem_static"] = static | Set the maxium allowed memory.
Args:
total: The total memory. Integer. Unit: MBytes. If set to None,
this parameter will be neglected.
static: The static memory. Integer. Unit: MBytes. If set to None,
this parameter will be neglected.
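A brief usage sketch for set_memory; qcinp is a placeholder for an input object that exposes this method and the params['rem'] section above:
qcinp.set_memory(total=4000, static=200)
# afterwards: params["rem"]["mem_total"] == 4000 and params["rem"]["mem_static"] == 200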
def download_sysdig_capture(self, capture_id):
'''**Description**
Download a sysdig capture by id.
**Arguments**
- **capture_id**: the capture id to download.
**Success Return Value**
The bytes of the scap
'''
url = '{url}/api/sysdig/{id}/download?_product={product}'.format(
url=self.url, id=capture_id, product=self.product)
res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return False, self.lasterr
return True, res.content | **Description**
Download a sysdig capture by id.
**Arguments**
- **capture_id**: the capture id to download.
**Success Return Value**
The bytes of the scap |
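A hedged usage sketch for saving a capture to disk; sdclient and the capture id are placeholders and the client construction is omitted:
ok, res = sdclient.download_sysdig_capture(capture_id=123)  # hypothetical client and id
if ok:
    with open('capture.scap', 'wb') as f:
        f.write(res)                    # res holds the raw scap bytes on success
else:
    print('download failed:', res)      # on failure, res is the last error string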
def gaussian_prior_model_for_arguments(self, arguments):
"""
Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
prior_models: [PriorModel]
A new list of prior models with gaussian priors
"""
return CollectionPriorModel(
{
key: value.gaussian_prior_model_for_arguments(arguments)
if isinstance(value, AbstractPriorModel)
else value
for key, value in self.__dict__.items() if key not in ('component_number', 'item_number', 'id')
}
) | Parameters
----------
arguments: {Prior: float}
A dictionary of arguments
Returns
-------
prior_models: [PriorModel]
A new list of prior models with gaussian priors |
def _check_seismogenic_depths(self, upper_depth, lower_depth):
'''
Checks the seismic depths for physical consistency
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km)
'''
# Simple check on depths
if upper_depth:
if upper_depth < 0.:
raise ValueError('Upper seismogenic depth must be greater than'
' or equal to 0.0!')
else:
self.upper_depth = upper_depth
else:
self.upper_depth = 0.0
if not lower_depth:
raise ValueError('Lower seismogenic depth must be defined for '
'simple fault source!')
if lower_depth < self.upper_depth:
raise ValueError('Lower seismogenic depth must take a greater'
' value than upper seismogenic depth')
self.lower_depth = lower_depth | Checks the seismic depths for physical consistency
:param float upper_depth:
Upper seismogenic depth (km)
:param float lower_depth:
Lower seismogenic depth (km) |
def directionaldiff(f, x0, vec, **options):
"""
Return directional derivative of a function of n variables
Parameters
----------
fun: callable
analytical function to differentiate.
x0: array
vector location at which to differentiate fun. If x0 is an nxm array,
then fun is assumed to be a function of n*m variables.
vec: array
vector defining the line along which to take the derivative. It should
be the same size as x0, but need not be a vector of unit length.
**options:
optional arguments to pass on to Derivative.
Returns
-------
dder: scalar
estimate of the first derivative of fun in the specified direction.
Examples
--------
At the global minimizer (1,1) of the Rosenbrock function,
compute the directional derivative in the direction [1 2]
>>> import numpy as np
>>> import numdifftools as nd
>>> vec = np.r_[1, 2]
>>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2
>>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True)
>>> np.allclose(dd, 0)
True
>>> np.abs(info.error_estimate)<1e-14
True
See also
--------
Derivative,
Gradient
"""
x0 = np.asarray(x0)
vec = np.asarray(vec)
if x0.size != vec.size:
raise ValueError('vec and x0 must be the same shapes')
vec = np.reshape(vec/np.linalg.norm(vec.ravel()), x0.shape)
return Derivative(lambda t: f(x0+t*vec), **options)(0) | Return directional derivative of a function of n variables
Parameters
----------
fun: callable
analytical function to differentiate.
x0: array
vector location at which to differentiate fun. If x0 is an nxm array,
then fun is assumed to be a function of n*m variables.
vec: array
vector defining the line along which to take the derivative. It should
be the same size as x0, but need not be a vector of unit length.
**options:
optional arguments to pass on to Derivative.
Returns
-------
dder: scalar
estimate of the first derivative of fun in the specified direction.
Examples
--------
At the global minimizer (1,1) of the Rosenbrock function,
compute the directional derivative in the direction [1 2]
>>> import numpy as np
>>> import numdifftools as nd
>>> vec = np.r_[1, 2]
>>> rosen = lambda x: (1-x[0])**2 + 105*(x[1]-x[0]**2)**2
>>> dd, info = nd.directionaldiff(rosen, [1, 1], vec, full_output=True)
>>> np.allclose(dd, 0)
True
>>> np.abs(info.error_estimate)<1e-14
True
See also
--------
Derivative,
Gradient |
def write_chisq(page, injList, grbtag):
"""
Write injection chisq plots to markup.page object page
"""
if injList:
th = ['']+injList + ['OFFSOURCE']
else:
th= ['','OFFSOURCE']
injList = ['OFFSOURCE']
td = []
plots = ['bank_veto','auto_veto','chi_square', 'mchirp']
for test in plots:
pTag = test.replace('_',' ').title()
d = [pTag]
for inj in injList + ['OFFSOURCE']:
plot = markup.page()
p = "%s/plots_clustered/GRB%s_%s_vs_snr_zoom.png" % (inj, grbtag,
test)
plot.a(href=p, title="%s %s versus SNR" % (inj, pTag))
plot.img(src=p)
plot.a.close()
d.append(plot())
td.append(d)
page = write_table(page, th, td)
return page | Write injection chisq plots to markup.page object page |
def make_url(*args, **kwargs):
"""Makes a URL from component parts"""
base = "/".join(args)
if kwargs:
return "%s?%s" % (base, urlencode(kwargs))
else:
return base | Makes a URL from component parts |
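For example, combining path parts with query keyword arguments (values are illustrative):
make_url("api", "v1", "users")            # -> "api/v1/users"
make_url("api", "v1", "users", page=2)    # -> "api/v1/users?page=2"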
def _check_for_answers(self, pk):
"""
Callback called for every packet received to check if we are
waiting for an answer on this port. If so, then cancel the retry
timer.
"""
longest_match = ()
if len(self._answer_patterns) > 0:
data = (pk.header,) + tuple(pk.data)
for p in list(self._answer_patterns.keys()):
logger.debug('Looking for pattern match on %s vs %s', p, data)
if len(p) <= len(data):
if p == data[0:len(p)]:
match = data[0:len(p)]
if len(match) >= len(longest_match):
logger.debug('Found new longest match %s', match)
longest_match = match
if len(longest_match) > 0:
self._answer_patterns[longest_match].cancel()
del self._answer_patterns[longest_match] | Callback called for every packet received to check if we are
waiting for an answer on this port. If so, then cancel the retry
timer. |
def _pnorm_default(x, p):
"""Default p-norm implementation."""
return np.linalg.norm(x.data.ravel(), ord=p) | Default p-norm implementation. |
def calculate_md5(filename, length):
"""Calculate the MD5 hash of a file, up to length bytes.
Returns the MD5 in its binary form, as a 16-byte string. Raises IOError
or OSError in case of error.
"""
assert length >= 0
# shortcut: MD5 of an empty string is 'd41d8cd98f00b204e9800998ecf8427e',
# represented here in binary
if length == 0:
return '\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04\xe9\x80\t\x98\xec\xf8\x42\x7e'
md5_summer = hashlib.md5()
f = open(filename, 'rb')
try:
bytes_read = 0
while bytes_read < length:
chunk_size = min(MD5_CHUNK_SIZE, length - bytes_read)
chunk = f.read(chunk_size)
if not chunk:
# found EOF: means length was larger than the file size, or
# file was truncated while reading -- print warning?
break
md5_summer.update(chunk)
bytes_read += len(chunk)
finally:
f.close()
md5 = md5_summer.digest()
return md5 | Calculate the MD5 hash of a file, up to length bytes.
Returns the MD5 in its binary form, as a 16-byte string. Raises IOError
or OSError in case of error. |
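A small usage sketch, assuming MD5_CHUNK_SIZE is defined in the surrounding module; the file path below is a placeholder:
import binascii

digest = calculate_md5('/tmp/example.bin', 1024)   # hash at most the first 1024 bytes
print(binascii.hexlify(digest))                    # 16-byte digest shown as hex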
def get_docker_tag(platform: str, registry: str) -> str:
""":return: docker tag to be used for the container"""
platform = platform if any(x in platform for x in ['build.', 'publish.']) else 'build.{}'.format(platform)
if not registry:
registry = "mxnet_local"
return "{0}/{1}".format(registry, platform) | :return: docker tag to be used for the container |
async def _get_popular_people_page(self, page=1):
"""Get a specific page of popular person data.
Arguments:
page (:py:class:`int`, optional): The page to get.
Returns:
:py:class:`dict`: The page data.
"""
return await self.get_data(self.url_builder(
'person/popular',
url_params=OrderedDict(page=page),
)) | Get a specific page of popular person data.
Arguments:
page (:py:class:`int`, optional): The page to get.
Returns:
:py:class:`dict`: The page data. |
def to_bytes(self, frame, state):
"""
Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream.
"""
# Generate the bytes from the frame
frame = six.binary_type(frame)
return self.encode_length(frame, state) + frame | Convert a single frame into bytes that can be transmitted on
the stream.
:param frame: The frame to convert. Should be the same type
of object returned by ``to_frame()``.
:param state: An instance of ``FramerState``. This object may
be used to track information across calls to the
method.
:returns: Bytes that may be transmitted on the stream. |
def get_search_result(self, ddoc_id, index_name, **query_params):
"""
Retrieves the raw JSON content from the remote database based on the
search index on the server, using the query_params provided as query
parameters. A ``query`` parameter containing the Lucene query
syntax is mandatory.
Example for search queries:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve documents where the Lucene field name is 'name' and
# the value is 'julia*'
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
include_docs=True)
for row in resp['rows']:
# Process search index data (in JSON format).
Example if the search query requires grouping by using
the ``group_field`` parameter:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve JSON response content, limiting response to 10 documents
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
group_field='name',
limit=10)
for group in resp['groups']:
for row in group['rows']:
# Process search index data (in JSON format).
:param str ddoc_id: Design document id used to get the search result.
:param str index_name: Name used in part to identify the index.
:param str bookmark: Optional string that enables you to specify which
page of results you require. Only valid for queries that do not
specify the ``group_field`` query parameter.
:param list counts: Optional JSON array of field names for which
counts should be produced. The response will contain counts for each
unique value of this field name among the documents matching the
search query.
Requires the index to have faceting enabled.
:param list drilldown: Optional list of fields that each define a
pair of a field name and a value. This field can be used several
times. The search will only match documents that have the given
value in the field name. It differs from using
``query=fieldname:value`` only in that the values are not analyzed.
:param str group_field: Optional string field by which to group
search matches. Fields containing other data
(numbers, objects, arrays) can not be used.
:param int group_limit: Optional number with the maximum group count.
This field can only be used if ``group_field`` query parameter
is specified.
:param group_sort: Optional JSON field that defines the order of the
groups in a search using ``group_field``. The default sort order
is relevance. This field can have the same values as the sort field,
so single fields as well as arrays of fields are supported.
:param int limit: Optional number to limit the maximum count of the
returned documents. In case of a grouped search, this parameter
limits the number of documents per group.
:param query/q: A Lucene query in the form of ``name:value``.
If name is omitted, the special value ``default`` is used.
The ``query`` parameter can be abbreviated as ``q``.
:param ranges: Optional JSON facet syntax that reuses the standard
Lucene syntax to return counts of results which fit into each
specified category. Inclusive range queries are denoted by brackets.
Exclusive range queries are denoted by curly brackets.
For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an
inclusive range of 0 to 100.
Requires the index to have faceting enabled.
:param sort: Optional JSON string of the form ``fieldname<type>`` for
ascending or ``-fieldname<type>`` for descending sort order.
Fieldname is the name of a string or number field and type is either
number or string or a JSON array of such strings. The type part is
optional and defaults to number.
:param str stale: Optional string to allow the results from a stale
index to be used. This makes the request return immediately, even
if the index has not been completely built yet.
:param list highlight_fields: Optional list of fields which should be
highlighted.
:param str highlight_pre_tag: Optional string inserted before the
highlighted word in the highlights output. Defaults to ``<em>``.
:param str highlight_post_tag: Optional string inserted after the
highlighted word in the highlights output. Defaults to ``</em>``.
:param int highlight_number: Optional number of fragments returned in
highlights. If the search term occurs less often than the number of
fragments specified, longer fragments are returned. Default is 1.
:param int highlight_size: Optional number of characters in each
fragment for highlights. Defaults to 100 characters.
:param list include_fields: Optional list of field names to include in
search results. Any fields included must have been indexed with the
``store:true`` option.
:returns: Search query result data in JSON format
"""
ddoc = DesignDocument(self, ddoc_id)
return self._get_search_result(
'/'.join((ddoc.document_url, '_search', index_name)),
**query_params
) | Retrieves the raw JSON content from the remote database based on the
search index on the server, using the query_params provided as query
parameters. A ``query`` parameter containing the Lucene query
syntax is mandatory.
Example for search queries:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve documents where the Lucene field name is 'name' and
# the value is 'julia*'
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
include_docs=True)
for row in resp['rows']:
# Process search index data (in JSON format).
Example if the search query requires grouping by using
the ``group_field`` parameter:
.. code-block:: python
# Assuming that 'searchindex001' exists as part of the
# 'ddoc001' design document in the remote database...
# Retrieve JSON response content, limiting response to 10 documents
resp = db.get_search_result('ddoc001', 'searchindex001',
query='name:julia*',
group_field='name',
limit=10)
for group in resp['groups']:
for row in group['rows']:
# Process search index data (in JSON format).
:param str ddoc_id: Design document id used to get the search result.
:param str index_name: Name used in part to identify the index.
:param str bookmark: Optional string that enables you to specify which
page of results you require. Only valid for queries that do not
specify the ``group_field`` query parameter.
:param list counts: Optional JSON array of field names for which
counts should be produced. The response will contain counts for each
unique value of this field name among the documents matching the
search query.
Requires the index to have faceting enabled.
:param list drilldown: Optional list of fields that each define a
pair of a field name and a value. This field can be used several
times. The search will only match documents that have the given
value in the field name. It differs from using
``query=fieldname:value`` only in that the values are not analyzed.
:param str group_field: Optional string field by which to group
search matches. Fields containing other data
(numbers, objects, arrays) can not be used.
:param int group_limit: Optional number with the maximum group count.
This field can only be used if ``group_field`` query parameter
is specified.
:param group_sort: Optional JSON field that defines the order of the
groups in a search using ``group_field``. The default sort order
is relevance. This field can have the same values as the sort field,
so single fields as well as arrays of fields are supported.
:param int limit: Optional number to limit the maximum count of the
returned documents. In case of a grouped search, this parameter
limits the number of documents per group.
:param query/q: A Lucene query in the form of ``name:value``.
If name is omitted, the special value ``default`` is used.
The ``query`` parameter can be abbreviated as ``q``.
:param ranges: Optional JSON facet syntax that reuses the standard
Lucene syntax to return counts of results which fit into each
specified category. Inclusive range queries are denoted by brackets.
Exclusive range queries are denoted by curly brackets.
For example ``ranges={"price":{"cheap":"[0 TO 100]"}}`` has an
inclusive range of 0 to 100.
Requires the index to have faceting enabled.
:param sort: Optional JSON string of the form ``fieldname<type>`` for
ascending or ``-fieldname<type>`` for descending sort order.
Fieldname is the name of a string or number field and type is either
number or string or a JSON array of such strings. The type part is
optional and defaults to number.
:param str stale: Optional string to allow the results from a stale
index to be used. This makes the request return immediately, even
if the index has not been completely built yet.
:param list highlight_fields: Optional list of fields which should be
highlighted.
:param str highlight_pre_tag: Optional string inserted before the
highlighted word in the highlights output. Defaults to ``<em>``.
:param str highlight_post_tag: Optional string inserted after the
highlighted word in the highlights output. Defaults to ``</em>``.
:param int highlight_number: Optional number of fragments returned in
highlights. If the search term occurs less often than the number of
fragments specified, longer fragments are returned. Default is 1.
:param int highlight_size: Optional number of characters in each
fragment for highlights. Defaults to 100 characters.
:param list include_fields: Optional list of field names to include in
search results. Any fields included must have been indexed with the
``store:true`` option.
:returns: Search query result data in JSON format |
def _preprocess_and_rename_grid_attrs(func, grid_attrs=None, **kwargs):
"""Call a custom preprocessing method first then rename grid attrs.
This wrapper is needed to generate a single function to pass to the
``preprocess`` of xr.open_mfdataset. It makes sure that the
user-specified preprocess function is called on the loaded Dataset before
aospy's is applied. An example for why this might be needed is output from
the WRF model; one needs to add a CF-compliant units attribute to the time
coordinate of all input files, because it is not present by default.
Parameters
----------
func : function
An arbitrary function to call before calling
``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
an xr.Dataset as an argument as well as ``**kwargs``.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
function
A function that calls the provided function ``func`` on the Dataset
before calling ``grid_attrs_to_aospy_names``; this is meant to be
passed as a ``preprocess`` argument to ``xr.open_mfdataset``.
"""
def func_wrapper(ds):
return grid_attrs_to_aospy_names(func(ds, **kwargs), grid_attrs)
return func_wrapper | Call a custom preprocessing method first then rename grid attrs.
This wrapper is needed to generate a single function to pass to the
``preprocess`` of xr.open_mfdataset. It makes sure that the
user-specified preprocess function is called on the loaded Dataset before
aospy's is applied. An example for why this might be needed is output from
the WRF model; one needs to add a CF-compliant units attribute to the time
coordinate of all input files, because it is not present by default.
Parameters
----------
func : function
An arbitrary function to call before calling
``grid_attrs_to_aospy_names`` in ``_load_data_from_disk``. Must take
an xr.Dataset as an argument as well as ``**kwargs``.
grid_attrs : dict (optional)
Overriding dictionary of grid attributes mapping aospy internal
names to names of grid attributes used in a particular model.
Returns
-------
function
A function that calls the provided function ``func`` on the Dataset
before calling ``grid_attrs_to_aospy_names``; this is meant to be
passed as a ``preprocess`` argument to ``xr.open_mfdataset``. |
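A hedged sketch of how such a wrapper would typically be passed to xarray; add_time_units and the file glob are hypothetical, and grid_attrs_to_aospy_names is assumed to be available in the surrounding module:
import xarray as xr

def add_time_units(ds, **kwargs):
    # illustrative fixer for WRF-style output lacking CF-compliant time units
    ds['Time'].attrs['units'] = 'hours since 2000-01-01'
    return ds

preprocess = _preprocess_and_rename_grid_attrs(add_time_units)
ds = xr.open_mfdataset('wrfout_*.nc', preprocess=preprocess, combine='by_coords')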
def update(self, instance):
""" method finds unit_of_work record and change its status"""
assert isinstance(instance, UnitOfWork)
if instance.db_id:
query = {'_id': ObjectId(instance.db_id)}
else:
query = {unit_of_work.PROCESS_NAME: instance.process_name,
unit_of_work.TIMEPERIOD: instance.timeperiod,
unit_of_work.START_ID: instance.start_id,
unit_of_work.END_ID: instance.end_id}
self.ds.update(COLLECTION_UNIT_OF_WORK, query, instance)
return instance.db_id | method finds unit_of_work record and change its status |
def inverse(self):
"""return index array that maps unique values back to original space. unique[inverse]==keys"""
inv = np.empty(self.size, np.int)
inv[self.sorter] = self.sorted_group_rank_per_key
return inv | return index array that maps unique values back to original space. unique[inverse]==keys |
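The contract unique[inverse] == keys can be illustrated with plain numpy (np.unique with return_inverse), independent of the index class that hosts this method:
import numpy as np

keys = np.array([3, 1, 3, 2, 1])
unique, inverse = np.unique(keys, return_inverse=True)
assert np.array_equal(unique[inverse], keys)   # maps unique values back to original space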
def _check_subject_identifier_matches_requested(self, authentication_request, sub):
# type (oic.message.AuthorizationRequest, str) -> None
"""
Verifies the subject identifier against any requested subject identifier using the claims request parameter.
:param authentication_request: authentication request
:param sub: subject identifier
:raise AuthorizationError: if the subject identifier does not match the requested one
"""
if 'claims' in authentication_request:
requested_id_token_sub = authentication_request['claims'].get('id_token', {}).get('sub')
requested_userinfo_sub = authentication_request['claims'].get('userinfo', {}).get('sub')
if requested_id_token_sub and requested_userinfo_sub and requested_id_token_sub != requested_userinfo_sub:
raise AuthorizationError('Requested different subject identifier for IDToken and userinfo: {} != {}'
.format(requested_id_token_sub, requested_userinfo_sub))
requested_sub = requested_id_token_sub or requested_userinfo_sub
if requested_sub and sub != requested_sub:
raise AuthorizationError('Requested subject identifier \'{}\' could not be matched'
.format(requested_sub)) | Verifies the subject identifier against any requested subject identifier using the claims request parameter.
:param authentication_request: authentication request
:param sub: subject identifier
:raise AuthorizationError: if the subject identifier does not match the requested one |
def setupTable_VORG(self):
"""
Make the VORG table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "VORG" not in self.tables:
return
self.otf["VORG"] = vorg = newTable("VORG")
vorg.majorVersion = 1
vorg.minorVersion = 0
vorg.VOriginRecords = {}
# Find the most frequent verticalOrigin
vorg_count = Counter(_getVerticalOrigin(self.otf, glyph)
for glyph in self.allGlyphs.values())
vorg.defaultVertOriginY = vorg_count.most_common(1)[0][0]
if len(vorg_count) > 1:
for glyphName, glyph in self.allGlyphs.items():
vorg.VOriginRecords[glyphName] = _getVerticalOrigin(
self.otf, glyph)
vorg.numVertOriginYMetrics = len(vorg.VOriginRecords) | Make the VORG table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired. |
def logs(self, prefix='worker'):
"""Generates a dictionary that contains all collected statistics.
"""
logs = []
logs += [('success_rate', np.mean(self.success_history))]
if self.compute_Q:
logs += [('mean_Q', np.mean(self.Q_history))]
logs += [('episode', self.n_episodes)]
if prefix != '' and not prefix.endswith('/'):
return [(prefix + '/' + key, val) for key, val in logs]
else:
return logs | Generates a dictionary that contains all collected statistics. |
def get_path(self, dir=None):
"""Return path relative to the current working directory of the
Node.FS.Base object that owns us."""
if not dir:
dir = self.fs.getcwd()
if self == dir:
return '.'
path_elems = self.get_path_elements()
pathname = ''
try: i = path_elems.index(dir)
except ValueError:
for p in path_elems[:-1]:
pathname += p.dirname
else:
for p in path_elems[i+1:-1]:
pathname += p.dirname
return pathname + path_elems[-1].name | Return path relative to the current working directory of the
Node.FS.Base object that owns us. |
def documentation(self):
"""
Get the documentation that the server sends for the API.
"""
newclient = self.__class__(self.session, self.root_url)
return newclient.get_raw('/') | Get the documentation that the server sends for the API. |
def bbduk_trim(forward_in, forward_out, reverse_in='NA', reverse_out='NA',
trimq=20, k=25, minlength=50, forcetrimleft=15, hdist=1, returncmd=False, **kwargs):
"""
Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can
be overwritten by using keyword parameters.
:param forward_in: Forward reads you want to quality trim.
:param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value.
:param forward_out: Output forward reads.
:param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used.
:param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used.
:param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list.
:return: out and err: stdout string and stderr string from running bbduk.
"""
options = kwargs_to_string(kwargs)
cmd = 'which bbduk.sh'
try:
subprocess.check_output(cmd.split()).decode('utf-8')
except subprocess.CalledProcessError:
print('ERROR: Could not find bbduk. Please check that the bbtools package is installed and on your $PATH.\n\n')
raise FileNotFoundError
if os.path.isfile(forward_in.replace('_R1', '_R2')) and reverse_in == 'NA' and '_R1' in forward_in:
reverse_in = forward_in.replace('_R1', '_R2')
if reverse_out == 'NA':
if '_R1' in forward_out:
reverse_out = forward_out.replace('_R1', '_R2')
else:
raise ValueError('If you do not specify reverse_out, forward_out must contain R1.\n\n')
cmd = 'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq={trimq} k={k} ' \
'minlength={minlength} forcetrimleft={forcetrimleft} ref=adapters overwrite hdist={hdist} tpe tbo{optn}'\
.format(f_in=forward_in,
r_in=reverse_in,
f_out=forward_out,
r_out=reverse_out,
trimq=trimq,
k=k,
minlength=minlength,
forcetrimleft=forcetrimleft,
hdist=hdist,
optn=options)
elif reverse_in == 'NA' or reverse_in is None:
cmd = 'bbduk.sh in={f_in} out={f_out} qtrim=w trimq={trimq} k={k} minlength={minlength} ' \
'forcetrimleft={forcetrimleft} ref=adapters overwrite hdist={hdist} tpe tbo{optn}'\
.format(f_in=forward_in,
f_out=forward_out,
trimq=trimq,
k=k,
minlength=minlength,
forcetrimleft=forcetrimleft,
hdist=hdist,
optn=options)
else:
if reverse_out == 'NA':
raise ValueError('Reverse output reads must be specified.')
cmd = 'bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq={trimq} k={k} ' \
'minlength={minlength} forcetrimleft={forcetrimleft} ref=adapters overwrite hdist={hdist} tpe tbo{optn}' \
.format(f_in=forward_in,
r_in=reverse_in,
f_out=forward_out,
r_out=reverse_out,
trimq=trimq,
k=k,
minlength=minlength,
forcetrimleft=forcetrimleft,
hdist=hdist,
optn=options)
out, err = accessoryfunctions.run_subprocess(cmd)
if returncmd:
return out, err, cmd
else:
return out, err | Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can
be overwritten by using keyword parameters.
:param forward_in: Forward reads you want to quality trim.
:param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value.
:param forward_out: Output forward reads.
:param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used.
:param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used.
:param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list.
:return: out and err: stdout string and stderr string from running bbduk. |
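A usage sketch relying on the _R1/_R2 naming convention, with placeholder file names; bbduk.sh must be on $PATH for the call to succeed:
out, err, cmd = bbduk_trim(forward_in='sample_R1.fastq.gz',
                           forward_out='sample_trimmed_R1.fastq.gz',
                           returncmd=True)
print(cmd)   # inspect the exact bbduk.sh command that was run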
def find_application(app_id=None, app_name=None):
"""
find the application according application id (prioritary) or application name
:param app_id: the application id
:param app_name: the application name
:return: found application or None if not found
"""
LOGGER.debug("ApplicationService.find_application")
if (app_id is None or not app_id) and (app_name is None or not app_name):
raise exceptions.ArianeCallParametersError('id and name')
if (app_id is not None and app_id) and (app_name is not None and app_name):
LOGGER.warn('ApplicationService.find_application - Both id and name are defined. '
'Will search by id.')
app_name = None
params = None
if app_id is not None and app_id:
params = {'id': app_id}
elif app_name is not None and app_name:
params = {'name': app_name}
ret = None
if params is not None:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
response = ApplicationService.requester.call(args)
if response.rc == 0:
ret = Application.json_2_application(response.response_content)
elif response.rc != 404:
err_msg = 'ApplicationService.find_application - Problem while finding application (id:' + \
str(app_id) + ', name:' + str(app_name) + '). ' + \
'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(
err_msg
)
return ret | find the application according application id (prioritary) or application name
:param app_id: the application id
:param app_name: the application name
:return: found application or None if not found |
def parse_definition_expr(expr, default_value=None):
"""
Parses a definition expression and returns a key-value pair
as a tuple.
Each definition expression should be in one of these two formats:
* <variable>=<value>
* <variable>
:param expr:
String expression to be parsed.
:param default_value:
(Default None) When a definition is encountered that has no value, this
will be used as its value.
:return:
A (define, value) tuple
or raises a ``ValueError`` if an invalid
definition expression is provided.
or raises ``AttributeError`` if None is provided for ``expr``.
Usage:
>>> parse_definition_expr('DEBUG=1')
('DEBUG', 1)
>>> parse_definition_expr('FOOBAR=0x40')
('FOOBAR', 64)
>>> parse_definition_expr('FOOBAR=whatever')
('FOOBAR', 'whatever')
>>> parse_definition_expr('FOOBAR=false')
('FOOBAR', False)
>>> parse_definition_expr('FOOBAR=TRUE')
('FOOBAR', True)
>>> parse_definition_expr('FOOBAR', default_value=None)
('FOOBAR', None)
>>> parse_definition_expr('FOOBAR', default_value=1)
('FOOBAR', 1)
>>> parse_definition_expr('FOOBAR=ah=3')
('FOOBAR', 'ah=3')
>>> parse_definition_expr(' FOOBAR=ah=3 ')
('FOOBAR', 'ah=3 ')
>>> parse_definition_expr(' FOOBAR =ah=3 ')
('FOOBAR', 'ah=3 ')
>>> parse_definition_expr(' FOOBAR = ah=3 ')
('FOOBAR', ' ah=3 ')
>>> parse_definition_expr(" ")
Traceback (most recent call last):
...
ValueError: Invalid definition symbol ` `
>>> parse_definition_expr(None)
Traceback (most recent call last):
...
AttributeError: 'NoneType' object has no attribute 'split'
"""
try:
define, value = expr.split('=', 1)
try:
value = parse_number_token(value)
except ValueError:
value = parse_bool_token(value)
except ValueError:
if expr:
define, value = expr, default_value
else:
raise ValueError("Invalid definition expression `%s`" % str(expr))
d = define.strip()
if d:
return d, value
else:
raise ValueError("Invalid definition symbol `%s`" % str(define)) | Parses a definition expression and returns a key-value pair
as a tuple.
Each definition expression should be in one of these two formats:
* <variable>=<value>
* <variable>
:param expr:
String expression to be parsed.
:param default_value:
(Default None) When a definition is encountered that has no value, this
will be used as its value.
:return:
A (define, value) tuple
or raises a ``ValueError`` if an invalid
definition expression is provided.
or raises ``AttributeError`` if None is provided for ``expr``.
Usage:
>>> parse_definition_expr('DEBUG=1')
('DEBUG', 1)
>>> parse_definition_expr('FOOBAR=0x40')
('FOOBAR', 64)
>>> parse_definition_expr('FOOBAR=whatever')
('FOOBAR', 'whatever')
>>> parse_definition_expr('FOOBAR=false')
('FOOBAR', False)
>>> parse_definition_expr('FOOBAR=TRUE')
('FOOBAR', True)
>>> parse_definition_expr('FOOBAR', default_value=None)
('FOOBAR', None)
>>> parse_definition_expr('FOOBAR', default_value=1)
('FOOBAR', 1)
>>> parse_definition_expr('FOOBAR=ah=3')
('FOOBAR', 'ah=3')
>>> parse_definition_expr(' FOOBAR=ah=3 ')
('FOOBAR', 'ah=3 ')
>>> parse_definition_expr(' FOOBAR =ah=3 ')
('FOOBAR', 'ah=3 ')
>>> parse_definition_expr(' FOOBAR = ah=3 ')
('FOOBAR', ' ah=3 ')
>>> parse_definition_expr(" ")
Traceback (most recent call last):
...
ValueError: Invalid definition symbol ` `
>>> parse_definition_expr(None)
Traceback (most recent call last):
...
AttributeError: 'NoneType' object has no attribute 'split' |
def _calculate(self):
self.logpriors = np.zeros_like(self.rad)
for i in range(self.N-1):
o = np.arange(i+1, self.N)
dist = ((self.zscale*(self.pos[i] - self.pos[o]))**2).sum(axis=-1)
dist0 = (self.rad[i] + self.rad[o])**2
update = self.prior_func(dist - dist0)
self.logpriors[i] += np.sum(update)
self.logpriors[o] += update
"""
# This is equivalent
for i in range(self.N-1):
for j in range(i+1, self.N):
d = ((self.zscale*(self.pos[i] - self.pos[j]))**2).sum(axis=-1)
r = (self.rad[i] + self.rad[j])**2
cost = self.prior_func(d - r)
self.logpriors[i] += cost
self.logpriors[j] += cost
""" | # This is equivalent
for i in range(self.N-1):
for j in range(i+1, self.N):
d = ((self.zscale*(self.pos[i] - self.pos[j]))**2).sum(axis=-1)
r = (self.rad[i] + self.rad[j])**2
cost = self.prior_func(d - r)
self.logpriors[i] += cost
self.logpriors[j] += cost |
def nonoverlap(item_a, time_a, item_b, time_b, max_value):
"""
Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1.
"""
return np.minimum(1 - item_a.count_overlap(time_a, item_b, time_b), max_value) / float(max_value) | Percentage of pixels in each object that do not overlap with the other object
Args:
item_a: STObject from the first set in ObjectMatcher
time_a: Time integer being evaluated
item_b: STObject from the second set in ObjectMatcher
time_b: Time integer being evaluated
max_value: Maximum distance value used as scaling value and upper constraint.
Returns:
Distance value between 0 and 1. |
def _compute_anelastic_attenuation_term(self, C, rrup, mag):
"""
Compute magnitude-distance scaling term as defined in equation 21,
page 2291 (Tavakoli and Pezeshk, 2005)
"""
r = (rrup**2. + (C['c5'] * np.exp(C['c6'] * mag +
C['c7'] * (8.5 - mag)**2.5))**2.)**.5
f3 = ((C['c4'] + C['c13'] * mag) * np.log(r) +
(C['c8'] + C['c12'] * mag) * r)
return f3 | Compute magnitude-distance scaling term as defined in equation 21,
page 2291 (Tavakoli and Pezeshk, 2005) |
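Written out, the term computed above is (a transcription of the code, not a quotation of the reference):
r = \sqrt{r_{rup}^{2} + \left(c_{5}\,e^{\,c_{6} M + c_{7}(8.5 - M)^{2.5}}\right)^{2}}, \qquad
f_{3} = (c_{4} + c_{13} M)\,\ln r + (c_{8} + c_{12} M)\, r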
def query_fetch_all(self, query, values):
"""
Executes a db query, gets all the values, and closes the connection.
"""
self.cursor.execute(query, values)
retval = self.cursor.fetchall()
self.__close_db()
return retval | Executes a db query, gets all the values, and closes the connection. |
def info(self):
"""return information about replica set"""
hosts = ','.join(x['host'] for x in self.members())
mongodb_uri = 'mongodb://' + hosts + '/?replicaSet=' + self.repl_id
result = {"id": self.repl_id,
"auth_key": self.auth_key,
"members": self.members(),
"mongodb_uri": mongodb_uri,
"orchestration": 'replica_sets'}
if self.login:
# Add replicaSet URI parameter.
uri = ('%s&replicaSet=%s'
% (self.mongodb_auth_uri(hosts), self.repl_id))
result['mongodb_auth_uri'] = uri
return result | return information about replica set |
def _parse_routes(iface, opts):
'''
Filters given options and outputs valid settings for
the route settings file.
'''
# Normalize keys
opts = dict((k.lower(), v) for (k, v) in six.iteritems(opts))
result = {}
if 'routes' not in opts:
_raise_error_routes(iface, 'routes', 'List of routes')
for opt in opts:
result[opt] = opts[opt]
return result | Filters given options and outputs valid settings for
the route settings file. |
def discover_settings(conf_base=None):
""" Discover custom settings for ZMQ path"""
settings = {
'zmq_prefix': '',
'libzmq_extension': False,
'no_libzmq_extension': False,
'skip_check_zmq': False,
'build_ext': {},
'bdist_egg': {},
}
if sys.platform.startswith('win'):
settings['have_sys_un_h'] = False
if conf_base:
# lowest priority
merge(settings, load_config('config', conf_base))
merge(settings, get_cfg_args())
merge(settings, get_eargs())
return settings | Discover custom settings for ZMQ path |
def Load(self):
"""Loads all new events from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All events in the file that have not been yielded yet.
"""
for record in super(EventFileLoader, self).Load():
yield event_pb2.Event.FromString(record) | Loads all new events from disk.
Calling Load multiple times in a row will not 'drop' events as long as the
return value is not iterated over.
Yields:
All events in the file that have not been yielded yet. |
def setup_panel_params(self, scale_x, scale_y):
"""
Compute the range and break information for the panel
"""
def train(scale, limits, trans, name):
"""
Train a single coordinate axis
"""
if limits is None:
rangee = scale.dimension()
else:
rangee = scale.transform(limits)
# data space
out = scale.break_info(rangee)
# trans'd range
out['range'] = np.sort(trans.transform(out['range']))
if limits is None:
expand = self.expand_default(scale)
out['range'] = expand_range_distinct(out['range'], expand)
# major and minor breaks in plot space
out['major'] = transform_value(trans, out['major'], out['range'])
out['minor'] = transform_value(trans, out['minor'], out['range'])
for key in list(out.keys()):
new_key = '{}_{}'.format(name, key)
out[new_key] = out.pop(key)
return out
out = dict(
scales=types.SimpleNamespace(x=scale_x, y=scale_y),
**train(scale_x, self.limits.xlim, self.trans.x, 'x'),
**train(scale_y, self.limits.ylim, self.trans.y, 'y')
)
return out | Compute the range and break information for the panel |
def to_capabilities(self):
"""
Creates a capabilities with all the options that have been set and
returns a dictionary with everything
"""
capabilities = ChromeOptions.to_capabilities(self)
capabilities.update(self._caps)
opera_options = capabilities[self.KEY]
if self.android_package_name:
opera_options["androidPackage"] = self.android_package_name
if self.android_device_socket:
opera_options["androidDeviceSocket"] = self.android_device_socket
if self.android_command_line_file:
opera_options["androidCommandLineFile"] = \
self.android_command_line_file
return capabilities | Creates a capabilities with all the options that have been set and
returns a dictionary with everything |
def acquire(self, key):
"""Return the known information about the device and mark the record
as being used by a segmentation state machine."""
if _debug: DeviceInfoCache._debug("acquire %r", key)
if isinstance(key, int):
device_info = self.cache.get(key, None)
elif not isinstance(key, Address):
raise TypeError("key must be integer or an address")
elif key.addrType not in (Address.localStationAddr, Address.remoteStationAddr):
raise TypeError("address must be a local or remote station")
else:
device_info = self.cache.get(key, None)
if device_info:
if _debug: DeviceInfoCache._debug(" - reference bump")
device_info._ref_count += 1
if _debug: DeviceInfoCache._debug(" - device_info: %r", device_info)
return device_info | Return the known information about the device and mark the record
as being used by a segmentation state machine.
def on_backward_end(self, iteration:int, **kwargs)->None:
"Callback function that writes backward end appropriate data to Tensorboard."
if iteration == 0: return
self._update_batches_if_needed()
#TODO: This could perhaps be implemented as queues of requests instead but that seemed like overkill.
# But I'm not the biggest fan of maintaining these boolean flags either... Review pls.
if iteration % self.stats_iters == 0: self.gen_stats_updated, self.crit_stats_updated = False, False
if not (self.gen_stats_updated and self.crit_stats_updated): self._write_model_stats(iteration=iteration) | Callback function that writes backward end appropriate data to Tensorboard. |
def _find_all_versions(self, project_name):
"""Find all available versions for project_name
This checks index_urls, find_links and dependency_links
All versions found are returned
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url, trusted=True) for url in index_url_loc),
(Link(url, trusted=True) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = pkg_resources.safe_name(project_name).lower()
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name.lower(), canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f', trusted=True) for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
) | Find all available versions for project_name
This checks index_urls, find_links and dependency_links
All versions found are returned
See _link_package_versions for details on which files are accepted |
def _check_stations_csv(self, usr, root):
''' Relocate a stations.csv copy into the user home for easy management.
E.g. no need for sudo when you add a new station, etc. '''
if path.exists(path.join(usr, 'stations.csv')):
return
else:
copyfile(root, path.join(usr, 'stations.csv')) | Relocate a stations.csv copy into the user home for easy management.
E.g. no need for sudo when you add a new station, etc.
def resize_image(fullfile,fullfile_resized,_megapixels):
"""Resizes image (fullfile), saves to fullfile_resized. Image
aspect ratio is conserved, will be scaled to be close to _megapixels in
size. Eg if _megapixels=2, will resize 2560x1920 so each dimension
is scaled by ((2**(20+1*MP))/float(2560*1920))**2"""
logger.debug("%s - Resizing to %s MP"%(fullfile,_megapixels))
img = Image.open(fullfile)
width,height=img.size
current_megapixels=width*height/(2.0**20)
# Compute new width and height for image
new_width,new_height=resize_compute_width_height(\
fullfile,_megapixels)
# Not scaling
if not new_width:
logger.debug("%s - NOT Resizing, scale is > 1"%(fullfile))
return False
logger.info("%s - Resizing image from %0.1f to %0.1f MP (%dx%d) to (%dx%d)"\
%(fullfile,current_megapixels,_megapixels,width,height,new_width,new_height))
# Resize the image
imageresize = img.resize((new_width,new_height), Image.ANTIALIAS)
#imageresize.save(fullfile_resized, 'JPEG', quality=75)
#FIXME: What quality to save as?
imageresize.save(fullfile_resized, 'JPEG')
# ---- Transfer over EXIF info ----
if not update_exif_GEXIV2(fullfile,fullfile_resized):
return False
return True | Resizes image (fullfile), saves to fullfile_resized. Image
aspect ratio is conserved, will be scaled to be close to _megapixels in
size. Eg if _megapixels=2, will resize 2560x1920 so each dimension
is scaled by ((2**(20+1*MP))/float(2560*1920))**2 |
def _convert_to_ndarray(self, data):
"""Converts data from dataframe to ndarray format. Assumption: df-columns are ndarray-layers (3rd dim.)"""
if data.__class__.__name__ != "DataFrame":
raise Exception(f"data is not a DataFrame but {data.__class__.__name__}.")
shape_ndarray = (self._height, self._width, data.shape[1])
data_ndarray = data.values.reshape(shape_ndarray)
return data_ndarray | Converts data from dataframe to ndarray format. Assumption: df-columns are ndarray-layers (3rd dim.) |
def process_bytecode(link_refs: Dict[str, Any], bytecode: bytes) -> str:
"""
Replace link_refs in bytecode with 0's.
"""
all_offsets = [y for x in link_refs.values() for y in x.values()]
# Link ref validation.
validate_link_ref_fns = (
validate_link_ref(ref["start"] * 2, ref["length"] * 2)
for ref in concat(all_offsets)
)
pipe(bytecode, *validate_link_ref_fns)
# Convert link_refs in bytecode to 0's
link_fns = (
replace_link_ref_in_bytecode(ref["start"] * 2, ref["length"] * 2)
for ref in concat(all_offsets)
)
processed_bytecode = pipe(bytecode, *link_fns)
return add_0x_prefix(processed_bytecode) | Replace link_refs in bytecode with 0's. |
def delete_chat_sticker_set(self, chat_id):
"""
Use this method to delete a group sticker set from a supergroup. The bot must be an administrator in the chat
for this to work and must have the appropriate admin rights. Use the field can_set_sticker_set
optionally returned in getChat requests to check if the bot can use this method. Returns True on success.
:param chat_id: Unique identifier for the target chat or username of the target supergroup
(in the format @supergroupusername)
:return:
"""
result = apihelper.delete_chat_sticker_set(self.token, chat_id)
return result | Use this method to delete a group sticker set from a supergroup. The bot must be an administrator in the chat
for this to work and must have the appropriate admin rights. Use the field can_set_sticker_set
optionally returned in getChat requests to check if the bot can use this method. Returns True on success.
:param chat_id: Unique identifier for the target chat or username of the target supergroup
(in the format @supergroupusername)
:return: |
def get(self):
'''Get a task from queue when bucket available'''
if self.bucket.get() < 1:
return None
now = time.time()
self.mutex.acquire()
try:
task = self.priority_queue.get_nowait()
self.bucket.desc()
except Queue.Empty:
self.mutex.release()
return None
task.exetime = now + self.processing_timeout
self.processing.put(task)
self.mutex.release()
return task.taskid | Get a task from queue when bucket available |
def format_option(self, ohi):
"""Format the help output for a single option.
:param OptionHelpInfo ohi: Extracted information for option to print
:return: Formatted help text for this option
:rtype: list of string
"""
lines = []
choices = 'one of: [{}] '.format(ohi.choices) if ohi.choices else ''
arg_line = ('{args} {dflt}'
.format(args=self._maybe_cyan(', '.join(ohi.display_args)),
dflt=self._maybe_green('({}default: {})'.format(choices, ohi.default))))
lines.append(arg_line)
indent = ' '
lines.extend(['{}{}'.format(indent, s) for s in wrap(ohi.help, 76)])
if ohi.deprecated_message:
lines.append(self._maybe_red('{}{}.'.format(indent, ohi.deprecated_message)))
if ohi.removal_hint:
lines.append(self._maybe_red('{}{}'.format(indent, ohi.removal_hint)))
return lines | Format the help output for a single option.
:param OptionHelpInfo ohi: Extracted information for option to print
:return: Formatted help text for this option
:rtype: list of string |
def make_clean_visible_file(i_chunk, clean_visible_path):
'''make a temp file of clean_visible text'''
_clean = open(clean_visible_path, 'wb')
_clean.write('<?xml version="1.0" encoding="UTF-8"?>')
_clean.write('<root>')
for idx, si in enumerate(i_chunk):
if si.stream_id is None:
# create the FILENAME element anyway, so the ordering
# remains the same as the i_chunk and can be aligned.
stream_id = ''
else:
stream_id = si.stream_id
doc = lxml.etree.Element("FILENAME", stream_id=stream_id)
if si.body and si.body.clean_visible:
try:
# is UTF-8, and etree wants .text to be unicode
doc.text = si.body.clean_visible.decode('utf8')
except ValueError:
doc.text = drop_invalid_and_upper_utf8_chars(
si.body.clean_visible.decode('utf8'))
            except Exception:
                # this should never ever fail, because if it does,
                # then it means that clean_visible (or more likely
                # clean_html) is not what it is supposed to be.
                # Therefore, do not take it lightly:
                logger.critical(traceback.format_exc())
logger.critical('failed on stream_id=%s to follow:',
si.stream_id)
logger.critical(repr(si.body.clean_visible))
logger.critical('above was stream_id=%s', si.stream_id)
# [I don't know who calls this, but note that this
# will *always* fail if clean_visible isn't valid UTF-8.]
raise
else:
doc.text = ''
_clean.write(lxml.etree.tostring(doc, encoding='UTF-8'))
_clean.write('</root>')
_clean.close()
logger.info(clean_visible_path)
'''
## hack to capture html for inspection
_html = open(clean_visible_path + '-html', 'wb')
for idx, si in enumerate(i_chunk):
_html.write('<FILENAME docid="%s">' % si.stream_id)
if si.body and si.body.clean_html:
_html.write(si.body.clean_html)
_html.write('</FILENAME>\n')
_html.close()
## replace this with log.info()
print clean_visible_path + '-html'
''' | make a temp file of clean_visible text |
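A minimal sketch of the input this function expects: an iterable of items exposing `stream_id` and `body.clean_visible` as UTF-8 bytes. The stand-in classes are illustrative assumptions, not the real streamcorpus types, and the function itself is Python 2 code.
# Illustrative stand-ins only; real StreamItems come from a streamcorpus Chunk.
class FakeBody(object):
    def __init__(self, clean_visible):
        self.clean_visible = clean_visible

class FakeStreamItem(object):
    def __init__(self, stream_id, text):
        self.stream_id = stream_id
        self.body = FakeBody(text)

i_chunk = [FakeStreamItem('doc-1', u'hello world'.encode('utf8'))]
make_clean_visible_file(i_chunk, '/tmp/clean_visible.xml')
# Under Python 2 this writes roughly:
# <?xml version="1.0" encoding="UTF-8"?><root>
#   <FILENAME stream_id="doc-1">hello world</FILENAME></root>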
def add(self, pattern, method=None, call=None, name=None):
"""Add a url pattern.
Args:
pattern (:obj:`str`): URL pattern to add. This is usually '/'
separated path. Parts of the URL can be parameterised using
curly braces.
                Examples: "/", "/path/to/resource", "/resources/{param}"
method (:obj:`str`, :obj:`list` of :obj:`str`, optional): HTTP
                methods for the path specified. By default, GET method is added.
Value can be either a single method, by passing a string, or
multiple methods, by passing a list of strings.
call (callable): Callable corresponding to the url pattern and the
HTTP method specified.
name (:obj:`str`): Name for the pattern that can be used for
reverse matching
Note:
A trailing '/' is always assumed in the pattern.
"""
if not pattern.endswith('/'):
pattern += '/'
parts = tuple(pattern.split('/')[1:])
node = self._routes
for part in parts:
node = node.setdefault(part, {})
if method is None:
node['GET'] = call
elif isinstance(method, str):
node[method.upper()] = call
else:
for m in method:
node[m.upper()] = call
if name is not None:
self._reverse[name] = pattern | Add a url pattern.
Args:
pattern (:obj:`str`): URL pattern to add. This is usually '/'
separated path. Parts of the URL can be parameterised using
curly braces.
                Examples: "/", "/path/to/resource", "/resources/{param}"
method (:obj:`str`, :obj:`list` of :obj:`str`, optional): HTTP
                methods for the path specified. By default, GET method is added.
Value can be either a single method, by passing a string, or
multiple methods, by passing a list of strings.
call (callable): Callable corresponding to the url pattern and the
HTTP method specified.
name (:obj:`str`): Name for the pattern that can be used for
reverse matching
Note:
A trailing '/' is always assumed in the pattern. |
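A short usage sketch of the routing table built by `add`; the enclosing `Router` class name and the handler functions are illustrative assumptions.
def list_users():
    return ['alice', 'bob']

def get_user():
    return {'name': 'alice'}

router = Router()                                  # whatever class defines add()
router.add('/users', call=list_users)              # GET is the default method
router.add('/users/{id}', method=['GET', 'PUT'], call=get_user, name='user')
# Because a trailing '/' is always appended, self._routes nests path segments
# (the empty segment marks the end of a path):
# {'users': {'': {'GET': list_users},
#            '{id}': {'': {'GET': get_user, 'PUT': get_user}}}}
# and self._reverse == {'user': '/users/{id}/'}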
def start_sctp_server(self, ip, port, name=None, timeout=None, protocol=None, family='ipv4'):
"""Starts a new STCP server to given `ip` and `port`.
`family` can be either ipv4 (default) or ipv6.
pysctp (https://github.com/philpraxis/pysctp) need to be installed your system.
Server can be given a `name`, default `timeout` and a `protocol`.
Notice that you have to use `Accept Connection` keyword for server to
receive connections.
Examples:
        | Start SCTP server | 10.10.10.2 | 53 |
        | Start SCTP server | 10.10.10.2 | 53 | Server1 |
        | Start SCTP server | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 |
        | Start SCTP server | 10.10.10.2 | 53 | timeout=5 |
"""
        self._start_server(SCTPServer, ip, port, name, timeout, protocol, family) | Starts a new SCTP server on the given `ip` and `port`.
`family` can be either ipv4 (default) or ipv6.
        pysctp (https://github.com/philpraxis/pysctp) needs to be installed on your system.
Server can be given a `name`, default `timeout` and a `protocol`.
Notice that you have to use `Accept Connection` keyword for server to
receive connections.
Examples:
        | Start SCTP server | 10.10.10.2 | 53 |
        | Start SCTP server | 10.10.10.2 | 53 | Server1 |
        | Start SCTP server | 10.10.10.2 | 53 | name=Server1 | protocol=GTPV2 |
        | Start SCTP server | 10.10.10.2 | 53 | timeout=5 | |
def _GetDirectory(self):
"""Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return CPIODirectory(self._file_system, self.path_spec) | Retrieves a directory.
Returns:
CPIODirectory: a directory or None if not available. |