def assign_rates(self, mu=1.0, pi=None, W=None):
"""
Overwrite the GTR model given the provided data
Parameters
----------
mu : float
Substitution rate
W : nxn matrix
Substitution matrix
pi : n vector
Equilibrium frequencies
"""
n = len(self.alphabet)
self.mu = mu
if pi is not None and len(pi)==n:
Pi = np.array(pi)
else:
if pi is not None and len(pi)!=n:
self.logger("length of equilibrium frequency vector does not match alphabet length", 4, warn=True)
self.logger("Ignoring input equilibrium frequencies", 4, warn=True)
Pi = np.ones(shape=(n,))
self.Pi = Pi/np.sum(Pi)
if W is None or W.shape!=(n,n):
if (W is not None) and W.shape!=(n,n):
self.logger("Substitution matrix size does not match alphabet size", 4, warn=True)
self.logger("Ignoring input substitution matrix", 4, warn=True)
# flow matrix
W = np.ones((n,n))
np.fill_diagonal(W, 0.0)
np.fill_diagonal(W, - W.sum(axis=0))
else:
W=np.array(W)
self.W = 0.5*(W+W.T)
self._check_fix_Q(fixed_mu=True)
self._eig()
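A minimal usage sketch for assign_rates, assuming a GTR-like model object (here called gtr) with a four-state alphabet; the instance name and the example matrices below are illustrative, not taken from the snippet above.
import numpy as np
n = 4
pi = np.array([0.3, 0.2, 0.2, 0.3])     # equilibrium frequencies; renormalized internally
W = np.ones((n, n))
np.fill_diagonal(W, 0.0)                # symmetric flow matrix with zero diagonal
gtr.assign_rates(mu=1.0, pi=pi, W=W)    # `gtr` is an assumed, already-constructed model object; mismatched shapes fall back to flat defaults with a warning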
def deregister_entity_from_group(self, entity, group):
'''
Removes entity from group
'''
if entity in self._entities:
if entity in self._groups[group]:
self._groups[group].remove(entity)
else:
raise UnmanagedEntityError(entity)
def get_unique_schema_id(schema):
# type: (GraphQLSchema) -> str
"""Get a unique id given a GraphQLSchema"""
assert isinstance(schema, GraphQLSchema), (
"Must receive a GraphQLSchema as schema. Received {}"
).format(repr(schema))
if schema not in _cached_schemas:
_cached_schemas[schema] = sha1(str(schema).encode("utf-8")).hexdigest()
return _cached_schemas[schema]
def remove_child_objective_banks(self, objective_bank_id):
"""Removes all children from an objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
raise: NotFound - ``objective_bank_id`` not in hierarchy
raise: NullArgument - ``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalogs(catalog_id=objective_bank_id)
return self._hierarchy_session.remove_children(id_=objective_bank_id)
def updateColumnName(self, networkId, tableType, body, verbose=None):
"""
Renames an existing column in the table specified by the `tableType` and `networkId` parameters.
:param networkId: SUID of the network containing the table
:param tableType: Table Type
:param body: Old and new column name
:param verbose: print more
:returns: default: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/tables/'+str(tableType)+'/columns', method="PUT", body=body, verbose=verbose)
return response
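A hedged usage sketch for updateColumnName; the body field names (oldName/newName) and the tableType value are assumptions drawn from the CyREST column API and should be checked against your Cytoscape version.
body = {"oldName": "score", "newName": "confidence_score"}   # assumed CyREST field names
client.updateColumnName(networkId=52, tableType="defaultnode", body=body)  # `client` is an assumed API wrapper instance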
def BE8(value, min_value=None, max_value=None, fuzzable=True, name=None, full_range=False):
'''8-bit field, Big endian encoded'''
return UInt8(value, min_value=min_value, max_value=max_value, encoder=ENC_INT_BE, fuzzable=fuzzable, name=name, full_range=full_range)
def from_dict(cls, d):
"""
Creates a TransformedStructure from a dict.
"""
s = Structure.from_dict(d)
return cls(s, history=d["history"],
other_parameters=d.get("other_parameters", None))
def str_id(self):
"str: This key's string id."
id_or_name = self.id_or_name
if id_or_name is not None and isinstance(id_or_name, str):
return id_or_name
return None
def determine_end_point(http_request, url):
"""
returns detail, list or aggregates
"""
if url.endswith('aggregates') or url.endswith('aggregates/'):
return 'aggregates'
else:
return 'detail' if is_detail_url(http_request, url) else 'list'
def On_close_criteria_box(self, dia):
"""
after criteria dialog window is closed.
Take the acceptance criteria values and update
self.acceptance_criteria
"""
criteria_list = list(self.acceptance_criteria.keys())
criteria_list.sort()
#---------------------------------------
# check if averaging by sample or by site
# and initialize sample/site criteria
#---------------------------------------
avg_by = dia.set_average_by_sample_or_site.GetValue()
if avg_by == 'sample':
for crit in ['site_int_n', 'site_int_sigma', 'site_int_sigma_perc', 'site_aniso_mean', 'site_int_n_outlier_check']:
self.acceptance_criteria[crit]['value'] = -999
if avg_by == 'site':
for crit in ['sample_int_n', 'sample_int_sigma', 'sample_int_sigma_perc', 'sample_aniso_mean', 'sample_int_n_outlier_check']:
self.acceptance_criteria[crit]['value'] = -999
#---------
# get value for each criterion
for i in range(len(criteria_list)):
crit = criteria_list[i]
value, accept = dia.get_value_for_crit(crit, self.acceptance_criteria)
if accept:
self.acceptance_criteria.update(accept)
#---------
# thellier interpreter calculation type
if dia.set_stdev_opt.GetValue() == True:
self.acceptance_criteria['interpreter_method']['value'] = 'stdev_opt'
elif dia.set_bs.GetValue() == True:
self.acceptance_criteria['interpreter_method']['value'] = 'bs'
elif dia.set_bs_par.GetValue() == True:
self.acceptance_criteria['interpreter_method']['value'] = 'bs_par'
# message dialog
dlg1 = wx.MessageDialog(
self, caption="Warning:", message="changes are saved to the criteria file\n ", style=wx.OK)
result = self.show_dlg(dlg1)
if result == wx.ID_OK:
try:
self.clear_boxes()
except IndexError:
pass
try:
self.write_acceptance_criteria_to_boxes()
except IOError:
pass
if self.data_model == 3:
crit_file = 'criteria.txt'
else:
crit_file = 'pmag_criteria.txt'
try:
pmag.write_criteria_to_file(os.path.join(
self.WD, crit_file), self.acceptance_criteria, data_model=self.data_model, prior_crits=self.crit_data)
except AttributeError as ex:
print(ex)
print("no criteria given to save")
dlg1.Destroy()
dia.Destroy()
self.fig4.texts[0].remove()
txt = "{} data".format(avg_by).capitalize()
self.fig4.text(0.02, 0.96, txt, {
'family': self.font_type, 'fontsize': 10, 'style': 'normal', 'va': 'center', 'ha': 'left'})
self.recalculate_satistics()
try:
self.update_GUI_with_new_interpretation()
except KeyError:
pass
def open_stream(stream):
"""Opens a stream and reads 8192 bytes from it.
This is useful to check if a stream actually has data
before opening the output.
"""
global stream_fd
# Attempts to open the stream
try:
stream_fd = stream.open()
except StreamError as err:
raise StreamError("Could not open stream: {0}".format(err))
# Read 8192 bytes before proceeding to check for errors.
# This is to avoid opening the output unnecessarily.
try:
log.debug("Pre-buffering 8192 bytes")
prebuffer = stream_fd.read(8192)
except IOError as err:
stream_fd.close()
raise StreamError("Failed to read data from stream: {0}".format(err))
if not prebuffer:
stream_fd.close()
raise StreamError("No data returned from stream")
return stream_fd, prebuffer
def result_to_components(self, result, model, island_data, isflags):
"""
Convert fitting results into a set of components
Parameters
----------
result : lmfit.MinimizerResult
The fitting results.
model : lmfit.Parameters
The model that was fit.
island_data : :class:`AegeanTools.models.IslandFittingData`
Data about the island that was fit.
isflags : int
Flags that should be added to this island (in addition to those within the model)
Returns
-------
sources : list
A list of components, and islands if requested.
"""
global_data = self.global_data
# island data
isle_num = island_data.isle_num
idata = island_data.i
xmin, xmax, ymin, ymax = island_data.offsets
box = slice(int(xmin), int(xmax)), slice(int(ymin), int(ymax))
rms = global_data.rmsimg[box]
bkg = global_data.bkgimg[box]
residual = np.median(result.residual), np.std(result.residual)
is_flag = isflags
sources = []
j = 0
for j in range(model['components'].value):
src_flags = is_flag
source = OutputSource()
source.island = isle_num
source.source = j
self.log.debug(" component {0}".format(j))
prefix = "c{0}_".format(j)
xo = model[prefix + 'xo'].value
yo = model[prefix + 'yo'].value
sx = model[prefix + 'sx'].value
sy = model[prefix + 'sy'].value
theta = model[prefix + 'theta'].value
amp = model[prefix + 'amp'].value
src_flags |= model[prefix + 'flags'].value
# these are goodness of fit statistics for the entire island.
source.residual_mean = residual[0]
source.residual_std = residual[1]
# set the flags
source.flags = src_flags
# #pixel pos within island +
# island offset within region +
# region offset within image +
# 1 for luck
# (pyfits->fits conversion = luck)
x_pix = xo + xmin + 1
y_pix = yo + ymin + 1
# update the source xo/yo so the error calculations can be done correctly
# Note that you have to update the max or the value you set will be clipped at the max allowed value
model[prefix + 'xo'].set(value=x_pix, max=np.inf)
model[prefix + 'yo'].set(value=y_pix, max=np.inf)
# ------ extract source parameters ------
# fluxes
# the background is taken from background map
# Clamp the pixel location to the edge of the background map
y = max(min(int(round(y_pix - ymin)), bkg.shape[1] - 1), 0)
x = max(min(int(round(x_pix - xmin)), bkg.shape[0] - 1), 0)
source.background = bkg[x, y]
source.local_rms = rms[x, y]
source.peak_flux = amp
# all params are in degrees
source.ra, source.dec, source.a, source.b, source.pa = global_data.wcshelper.pix2sky_ellipse((x_pix, y_pix),
sx * CC2FHWM,
sy * CC2FHWM,
theta)
source.a *= 3600 # arcseconds
source.b *= 3600
# force a>=b
fix_shape(source)
# limit the pa to be in (-90,90]
source.pa = pa_limit(source.pa)
# if one of these values are nan then there has been some problem with the WCS handling
if not all(np.isfinite((source.ra, source.dec, source.a, source.b, source.pa))):
src_flags |= flags.WCSERR
# negative degrees is valid for RA, but I don't want them.
if source.ra < 0:
source.ra += 360
source.ra_str = dec2hms(source.ra)
source.dec_str = dec2dms(source.dec)
# calculate integrated flux
source.int_flux = source.peak_flux * sx * sy * CC2FHWM ** 2 * np.pi
# scale Jy/beam -> Jy using the area of the beam
source.int_flux /= global_data.psfhelper.get_beamarea_pix(source.ra, source.dec)
# Calculate errors for params that were fit (as well as int_flux)
errors(source, model, global_data.wcshelper)
source.flags = src_flags
# add psf info
local_beam = global_data.psfhelper.get_beam(source.ra, source.dec)
if local_beam is not None:
source.psf_a = local_beam.a * 3600
source.psf_b = local_beam.b * 3600
source.psf_pa = local_beam.pa
else:
source.psf_a = 0
source.psf_b = 0
source.psf_pa = 0
sources.append(source)
self.log.debug(source)
if global_data.blank:
outerclip = island_data.scalars[1]
idx, idy = np.where(abs(idata) - outerclip * rms > 0)
idx += xmin
idy += ymin
self.global_data.img._pixels[[idx, idy]] = np.nan
# calculate the integrated island flux if required
if island_data.doislandflux:
_, outerclip, _ = island_data.scalars
self.log.debug("Integrated flux for island {0}".format(isle_num))
kappa_sigma = np.where(abs(idata) - outerclip * rms > 0, idata, np.NaN)
self.log.debug("- island shape is {0}".format(kappa_sigma.shape))
source = IslandSource()
source.flags = 0
source.island = isle_num
source.components = j + 1
source.peak_flux = np.nanmax(kappa_sigma)
# check for negative islands
if source.peak_flux < 0:
source.peak_flux = np.nanmin(kappa_sigma)
self.log.debug("- peak flux {0}".format(source.peak_flux))
# positions and background
if np.isfinite(source.peak_flux):
positions = np.where(kappa_sigma == source.peak_flux)
else: # if a component has been refit then it might have flux = np.nan
positions = [[kappa_sigma.shape[0] / 2], [kappa_sigma.shape[1] / 2]]
xy = positions[0][0] + xmin, positions[1][0] + ymin
radec = global_data.wcshelper.pix2sky(xy)
source.ra = radec[0]
# convert negative ra's to positive ones
if source.ra < 0:
source.ra += 360
source.dec = radec[1]
source.ra_str = dec2hms(source.ra)
source.dec_str = dec2dms(source.dec)
source.background = bkg[positions[0][0], positions[1][0]]
source.local_rms = rms[positions[0][0], positions[1][0]]
source.x_width, source.y_width = idata.shape
source.pixels = int(sum(np.isfinite(kappa_sigma).ravel() * 1.0))
source.extent = [xmin, xmax, ymin, ymax]
# TODO: investigate what happens when the sky coords are skewed w.r.t the pixel coords
# calculate the area of the island as a fraction of the area of the bounding box
bl = global_data.wcshelper.pix2sky([xmax, ymin])
tl = global_data.wcshelper.pix2sky([xmax, ymax])
tr = global_data.wcshelper.pix2sky([xmin, ymax])
height = gcd(tl[0], tl[1], bl[0], bl[1])
width = gcd(tl[0], tl[1], tr[0], tr[1])
area = height * width
source.area = area * source.pixels / source.x_width / source.y_width # area is in deg^2
# create contours
msq = MarchingSquares(idata)
source.contour = [(a[0] + xmin, a[1] + ymin) for a in msq.perimeter]
# calculate the maximum angular size of this island, brute force method
source.max_angular_size = 0
for i, pos1 in enumerate(source.contour):
radec1 = global_data.wcshelper.pix2sky(pos1)
for j, pos2 in enumerate(source.contour[i:]):
radec2 = global_data.wcshelper.pix2sky(pos2)
dist = gcd(radec1[0], radec1[1], radec2[0], radec2[1])
if dist > source.max_angular_size:
source.max_angular_size = dist
source.pa = bear(radec1[0], radec1[1], radec2[0], radec2[1])
source.max_angular_size_anchors = [pos1[0], pos1[1], pos2[0], pos2[1]]
self.log.debug("- peak position {0}, {1} [{2},{3}]".format(source.ra_str, source.dec_str, positions[0][0],
positions[1][0]))
# integrated flux
beam_area = global_data.psfhelper.get_beamarea_deg2(source.ra, source.dec) # beam in deg^2
# get_beamarea_pix(source.ra, source.dec) # beam is in pix^2
isize = source.pixels # number of non zero pixels
self.log.debug("- pixels used {0}".format(isize))
source.int_flux = np.nansum(kappa_sigma) # total flux Jy/beam
self.log.debug("- sum of pixles {0}".format(source.int_flux))
source.int_flux *= beam_area # total flux in Jy
self.log.debug("- integrated flux {0}".format(source.int_flux))
eta = erf(np.sqrt(-1 * np.log(abs(source.local_rms * outerclip / source.peak_flux)))) ** 2
self.log.debug("- eta {0}".format(eta))
source.eta = eta
source.beam_area = beam_area
# I don't know how to calculate this error so we'll set it to nan
source.err_int_flux = np.nan
sources.append(source)
return sources
def is_local(self):
"""Returns True if the package is in the local package repository"""
local_repo = package_repository_manager.get_repository(
self.config.local_packages_path)
return (self.resource._repository.uid == local_repo.uid)
def suspended_updates():
"""
This allows you to postpone updates to all the search indexes inside of a with:
with suspended_updates():
model1.save()
model2.save()
model3.save()
model4.delete()
"""
if getattr(local_storage, "bulk_queue", None) is None:
local_storage.bulk_queue = defaultdict(list)
try:
yield
finally:
for index, items in local_storage.bulk_queue.items():
index.bulk(chain(*items))
local_storage.bulk_queue = None
def recv_raw(self, x=MTU):
"""Receives a packet, then returns a tuple containing (cls, pkt_data, time)""" # noqa: E501
pkt, sa_ll = self.ins.recvfrom(x)
if self.outs and sa_ll[2] == socket.PACKET_OUTGOING:
return None, None, None
ts = get_last_packet_timestamp(self.ins)
return self.LL, pkt, ts
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
Above examples show how to establish a websocket connection to Salt and
activating real time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=True)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
str('data: {0}\n\n').format(salt.utils.json.dumps(data)), # future lint: disable=blacklisted-function
False
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n%s", data)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
def uniq2orderipix(uniq):
"""
convert a HEALPix pixel coded as a NUNIQ number
to a (norder, ipix) tuple
"""
order = ((np.log2(uniq//4)) // 2)
order = order.astype(int)
ipix = uniq - 4 * (4**order)
return order, ipix
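A quick worked check of the decoding above, following uniq = ipix + 4 * 4**order; the input array is illustrative.
import numpy as np
# uniq = 455: order = floor(log2(455 // 4)) // 2 = 6 // 2 = 3, ipix = 455 - 4 * 4**3 = 199
order, ipix = uniq2orderipix(np.array([455]))
print(order, ipix)   # expected: [3] [199]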
def reindex(clear: bool, progressive: bool, batch_size: int):
"""Reindex all content; optionally clear index before.
All is done in a single transaction by default.
:param clear: clear index content.
:param progressive: don't run in a single transaction.
:param batch_size: number of documents to process before writing to the
index. Unused in single transaction mode. If `None` then
all documents of same content type are written at once.
"""
reindexer = Reindexer(clear, progressive, batch_size)
reindexer.reindex_all()
def create(name, url, backend, frequency=None, owner=None, org=None):
'''Create a new harvest source'''
log.info('Creating a new Harvest source "%s"', name)
source = actions.create_source(name, url, backend,
frequency=frequency,
owner=owner,
organization=org)
log.info('''Created a new Harvest source:
name: {0.name},
slug: {0.slug},
url: {0.url},
backend: {0.backend},
frequency: {0.frequency},
owner: {0.owner},
organization: {0.organization}'''.format(source))
def create_instance(self, body, project_id=None):
"""
Creates a new Cloud SQL instance.
:param body: Body required by the Cloud SQL insert API, as described in
https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body.
:type body: dict
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:return: None
"""
response = self.get_conn().instances().insert(
project=project_id,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(project_id=project_id,
operation_name=operation_name)
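An illustrative call to create_instance; the body fields follow the Cloud SQL Admin API insert request body linked in the docstring, but treat the exact field names and values here as assumptions to verify.
body = {
    "name": "my-instance",                      # assumed instance name
    "settings": {"tier": "db-n1-standard-1"},   # assumed machine tier
}
hook.create_instance(body=body, project_id="my-gcp-project")   # `hook` is the enclosing hook object (assumed)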
def deltran(tree, feature):
"""
DELTRAN (delayed transformation) (Swofford & Maddison, 1987) aims at reducing the number of ambiguities
in the parsimonious result. DELTRAN makes the changes as close as possible to the leaves,
hence prioritizing parallel mutations. DELTRAN is performed after DOWNPASS.
if N is not a root:
P <- parent(N)
if intersection(S(N), S(P)) is not empty:
S(N) <- intersection(S(N), S(P))
if N is not a tip:
L, R <- left and right children of N
DELTRAN(L)
DELTRAN(R)
:param tree: ete3.Tree, the tree of interest
:param feature: str, character for which the parsimonious states are reconstructed
:return: void, modifies get_personalized_feature_name(feature, PARS_STATES) feature of the tree nodes
"""
ps_feature = get_personalized_feature_name(feature, PARS_STATES)
for node in tree.traverse('preorder'):
if not node.is_root():
node_states = getattr(node, ps_feature)
parent_states = getattr(node.up, ps_feature)
state_intersection = node_states & parent_states
if state_intersection:
node.add_feature(ps_feature, state_intersection)
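A minimal usage sketch for deltran, assuming the DOWNPASS state sets are already stored on each node under get_personalized_feature_name(feature, PARS_STATES); the downpass call shown is assumed and is not defined in the snippet above.
from ete3 import Tree
tree = Tree("((A,B),(C,D));")
feature = "trait"
# downpass(tree, feature)   # assumed helper that populates the parsimonious state sets first
deltran(tree, feature)      # then each node's states are intersected with its parent's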
def linkify_h_by_hg(self, hostgroups):
"""Link hosts with hostgroups
:param hostgroups: hostgroups object to link with
:type hostgroups: alignak.objects.hostgroup.Hostgroups
:return: None
"""
# Register host in the hostgroups
for host in self:
new_hostgroups = []
if hasattr(host, 'hostgroups') and host.hostgroups != []:
hgs = [n.strip() for n in host.hostgroups if n.strip()]
for hg_name in hgs:
# TODO: should an unknown hostgroup raise an error ?
hostgroup = hostgroups.find_by_name(hg_name)
if hostgroup is not None:
new_hostgroups.append(hostgroup.uuid)
else:
err = ("the hostgroup '%s' of the host '%s' is "
"unknown" % (hg_name, host.host_name))
host.add_error(err)
host.hostgroups = new_hostgroups
def transform_file_output(result):
""" Transform to convert SDK file/dir list output to something that
more clearly distinguishes between files and directories. """
from collections import OrderedDict
new_result = []
iterable = result if isinstance(result, list) else result.get('items', result)
for item in iterable:
new_entry = OrderedDict()
entity_type = item['type'] # type property is added by transform_file_directory_result
is_dir = entity_type == 'dir'
new_entry['Name'] = item['name'] + '/' if is_dir else item['name']
new_entry['Content Length'] = ' ' if is_dir else item['properties']['contentLength']
new_entry['Type'] = item['type']
new_entry['Last Modified'] = item['properties']['lastModified'] or ' '
new_result.append(new_entry)
return sorted(new_result, key=lambda k: k['Name'])
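A small example of the transform above, with input items shaped the way the function expects (the 'type' key is added upstream by transform_file_directory_result); the sample values are illustrative.
items = [
    {"type": "dir", "name": "logs", "properties": {"contentLength": None, "lastModified": None}},
    {"type": "file", "name": "app.py", "properties": {"contentLength": 2048, "lastModified": "2019-01-01T00:00:00+00:00"}},
]
for row in transform_file_output(items):
    print(dict(row))
# {'Name': 'app.py', 'Content Length': 2048, 'Type': 'file', 'Last Modified': '2019-01-01T00:00:00+00:00'}
# {'Name': 'logs/', 'Content Length': ' ', 'Type': 'dir', 'Last Modified': ' '}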
def get_display(display):
"""dname, protocol, host, dno, screen = get_display(display)
Parse DISPLAY into its components. If DISPLAY is None, use
the default display. The return values are:
DNAME -- the full display name (string)
PROTOCOL -- the protocol to use (None if automatic)
HOST -- the host name (string, possibly empty)
DNO -- display number (integer)
SCREEN -- default screen number (integer)
"""
modname = _display_mods.get(platform, _default_display_mod)
mod = _relative_import(modname)
return mod.get_display(display)
def get_pstats(pstatfile, n):
"""
Return profiling information as an RST table.
:param pstatfile: path to a .pstat file
:param n: the maximum number of stats to retrieve
"""
with tempfile.TemporaryFile(mode='w+') as stream:
ps = pstats.Stats(pstatfile, stream=stream)
ps.sort_stats('cumtime')
ps.print_stats(n)
stream.seek(0)
lines = list(stream)
for i, line in enumerate(lines):
if line.startswith(' ncalls'):
break
data = []
for line in lines[i + 2:]:
columns = line.split()
if len(columns) == 6:
data.append(PStatData(*columns))
rows = [(rec.ncalls, rec.cumtime, rec.path) for rec in data]
# here is an example of the expected output table:
# ====== ======= ========================================================
# ncalls cumtime path
# ====== ======= ========================================================
# 1 33.502 commands/run.py:77(_run)
# 1 33.483 calculators/base.py:110(run)
# 1 25.166 calculators/classical.py:115(execute)
# 1 25.104 baselib.parallel.py:249(apply_reduce)
# 1 25.099 calculators/classical.py:41(classical)
# 1 25.099 hazardlib/calc/hazard_curve.py:164(classical)
return views.rst_table(rows, header='ncalls cumtime path'.split())
def authenticate_with_certificate(reactor, base_url, client_cert, client_key, ca_cert):
"""
See ``authenticate_with_certificate_chain``.
:param pem.Certificate client_cert: The client certificate to use.
"""
return authenticate_with_certificate_chain(
reactor, base_url, [client_cert], client_key, ca_cert,
)
def sio(mag_file, dir_path=".", input_dir_path="",
meas_file="measurements.txt", spec_file="specimens.txt",
samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt",
samp_infile="", institution="", syn=False, syntype="", instrument="",
labfield=0, phi=0, theta=0, peakfield=0,
specnum=0, samp_con='1', location="unknown", lat="", lon="",
noave=False, codelist="", cooling_rates="", coil='', timezone="UTC",
user=""):
"""
converts Scripps Institution of Oceanography measurement files to the MagIC database (data model 3.0)
Parameters
_________
magfile : input measurement file
dir_path : output directory path, default "."
input_dir_path : input file directory IF different from dir_path, default ""
meas_file : output file measurement file name, default "measurements.txt"
spec_file : output file specimen file name, default "specimens.txt"
samp_file : output file sample file name, default "samples.txt"
site_file : output file site file name, default "sites.txt"
loc_file : output file location file name, default "locations.txt"
samp_infile : output file to append to, default ""
syn : if True, this is a synthetic specimen, default False
syntype : sample material type, default ""
instrument : instrument on which the measurements were made (e.g., "SIO-2G"), default ""
labfield : lab field in microtesla for TRM, default 0
phi, theta : direction of lab field [-1,-1 for anisotropy experiments], default 0, 0
peakfield : peak af field in mT for ARM, default 0
specnum : number of terminal characters distinguishing specimen from sample, default 0
samp_con : sample/site naming convention, default '1'
"1" XXXXY: where XXXX is an arbitr[ary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
"2" XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
"3" XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
"4-Z" XXXX[YYY]: YYY is sample designation with Z characters from site XXX
"5" site name same as sample
"6" site is entered under a separate column NOT CURRENTLY SUPPORTED
"7-Z" [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize yourself
or e-mail [email protected] for help.
"8" synthetic - has no site name
"9" ODP naming convention
location : location name for study, default "unknown"
lat : latitude of sites, default ""
lon : longitude of sites, default ""
noave : boolean, if False, average replicates, default False
codelist : colon delimited string of lab protocols (e.g., codelist="AF"), default ""
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
CR: cooling rate experiment.
The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional)
where XXX is the temperature and .10, .20 ... are running numbers of the cooling rate steps.
XXX.00 is optional zerofield baseline. XXX.70 is alteration check.
syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A
where xxx, yyy, zzz ... are cooling rates in [K/minutes], separated by commas, ordered in the same order as XXX.10, XXX.20 ... XXX.70
if you use a zerofield step then no need to specify the cooling rate for the zerofield
It is important to add to the command line the -A option so the measurements will not be averaged.
But users need to make sure that there are no duplicate measurements in the file
cooling_rates : cooling rates (K/min) for cooling rate dependence studies,
given as a comma separated list with one entry per cooling rate step,
i.e. the fast and slow experiments in one string (e.g., "43.6,1.3,43.6"; fast: 43.6 K/min, slow: 1.3 K/min)
coil : 1, 2, or 3; units of IRM field in volts using ASC coil #1, 2, or 3
timezone : timezone of date/time string in comment string, default "UTC"
user : analyst, default ""
Effects
_______
creates MagIC formatted tables
"""
# initialize some stuff
methcode = "LP-NO"
pTRM, MD = 0, 0
dec = [315, 225, 180, 135, 45, 90, 270, 270, 270, 90, 180, 180, 0, 0, 0]
inc = [0, 0, 0, 0, 0, -45, -45, 0, 45, 45, 45, -45, -90, -45, 45]
tdec = [0, 90, 0, 180, 270, 0, 0, 90, 0]
tinc = [0, 0, 90, 0, 0, -90, 0, 0, 90]
missing = 1
demag = "N"
citations = 'This study'
fmt = 'old'
Samps = []
trm = 0
irm = 0
# get args
input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
# measurement outfile
meas_file = pmag.resolve_file_name(meas_file, output_dir_path)
spec_file = pmag.resolve_file_name(spec_file, output_dir_path)
samp_file = pmag.resolve_file_name(samp_file, output_dir_path)
site_file = pmag.resolve_file_name(site_file, output_dir_path)
loc_file = pmag.resolve_file_name(loc_file, output_dir_path)
mag_file = pmag.resolve_file_name(mag_file, input_dir_path)
labfield = float(labfield) * 1e-6
phi = float(phi)
theta = float(theta)
peakfield = float(peakfield) * 1e-3
specnum = int(specnum)
samp_con = str(samp_con)
# make sure all initial values are correctly set up (whether they come from the command line or a GUI)
if samp_infile:
Samps, file_type = pmag.magic_read(samp_infile)
if coil:
coil = str(coil)
methcode = "LP-IRM"
irmunits = "V"
if coil not in ["1", "2", "3"]:
print(__doc__)
print('not a valid coil specification')
return False, '{} is not a valid coil specification'.format(coil)
if mag_file:
lines = pmag.open_file(mag_file)
if not lines:
print("you must provide a valid mag_file")
return False, "you must provide a valid mag_file"
if not mag_file:
print(__doc__)
print("mag_file field is required option")
return False, "mag_file field is required option"
if specnum != 0:
specnum = -specnum
if "4" == samp_con[0]:
if "-" not in samp_con:
print(
"naming convention option [4] must be in form 4-Z where Z is an integer")
print('---------------')
return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "4"
if "7" == samp_con[0]:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "option [7] must be in form 7-Z where Z is an integer"
else:
Z = samp_con.split("-")[1]
samp_con = "7"
else:
Z = 0
if codelist:
codes = codelist.split(':')
if "AF" in codes:
demag = 'AF'
if '-dc' not in sys.argv:
methcode = "LT-AF-Z"
if '-dc' in sys.argv:
methcode = "LT-AF-I"
if "T" in codes:
demag = "T"
if '-dc' not in sys.argv:
methcode = "LT-T-Z"
if '-dc' in sys.argv:
methcode = "LT-T-I"
if "I" in codes:
methcode = "LP-IRM"
irmunits = "mT"
if "I3d" in codes:
methcode = "LT-T-Z:LP-IRM-3D"
if "S" in codes:
demag = "S"
methcode = "LP-PI-TRM:LP-PI-ALT-AFARM"
trm_labfield = labfield
ans = input("DC lab field for ARM step: [50uT] ")
if ans == "":
arm_labfield = 50e-6
else:
arm_labfield = float(ans)*1e-6
ans = input("temperature for total trm step: [600 C] ")
if ans == "":
trm_peakT = 600+273 # convert to kelvin
else:
trm_peakT = float(ans)+273 # convert to kelvin
if "G" in codes:
methcode = "LT-AF-G"
if "D" in codes:
methcode = "LT-AF-D"
if "TRM" in codes:
demag = "T"
trm = 1
if "CR" in codes:
demag = "T"
cooling_rate_experiment = 1
# command_line does not exist in this code
cooling_rates_list = cooling_rates.split(',')
# if command_line:
# ind=sys.argv.index("CR")
# cooling_rates=sys.argv[ind+1]
# cooling_rates_list=cooling_rates.split(',')
# else:
# cooling_rates_list=str(cooling_rates).split(',')
if demag == "T" and "ANI" in codes:
methcode = "LP-AN-TRM"
if demag == "T" and "CR" in codes:
methcode = "LP-CR-TRM"
if demag == "AF" and "ANI" in codes:
methcode = "LP-AN-ARM"
if labfield == 0:
labfield = 50e-6
if peakfield == 0:
peakfield = .180
MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
version_num = pmag.get_version()
##################################
for line in lines:
instcode = ""
if len(line) > 2:
MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
MeasRec['software_packages'] = version_num
MeasRec["description"] = ""
MeasRec["treat_temp"] = '%8.3e' % (273) # room temp in kelvin
MeasRec["meas_temp"] = '%8.3e' % (273) # room temp in kelvin
MeasRec["treat_ac_field"] = '0'
MeasRec["treat_dc_field"] = '0'
MeasRec["treat_dc_field_phi"] = '0'
MeasRec["treat_dc_field_theta"] = '0'
meas_type = "LT-NO"
rec = line.split()
try:
float(rec[0])
print("No specimen name for line #%d in the measurement file" %
lines.index(line))
continue
except ValueError:
pass
if rec[1] == ".00":
rec[1] = "0.00"
treat = rec[1].split('.')
if methcode == "LP-IRM":
if irmunits == 'mT':
labfield = float(treat[0])*1e-3
else:
labfield = pmag.getfield(irmunits, coil, treat[0])
if rec[1][0] != "-":
phi, theta = 0., 90.
else:
phi, theta = 0., -90.
meas_type = "LT-IRM"
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (phi)
MeasRec["treat_dc_field_theta"] = '%7.1f' % (theta)
if len(rec) > 6:
# break e.g., 10/15/02;7:45 into date and time
code1 = rec[6].split(';')
if len(code1) == 2: # old format with AM/PM
missing = 0
code2 = code1[0].split('/') # break date into mon/day/year
# break e.g., AM;C34;200 into time;instr/axes/measuring pos;number of measurements
code3 = rec[7].split(';')
yy = int(code2[2])
if yy < 90:
yyyy = str(2000+yy)
else:
yyyy = str(1900+yy)
mm = int(code2[0])
if mm < 10:
mm = "0"+str(mm)
else:
mm = str(mm)
dd = int(code2[1])
if dd < 10:
dd = "0"+str(dd)
else:
dd = str(dd)
time = code1[1].split(':')
hh = int(time[0])
if code3[0] == "PM":
hh = hh+12
if hh < 10:
hh = "0"+str(hh)
else:
hh = str(hh)
min = int(time[1])
if min < 10:
min = "0"+str(min)
else:
min = str(min)
dt = yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00"
local = pytz.timezone(timezone)
naive = datetime.datetime.strptime(dt, "%Y:%m:%d:%H:%M:%S")
local_dt = local.localize(naive, is_dst=None)
utc_dt = local_dt.astimezone(pytz.utc)
MeasRec["timestamp"] = utc_dt.strftime(
"%Y-%m-%dT%H:%M:%S")+"Z"
if instrument == "":
if code3[1][0] == 'C':
instcode = 'SIO-bubba'
if code3[1][0] == 'G':
instcode = 'SIO-flo'
else:
instcode = ''
MeasRec["meas_n_orient"] = code3[1][2]
elif len(code1) > 2: # newest format (cryo7 or later)
if "LP-AN-ARM" not in methcode:
labfield = 0
fmt = 'new'
date = code1[0].split('/') # break date into mon/day/year
yy = int(date[2])
if yy < 90:
yyyy = str(2000+yy)
else:
yyyy = str(1900+yy)
mm = int(date[0])
if mm < 10:
mm = "0"+str(mm)
else:
mm = str(mm)
dd = int(date[1])
if dd < 10:
dd = "0"+str(dd)
else:
dd = str(dd)
time = code1[1].split(':')
hh = int(time[0])
if hh < 10:
hh = "0"+str(hh)
else:
hh = str(hh)
min = int(time[1])
if min < 10:
min = "0"+str(min)
else:
min = str(min)
dt = yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00"
local = pytz.timezone(timezone)
naive = datetime.datetime.strptime(dt, "%Y:%m:%d:%H:%M:%S")
local_dt = local.localize(naive, is_dst=None)
utc_dt = local_dt.astimezone(pytz.utc)
MeasRec["timestamp"] = utc_dt.strftime(
"%Y-%m-%dT%H:%M:%S")+"Z"
if instrument == "":
if code1[6][0] == 'C':
instcode = 'SIO-bubba'
if code1[6][0] == 'G':
instcode = 'SIO-flo'
else:
instcode = ''
if len(code1) > 1:
MeasRec["meas_n_orient"] = code1[6][2]
else:
# takes care of awkward format with bubba and flo being different
MeasRec["meas_n_orient"] = code1[7]
if user == "":
user = code1[5]
if code1[2][-1].upper() == 'C':
demag = "T"
if code1[4] == 'microT' and float(code1[3]) != 0. and "LP-AN-ARM" not in methcode:
labfield = float(code1[3])*1e-6
if code1[2] == 'mT' and methcode != "LP-IRM":
demag = "AF"
if code1[4] == 'microT' and float(code1[3]) != 0.:
labfield = float(code1[3])*1e-6
if code1[4] == 'microT' and labfield != 0. and meas_type != "LT-IRM":
phi, theta = 0., -90.
if demag == "T":
meas_type = "LT-T-I"
if demag == "AF":
meas_type = "LT-AF-I"
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (phi)
MeasRec["treat_dc_field_theta"] = '%7.1f' % (theta)
if code1[4] == '' or labfield == 0. and meas_type != "LT-IRM":
if demag == 'T':
meas_type = "LT-T-Z"
if demag == "AF":
meas_type = "LT-AF-Z"
MeasRec["treat_dc_field"] = '0'
if not syn:
specimen = rec[0]
MeasRec["specimen"] = specimen
if specnum != 0:
sample = rec[0][:specnum]
else:
sample = rec[0]
if samp_infile and Samps: # if samp_infile was provided AND yielded sample data
samp = pmag.get_dictitem(Samps, 'sample', sample, 'T')
if len(samp) > 0:
location = samp[0]["location"]
site = samp[0]["site"]
else:
location = ''
site = ''
else:
site = pmag.parse_site(sample, samp_con, Z)
if location != '' and location not in [x['location'] if 'location' in list(x.keys()) else '' for x in LocRecs]:
LocRec['location'] = location
LocRec['lat_n'] = lat
LocRec['lat_s'] = lat
LocRec['lon_e'] = lon
LocRec['lon_w'] = lon
LocRecs.append(LocRec)
if site != '' and site not in [x['site'] if 'site' in list(x.keys()) else '' for x in SiteRecs]:
SiteRec['location'] = location
SiteRec['site'] = site
SiteRec['lat'] = lat
SiteRec['lon'] = lon
SiteRecs.append(SiteRec)
if sample != '' and sample not in [x['sample'] if 'sample' in list(x.keys()) else '' for x in SampRecs]:
SampRec['site'] = site
SampRec['sample'] = sample
SampRecs.append(SampRec)
if specimen != '' and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else '' for x in SpecRecs]:
SpecRec["specimen"] = specimen
SpecRec['sample'] = sample
SpecRecs.append(SpecRec)
else:
specimen = rec[0]
MeasRec["specimen"] = specimen
if specnum != 0:
sample = rec[0][:specnum]
else:
sample = rec[0]
site = pmag.parse_site(sample, samp_con, Z)
if location != '' and location not in [x['location'] if 'location' in list(x.keys()) else '' for x in LocRecs]:
LocRec['location'] = location
LocRec['lat_n'] = lat
LocRec['lat_s'] = lat
LocRec['lon_e'] = lon
LocRec['lon_w'] = lon
LocRecs.append(LocRec)
if site != '' and site not in [x['site'] if 'site' in list(x.keys()) else '' for x in SiteRecs]:
SiteRec['location'] = location
SiteRec['site'] = site
SiteRec['lat'] = lat
SiteRec['lon'] = lon
SiteRecs.append(SiteRec)
if sample != '' and sample not in [x['sample'] if 'sample' in list(x.keys()) else '' for x in SampRecs]:
SampRec['site'] = site
SampRec['sample'] = sample
SampRecs.append(SampRec)
if specimen != '' and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else '' for x in SpecRecs]:
SpecRec["specimen"] = specimen
SpecRec['sample'] = sample
SpecRecs.append(SpecRec)
SampRec["institution"] = institution
SampRec["material_type"] = syntype
# MeasRec["sample"]=sample
if float(rec[1]) == 0:
pass
elif demag == "AF":
if methcode != "LP-AN-ARM":
MeasRec["treat_ac_field"] = '%8.3e' % (
float(rec[1])*1e-3) # peak field in tesla
if meas_type == "LT-AF-Z":
MeasRec["treat_dc_field"] = '0'
else: # AARM experiment
if treat[1][0] == '0':
meas_type = "LT-AF-Z:LP-AN-ARM:"
MeasRec["treat_ac_field"] = '%8.3e' % (
peakfield) # peak field in tesla
MeasRec["treat_dc_field"] = '%8.3e' % (0)
if labfield != 0 and methcode != "LP-AN-ARM":
print(
"Warning - inconsistency in mag file with lab field - overriding file with 0")
else:
meas_type = "LT-AF-I:LP-AN-ARM"
ipos = int(treat[0])-1
MeasRec["treat_dc_field_phi"] = '%7.1f' % (dec[ipos])
MeasRec["treat_dc_field_theta"] = '%7.1f' % (inc[ipos])
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_ac_field"] = '%8.3e' % (
peakfield) # peak field in tesla
elif demag == "T" and methcode == "LP-AN-TRM":
MeasRec["treat_temp"] = '%8.3e' % (
float(treat[0])+273.) # temp in kelvin
if treat[1][0] == '0':
meas_type = "LT-T-Z:LP-AN-TRM"
MeasRec["treat_dc_field"] = '%8.3e' % (0)
MeasRec["treat_dc_field_phi"] = '0'
MeasRec["treat_dc_field_theta"] = '0'
else:
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
if treat[1][0] == '7': # alteration check as final measurement
meas_type = "LT-PTRM-I:LP-AN-TRM"
else:
meas_type = "LT-T-I:LP-AN-TRM"
# find the direction of the lab field in two ways:
# (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
ipos_code = int(treat[1][0])-1
# (2) using the magnetization
DEC = float(rec[4])
INC = float(rec[5])
if INC < 45 and INC > -45:
if DEC > 315 or DEC < 45:
ipos_guess = 0
if DEC > 45 and DEC < 135:
ipos_guess = 1
if DEC > 135 and DEC < 225:
ipos_guess = 3
if DEC > 225 and DEC < 315:
ipos_guess = 4
else:
if INC > 45:
ipos_guess = 2
if INC < -45:
ipos_guess = 5
# prefer the guess over the code
ipos = ipos_guess
MeasRec["treat_dc_field_phi"] = '%7.1f' % (tdec[ipos])
MeasRec["treat_dc_field_theta"] = '%7.1f' % (tinc[ipos])
# check it
if ipos_guess != ipos_code and treat[1][0] != '7':
print("-E- ERROR: check specimen %s step %s, ATRM measurements, coding does not match the direction of the lab field!" %
(rec[0], ".".join(list(treat))))
elif demag == "S": # Shaw experiment
if treat[1][1] == '0':
if int(treat[0]) != 0:
MeasRec["treat_ac_field"] = '%8.3e' % (
float(treat[0])*1e-3) # AF field in tesla
MeasRec["treat_dc_field"] = '0'
meas_type = "LT-AF-Z" # first AF
else:
meas_type = "LT-NO"
MeasRec["treat_ac_field"] = '0'
MeasRec["treat_dc_field"] = '0'
elif treat[1][1] == '1':
if int(treat[0]) == 0:
MeasRec["treat_ac_field"] = '%8.3e' % (
peakfield) # peak field in tesla
MeasRec["treat_dc_field"] = '%8.3e' % (arm_labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (phi)
MeasRec["treat_dc_field_theta"] = '%7.1f' % (theta)
meas_type = "LT-AF-I"
else:
MeasRec["treat_ac_field"] = '%8.3e' % (
float(treat[0])*1e-3) # AF field in tesla
MeasRec["treat_dc_field"] = '0'
meas_type = "LT-AF-Z"
elif treat[1][1] == '2':
if int(treat[0]) == 0:
MeasRec["treat_ac_field"] = '0'
MeasRec["treat_dc_field"] = '%8.3e' % (trm_labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (phi)
MeasRec["treat_dc_field_theta"] = '%7.1f' % (theta)
MeasRec["treat_temp"] = '%8.3e' % (trm_peakT)
meas_type = "LT-T-I"
else:
MeasRec["treat_ac_field"] = '%8.3e' % (
float(treat[0])*1e-3) # AF field in tesla
MeasRec["treat_dc_field"] = '0'
meas_type = "LT-AF-Z"
elif treat[1][1] == '3':
if int(treat[0]) == 0:
MeasRec["treat_ac_field"] = '%8.3e' % (
peakfield) # peak field in tesla
MeasRec["treat_dc_field"] = '%8.3e' % (arm_labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (phi)
MeasRec["treat_dc_field_theta"] = '%7.1f' % (theta)
meas_type = "LT-AF-I"
else:
MeasRec["treat_ac_field"] = '%8.3e' % (
float(treat[0])*1e-3) # AF field in tesla
MeasRec["treat_dc_field"] = '0'
meas_type = "LT-AF-Z"
# Cooling rate experiment # added by rshaar
elif demag == "T" and methcode == "LP-CR-TRM":
MeasRec["treat_temp"] = '%8.3e' % (
float(treat[0])+273.) # temp in kelvin
if treat[1][0] == '0':
meas_type = "LT-T-Z:LP-CR-TRM"
MeasRec["treat_dc_field"] = '%8.3e' % (0)
MeasRec["treat_dc_field_phi"] = '0'
MeasRec["treat_dc_field_theta"] = '0'
else:
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
if treat[1][0] == '7': # alteration check as final measurement
meas_type = "LT-PTRM-I:LP-CR-TRM"
else:
meas_type = "LT-T-I:LP-CR-TRM"
MeasRec["treat_dc_field_phi"] = '%7.1f' % (
phi) # labfield phi
MeasRec["treat_dc_field_theta"] = '%7.1f' % (
theta) # labfield theta
indx = int(treat[1][0])-1
# alteration check marked as 0.7 in the measurement file
if indx == 6:
cooling_time = cooling_rates_list[-1]
else:
cooling_time = cooling_rates_list[indx]
MeasRec["description"] = "cooling_rate" + \
":"+cooling_time+":"+"K/min"
noave = 1
elif demag != 'N':
if len(treat) == 1:
treat.append('0')
MeasRec["treat_temp"] = '%8.3e' % (
float(treat[0])+273.) # temp in kelvin
if trm == 0: # demag=T and not trmaq
if treat[1][0] == '0':
meas_type = "LT-T-Z"
else:
# labfield in tesla (convert from microT)
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (
phi) # labfield phi
MeasRec["treat_dc_field_theta"] = '%7.1f' % (
theta) # labfield theta
if treat[1][0] == '1':
meas_type = "LT-T-I" # in-field thermal step
if treat[1][0] == '2':
meas_type = "LT-PTRM-I" # pTRM check
pTRM = 1
if treat[1][0] == '3':
# this is a zero field step
MeasRec["treat_dc_field"] = '0'
meas_type = "LT-PTRM-MD" # pTRM tail check
else:
labfield = float(treat[1])*1e-6
# labfield in tesla (convert from microT)
MeasRec["treat_dc_field"] = '%8.3e' % (labfield)
MeasRec["treat_dc_field_phi"] = '%7.1f' % (
phi) # labfield phi
MeasRec["treat_dc_field_theta"] = '%7.1f' % (
theta) # labfield theta
meas_type = "LT-T-I:LP-TRM" # trm acquisition experiment
MeasRec["dir_csd"] = rec[2]
MeasRec["magn_moment"] = '%10.3e' % (
float(rec[3])*1e-3) # moment in Am^2 (from emu)
MeasRec["dir_dec"] = rec[4]
MeasRec["dir_inc"] = rec[5]
MeasRec["instrument_codes"] = instcode
MeasRec["analysts"] = user
MeasRec["citations"] = citations
if "LP-IRM-3D" in methcode:
meas_type = methcode
# MeasRec["method_codes"]=methcode.strip(':')
MeasRec["method_codes"] = meas_type
MeasRec["quality"] = 'g'
if 'std' in rec[0]:
MeasRec["standard"] = 's'
else:
MeasRec["standard"] = 'u'
MeasRec["treat_step_num"] = 0
# print MeasRec['treat_temp']
MeasRecs.append(MeasRec)
con = cb.Contribution(output_dir_path, read_tables=[])
# create MagIC tables
con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
con.add_magic_table_from_data(dtype='samples', data=SampRecs)
con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
con.add_magic_table_from_data(dtype='locations', data=LocRecs)
MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
# write MagIC tables to file
con.tables['specimens'].write_magic_file(custom_name=spec_file,dir_path=dir_path)
con.tables['samples'].write_magic_file(custom_name=samp_file,dir_path=dir_path)
con.tables['sites'].write_magic_file(custom_name=site_file,dir_path=dir_path)
con.tables['locations'].write_magic_file(custom_name=loc_file,dir_path=dir_path)
meas_file = con.tables['measurements'].write_magic_file(
custom_name=meas_file,dir_path=dir_path)
return True, meas_file | converts Scripps Institution of Oceanography measurement files to MagIC data base model 3.0
Parameters
_________
magfile : input measurement file
dir_path : output directory path, default "."
input_dir_path : input file directory IF different from dir_path, default ""
meas_file : output file measurement file name, default "measurements.txt"
spec_file : output file specimen file name, default "specimens.txt"
samp_file : output file sample file name, default "samples.tt"
site_file : output file site file name, default "sites.txt"
loc_file : output file location file name, default "locations.txt"
samp_infile : output file to append to, default ""
syn : if True, this is a synthetic specimen, default False
syntype : sample material type, default ""
instrument : instrument on which the measurements were made (e.g., "SIO-2G"), default ""
labfield : lab field in microtesla for TRM, default 0
phi, theta : direction of lab field [-1,-1 for anisotropy experiments], default 0, 0
peakfield : peak af field in mT for ARM, default 0
specnum : number of terminal characters distinguishing specimen from sample, default 0
samp_con : sample/site naming convention, default '1'
"1" XXXXY: where XXXX is an arbitr[ary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
"2" XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
"3" XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
"4-Z" XXXX[YYY]: YYY is sample designation with Z characters from site XXX
"5" site name same as sample
"6" site is entered under a separate column NOT CURRENTLY SUPPORTED
"7-Z" [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail [email protected] for help.
"8" synthetic - has no site name
"9" ODP naming convention
location : location name for study, default "unknown"
lat : latitude of sites, default ""
lon : longitude of sites, default ""
noave : boolean, if False, average replicates, default False
codelist : colon delimited string of lab protocols (e.g., codelist="AF"), default ""
AF: af demag
T: thermal including thellier but not trm acquisition
S: Shaw method
I: IRM (acquisition)
N: NRM only
TRM: trm acquisition
ANI: anisotropy experiment
D: double AF demag
G: triple AF demag (GRM protocol)
CR: cooling rate experiment.
The treatment coding of the measurement file should be: XXX.00, XXX.10, XXX.20 ... XXX.70 etc. (XXX.00 is optional)
where XXX is the temperature and .10, .20 ... are running numbers of the cooling rate steps.
XXX.00 is the optional zerofield baseline. XXX.70 is the alteration check.
syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A
where xxx, yyy, zzz ... xxx are cooling times in [K/minutes], separated by commas, ordered in the same order as XXX.10, XXX.20 ... XXX.70
if you use a zerofield step then no need to specify the cooling rate for the zerofield
It is important to add to the command line the -A option so the measurements will not be averaged.
But users need to make sure that there are no duplicate measurements in the file
cooling_rates : cooling rates in K/min for cooling rate dependence studies,
given as a comma separated list with one entry per cooling rate step, covering
the fast and slow experiments (e.g., "43.6,1.3,43.6"; fast: 43.6 K/min, slow: 1.3 K/min)
coil : 1, 2, or 3; units of IRM field in volts using ASC coil #1, 2 or 3
timezone : timezone of date/time string in comment string, default "UTC"
user : analyst, default ""
Effects
_______
creates MagIC formatted tables |
def get_timerange_formatted(self, now):
"""
Return two ISO8601 formatted date strings, one for timeMin, the other for timeMax (to be consumed by get_events)
"""
later = now + datetime.timedelta(days=self.days)
return now.isoformat(), later.isoformat() | Return two ISO8601 formatted date strings, one for timeMin, the other for timeMax (to be consumed by get_events) |
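A minimal, runnable sketch of the method above, assuming only that it lives on an object exposing a `days` attribute; the `Window` class below is a hypothetical stand-in for that object.
import datetime

class Window:
    # Hypothetical holder for the `days` window used by get_timerange_formatted.
    def __init__(self, days):
        self.days = days

    def get_timerange_formatted(self, now):
        later = now + datetime.timedelta(days=self.days)
        return now.isoformat(), later.isoformat()

# Two ISO8601 strings spanning the next 7 days, suitable for timeMin/timeMax.
time_min, time_max = Window(7).get_timerange_formatted(datetime.datetime.now())
print(time_min, time_max)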
def list_traces(
self,
project_id,
view=None,
page_size=None,
start_time=None,
end_time=None,
filter_=None,
order_by=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns of a list of traces that match the specified filter conditions.
Example:
>>> from google.cloud import trace_v1
>>>
>>> client = trace_v1.TraceServiceClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_traces(project_id):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_traces(project_id).pages:
... for element in page:
... # process element
... pass
Args:
project_id (str): ID of the Cloud project where the trace data is stored.
view (~google.cloud.trace_v1.types.ViewType): Type of data returned for traces in the list. Optional. Default is
``MINIMAL``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
start_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): Start of the time interval (inclusive) during which the trace data was
collected from the application.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Timestamp`
end_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): End of the time interval (inclusive) during which the trace data was
collected from the application.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Timestamp`
filter_ (str): An optional filter against labels for the request.
By default, searches use prefix matching. To specify exact match,
prepend a plus symbol (``+``) to the search term. Multiple terms are
ANDed. Syntax:
- ``root:NAME_PREFIX`` or ``NAME_PREFIX``: Return traces where any root
span starts with ``NAME_PREFIX``.
- ``+root:NAME`` or ``+NAME``: Return traces where any root span's name
is exactly ``NAME``.
- ``span:NAME_PREFIX``: Return traces where any span starts with
``NAME_PREFIX``.
- ``+span:NAME``: Return traces where any span's name is exactly
``NAME``.
- ``latency:DURATION``: Return traces whose overall latency is greater
or equal to than ``DURATION``. Accepted units are nanoseconds
(``ns``), milliseconds (``ms``), and seconds (``s``). Default is
``ms``. For example, ``latency:24ms`` returns traces whose overall
latency is greater than or equal to 24 milliseconds.
- ``label:LABEL_KEY``: Return all traces containing the specified label
key (exact match, case-sensitive) regardless of the key:value pair's
value (including empty values).
- ``LABEL_KEY:VALUE_PREFIX``: Return all traces containing the
specified label key (exact match, case-sensitive) whose value starts
with ``VALUE_PREFIX``. Both a key and a value must be specified.
- ``+LABEL_KEY:VALUE``: Return all traces containing a key:value pair
exactly matching the specified text. Both a key and a value must be
specified.
- ``method:VALUE``: Equivalent to ``/http/method:VALUE``.
- ``url:VALUE``: Equivalent to ``/http/url:VALUE``.
order_by (str): Field used to sort the returned traces. Optional. Can be one of the
following:
- ``trace_id``
- ``name`` (``name`` field of root span in the trace)
- ``duration`` (difference between ``end_time`` and ``start_time``
fields of the root span)
- ``start`` (``start_time`` field of the root span)
Descending order can be specified by appending ``desc`` to the sort
field (for example, ``name desc``).
Only one sort field is permitted.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.trace_v1.types.Trace` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_traces" not in self._inner_api_calls:
self._inner_api_calls[
"list_traces"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_traces,
default_retry=self._method_configs["ListTraces"].retry,
default_timeout=self._method_configs["ListTraces"].timeout,
client_info=self._client_info,
)
request = trace_pb2.ListTracesRequest(
project_id=project_id,
view=view,
page_size=page_size,
start_time=start_time,
end_time=end_time,
filter=filter_,
order_by=order_by,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_id", project_id)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_traces"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="traces",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator | Returns of a list of traces that match the specified filter conditions.
Example:
>>> from google.cloud import trace_v1
>>>
>>> client = trace_v1.TraceServiceClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_traces(project_id):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_traces(project_id).pages:
... for element in page:
... # process element
... pass
Args:
project_id (str): ID of the Cloud project where the trace data is stored.
view (~google.cloud.trace_v1.types.ViewType): Type of data returned for traces in the list. Optional. Default is
``MINIMAL``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
start_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): Start of the time interval (inclusive) during which the trace data was
collected from the application.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Timestamp`
end_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): End of the time interval (inclusive) during which the trace data was
collected from the application.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Timestamp`
filter_ (str): An optional filter against labels for the request.
By default, searches use prefix matching. To specify exact match,
prepend a plus symbol (``+``) to the search term. Multiple terms are
ANDed. Syntax:
- ``root:NAME_PREFIX`` or ``NAME_PREFIX``: Return traces where any root
span starts with ``NAME_PREFIX``.
- ``+root:NAME`` or ``+NAME``: Return traces where any root span's name
is exactly ``NAME``.
- ``span:NAME_PREFIX``: Return traces where any span starts with
``NAME_PREFIX``.
- ``+span:NAME``: Return traces where any span's name is exactly
``NAME``.
- ``latency:DURATION``: Return traces whose overall latency is greater
or equal to than ``DURATION``. Accepted units are nanoseconds
(``ns``), milliseconds (``ms``), and seconds (``s``). Default is
``ms``. For example, ``latency:24ms`` returns traces whose overall
latency is greater than or equal to 24 milliseconds.
- ``label:LABEL_KEY``: Return all traces containing the specified label
key (exact match, case-sensitive) regardless of the key:value pair's
value (including empty values).
- ``LABEL_KEY:VALUE_PREFIX``: Return all traces containing the
specified label key (exact match, case-sensitive) whose value starts
with ``VALUE_PREFIX``. Both a key and a value must be specified.
- ``+LABEL_KEY:VALUE``: Return all traces containing a key:value pair
exactly matching the specified text. Both a key and a value must be
specified.
- ``method:VALUE``: Equivalent to ``/http/method:VALUE``.
- ``url:VALUE``: Equivalent to ``/http/url:VALUE``.
order_by (str): Field used to sort the returned traces. Optional. Can be one of the
following:
- ``trace_id``
- ``name`` (``name`` field of root span in the trace)
- ``duration`` (difference between ``end_time`` and ``start_time``
fields of the root span)
- ``start`` (``start_time`` field of the root span)
Descending order can be specified by appending ``desc`` to the sort
field (for example, ``name desc``).
Only one sort field is permitted.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.trace_v1.types.Trace` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
def get_values(self, obj):
"""get label and shape for classes.
The label contains all attributes and methods
"""
if is_exception(obj.node):
label = r"\fb\f09%s\fn" % obj.title
else:
label = r"\fb%s\fn" % obj.title
if obj.shape == "interface":
shape = "ellipse"
else:
shape = "box"
if not self.config.only_classnames:
attrs = obj.attrs
methods = [func.name for func in obj.methods]
# box width for UML like diagram
maxlen = max(len(name) for name in [obj.title] + methods + attrs)
line = "_" * (maxlen + 2)
label = r"%s\n\f%s" % (label, line)
for attr in attrs:
label = r"%s\n\f08%s" % (label, attr)
if attrs:
label = r"%s\n\f%s" % (label, line)
for func in methods:
label = r"%s\n\f10%s()" % (label, func)
return dict(label=label, shape=shape) | get label and shape for classes.
The label contains all attributes and methods |
def flag_forgotten_entries(session, today=None):
"""Flag any entries from previous days where users forgot to sign
out.
:param session: SQLAlchemy session through which to access the database.
:param today: (optional) The current date as a `datetime.date` object. Used for testing.
""" # noqa
today = date.today() if today is None else today
forgotten = (
session
.query(Entry)
.filter(Entry.time_out.is_(None))
.filter(Entry.forgot_sign_out.is_(False))
.filter(Entry.date < today)
)
for entry in forgotten:
e = sign_out(entry, forgot=True)
logger.debug('Signing out forgotten entry: {}'.format(e))
session.add(e)
session.commit() | Flag any entries from previous days where users forgot to sign
out.
:param session: SQLAlchemy session through which to access the database.
:param today: (optional) The current date as a `datetime.date` object. Used for testing. |
def step_forward_with_function(self, uv0fun, uv1fun, dt):
"""Advance particles using a function to determine u and v.
Parameters
----------
uv0fun : function
Called like ``uv0fun(x,y)``. Should return the velocity field
u, v at time t.
uv1fun : function
Called like ``uv1fun(x,y)``. Should return the velocity field
u, v at time t + dt.
dt : number
Timestep."""
dx, dy = self._rk4_integrate(self.x, self.y, uv0fun, uv1fun, dt)
self.x = self._wrap_x(self.x + dx)
self.y = self._wrap_y(self.y + dy) | Advance particles using a function to determine u and v.
Parameters
----------
uv0fun : function
Called like ``uv0fun(x,y)``. Should return the velocity field
u, v at time t.
uv1fun : function
Called like ``uv1fun(x,y)``. Should return the velocity field
u, v at time t + dt.
dt : number
Timestep. |
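`_rk4_integrate` is not shown above, so the following standalone sketch illustrates one common way a two-snapshot RK4 step can be written; the linear time interpolation between `uv0fun` and `uv1fun` is an assumption for illustration, not necessarily what the class itself does.
import numpy as np

def rk4_step(x, y, uv0fun, uv1fun, dt):
    # One RK4 step for dx/dt = u, dy/dt = v, blending the two velocity snapshots
    # linearly in time (frac=0 at t, frac=1 at t+dt).
    def uv(xp, yp, frac):
        u0, v0 = uv0fun(xp, yp)
        u1, v1 = uv1fun(xp, yp)
        return (1 - frac) * u0 + frac * u1, (1 - frac) * v0 + frac * v1

    k1u, k1v = uv(x, y, 0.0)
    k2u, k2v = uv(x + 0.5 * dt * k1u, y + 0.5 * dt * k1v, 0.5)
    k3u, k3v = uv(x + 0.5 * dt * k2u, y + 0.5 * dt * k2v, 0.5)
    k4u, k4v = uv(x + dt * k3u, y + dt * k3v, 1.0)
    dx = dt / 6.0 * (k1u + 2 * k2u + 2 * k3u + k4u)
    dy = dt / 6.0 * (k1v + 2 * k2v + 2 * k3v + k4v)
    return dx, dy

def uv_field(x, y):
    # Steady solid-body rotation: u = -y, v = x (same field at both time levels).
    return -y, x

print(rk4_step(np.array([1.0]), np.array([0.0]), uv_field, uv_field, 0.1))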
def scale_rows(A, v, copy=True):
"""Scale the sparse rows of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1)))
"""
v = np.ravel(v)
M, N = A.shape
if not isspmatrix(A):
raise ValueError('scale rows needs a sparse matrix')
if M != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_rows(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
R, C = A.blocksize
bsr_scale_rows(int(M/R), int(N/C), R, C, A.indptr, A.indices,
np.ravel(A.data), v)
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_rows(M, N, A.indptr, A.indices, A.data, v)
else:
fmt = A.format
A = scale_rows(csr_matrix(A), v).asformat(fmt)
return A | Scale the sparse rows of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1))) |
def stack_memory(data, n_steps=2, delay=1, **kwargs):
"""Short-term history embedding: vertically concatenate a data
vector or matrix with delayed copies of itself.
Each column `data[:, i]` is mapped to::
data[:, i] -> [data[:, i],
data[:, i - delay],
...
data[:, i - (n_steps-1)*delay]]
For columns `i < (n_steps - 1) * delay` , the data will be padded.
By default, the data is padded with zeros, but this behavior can be
overridden by supplying additional keyword arguments which are passed
to `np.pad()`.
Parameters
----------
data : np.ndarray [shape=(t,) or (d, t)]
Input data matrix. If `data` is a vector (`data.ndim == 1`),
it will be interpreted as a row matrix and reshaped to `(1, t)`.
n_steps : int > 0 [scalar]
embedding dimension, the number of steps back in time to stack
delay : int != 0 [scalar]
the number of columns to step.
Positive values embed from the past (previous columns).
Negative values embed from the future (subsequent columns).
kwargs : additional keyword arguments
Additional arguments to pass to `np.pad`.
Returns
-------
data_history : np.ndarray [shape=(m * d, t)]
data augmented with lagged copies of itself,
where `m == n_steps - 1`.
Notes
-----
This function caches at level 40.
Examples
--------
Keep two steps (current and previous)
>>> data = np.arange(-3, 3)
>>> librosa.feature.stack_memory(data)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1]])
Or three steps
>>> librosa.feature.stack_memory(data, n_steps=3)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1],
[ 0, 0, -3, -2, -1, 0]])
Use reflection padding instead of zero-padding
>>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect')
array([[-3, -2, -1, 0, 1, 2],
[-2, -3, -2, -1, 0, 1],
[-1, -2, -3, -2, -1, 0]])
Or pad with edge-values, and delay by 2
>>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge')
array([[-3, -2, -1, 0, 1, 2],
[-3, -3, -3, -2, -1, 0],
[-3, -3, -3, -3, -3, -2]])
Stack time-lagged beat-synchronous chroma edge padding
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
>>> chroma_sync = librosa.util.sync(chroma, beats)
>>> chroma_lag = librosa.feature.stack_memory(chroma_sync, n_steps=3,
... mode='edge')
Plot the result
>>> import matplotlib.pyplot as plt
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> librosa.display.specshow(chroma_lag, y_axis='chroma', x_axis='time',
... x_coords=beat_times)
>>> plt.yticks([0, 12, 24], ['Lag=0', 'Lag=1', 'Lag=2'])
>>> plt.title('Time-lagged chroma')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if n_steps < 1:
raise ParameterError('n_steps must be a positive integer')
if delay == 0:
raise ParameterError('delay must be a non-zero integer')
data = np.atleast_2d(data)
t = data.shape[1]
kwargs.setdefault('mode', 'constant')
if kwargs['mode'] == 'constant':
kwargs.setdefault('constant_values', [0])
# Pad the end with zeros, which will roll to the front below
if delay > 0:
padding = (int((n_steps - 1) * delay), 0)
else:
padding = (0, int((n_steps - 1) * -delay))
data = np.pad(data, [(0, 0), padding], **kwargs)
history = data
for i in range(1, n_steps):
history = np.vstack([np.roll(data, -i * delay, axis=1), history])
# Trim to original width
if delay > 0:
history = history[:, :t]
else:
history = history[:, -t:]
# Make contiguous
return np.ascontiguousarray(history.T).T | Short-term history embedding: vertically concatenate a data
vector or matrix with delayed copies of itself.
Each column `data[:, i]` is mapped to::
data[:, i] -> [data[:, i],
data[:, i - delay],
...
data[:, i - (n_steps-1)*delay]]
For columns `i < (n_steps - 1) * delay` , the data will be padded.
By default, the data is padded with zeros, but this behavior can be
overridden by supplying additional keyword arguments which are passed
to `np.pad()`.
Parameters
----------
data : np.ndarray [shape=(t,) or (d, t)]
Input data matrix. If `data` is a vector (`data.ndim == 1`),
it will be interpreted as a row matrix and reshaped to `(1, t)`.
n_steps : int > 0 [scalar]
embedding dimension, the number of steps back in time to stack
delay : int != 0 [scalar]
the number of columns to step.
Positive values embed from the past (previous columns).
Negative values embed from the future (subsequent columns).
kwargs : additional keyword arguments
Additional arguments to pass to `np.pad`.
Returns
-------
data_history : np.ndarray [shape=(m * d, t)]
data augmented with lagged copies of itself,
where `m == n_steps - 1`.
Notes
-----
This function caches at level 40.
Examples
--------
Keep two steps (current and previous)
>>> data = np.arange(-3, 3)
>>> librosa.feature.stack_memory(data)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1]])
Or three steps
>>> librosa.feature.stack_memory(data, n_steps=3)
array([[-3, -2, -1, 0, 1, 2],
[ 0, -3, -2, -1, 0, 1],
[ 0, 0, -3, -2, -1, 0]])
Use reflection padding instead of zero-padding
>>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect')
array([[-3, -2, -1, 0, 1, 2],
[-2, -3, -2, -1, 0, 1],
[-1, -2, -3, -2, -1, 0]])
Or pad with edge-values, and delay by 2
>>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge')
array([[-3, -2, -1, 0, 1, 2],
[-3, -3, -3, -2, -1, 0],
[-3, -3, -3, -3, -3, -2]])
Stack time-lagged beat-synchronous chroma edge padding
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> chroma = librosa.feature.chroma_stft(y=y, sr=sr)
>>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512)
>>> beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1])
>>> chroma_sync = librosa.util.sync(chroma, beats)
>>> chroma_lag = librosa.feature.stack_memory(chroma_sync, n_steps=3,
... mode='edge')
Plot the result
>>> import matplotlib.pyplot as plt
>>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512)
>>> librosa.display.specshow(chroma_lag, y_axis='chroma', x_axis='time',
... x_coords=beat_times)
>>> plt.yticks([0, 12, 24], ['Lag=0', 'Lag=1', 'Lag=2'])
>>> plt.title('Time-lagged chroma')
>>> plt.colorbar()
>>> plt.tight_layout() |
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
"""Function to fetch and process MODSCAG fractional snow cover products for input datetime
Products are tiled in MODIS sinusoidal projection
example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif
"""
#Could also use global MODIS 500 m snowcover grids, 8 day
#http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
#These are HDF4, sinusoidal
#Should be able to load up with warplib without issue
import re
import requests
from bs4 import BeautifulSoup
auth = iolib.get_auth()
pad_days = timedelta(days=pad_days)
dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
outdir = os.path.join(datadir, 'modscag')
if not os.path.exists(outdir):
os.makedirs(outdir)
out_vrt_fn_list = []
for dt in dt_list:
out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
#If we already have a vrt and it contains all of the necessary tiles
if os.path.exists(out_vrt_fn):
vrt_ds = gdal.Open(out_vrt_fn)
if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
out_vrt_fn_list.append(out_vrt_fn)
continue
#Otherwise, download missing tiles and rebuild
#Try to use historic products
modscag_fn_list = []
#Note: not all tiles are available for same date ranges in historic vs. real-time
#Need to repeat search tile-by-tile
for tile in tile_list:
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
modscag_url_fn = []
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
#Couldn't find historic, try to use real-time products
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
print("Unable to fetch MODSCAG for %s" % dt)
else:
#OK, we got a valid response for this tile and date
#Now extract actual tif filenames to fetch from html
parsed_html = BeautifulSoup(r.content, "html.parser")
#Fetch all tiles
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if modscag_url_fn:
modscag_url_fn = modscag_url_fn[0]
modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
print(modscag_url)
modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
if not os.path.exists(modscag_fn):
iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
modscag_fn_list.append(modscag_fn)
#Mosaic tiles - currently a hack
if modscag_fn_list:
cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
cmd.extend(modscag_fn_list)
print(cmd)
subprocess.call(cmd, shell=False)
out_vrt_fn_list.append(out_vrt_fn)
return out_vrt_fn_list | Function to fetch and process MODSCAG fractional snow cover products for input datetime
Products are tiled in MODIS sinusoidal projection
example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif |
def get_user_profile_photos(self, user_id, offset=None, limit=None):
"""
Use this method to get a list of profile pictures for a user. Returns a UserProfilePhotos object.
https://core.telegram.org/bots/api#getuserprofilephotos
Parameters:
:param user_id: Unique identifier of the target user
:type user_id: int
Optional keyword parameters:
:param offset: Sequential number of the first photo to be returned. By default, all photos are returned.
:type offset: int
:param limit: Limits the number of photos to be retrieved. Values between 1—100 are accepted. Defaults to 100.
:type limit: int
Returns:
:return: Returns a UserProfilePhotos object
:rtype: pytgbot.api_types.receivable.media.UserProfilePhotos
"""
assert_type_or_raise(user_id, int, parameter_name="user_id")
assert_type_or_raise(offset, None, int, parameter_name="offset")
assert_type_or_raise(limit, None, int, parameter_name="limit")
result = self.do("getUserProfilePhotos", user_id=user_id, offset=offset, limit=limit)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
from pytgbot.api_types.receivable.media import UserProfilePhotos
try:
return UserProfilePhotos.from_array(result)
except TgApiParseException:
logger.debug("Failed parsing as api_type UserProfilePhotos", exc_info=True)
# end try
# no valid parsing so far
raise TgApiParseException("Could not parse result.") # See debug log for details!
# end if return_python_objects
return result | Use this method to get a list of profile pictures for a user. Returns a UserProfilePhotos object.
https://core.telegram.org/bots/api#getuserprofilephotos
Parameters:
:param user_id: Unique identifier of the target user
:type user_id: int
Optional keyword parameters:
:param offset: Sequential number of the first photo to be returned. By default, all photos are returned.
:type offset: int
:param limit: Limits the number of photos to be retrieved. Values between 1—100 are accepted. Defaults to 100.
:type limit: int
Returns:
:return: Returns a UserProfilePhotos object
:rtype: pytgbot.api_types.receivable.media.UserProfilePhotos |
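A hedged usage sketch: `bot` is assumed to be an already-authenticated pytgbot Bot instance created with `return_python_objects=True`, and the `user_id` value is a placeholder.
photos = bot.get_user_profile_photos(user_id=123456789, limit=10)
print(photos.total_count)            # how many profile pictures the user has in total
for sizes in photos.photos:          # each entry is a list of PhotoSize variants
    print(sizes[-1].file_id)         # the largest available variant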
def synced(func):
'''
Decorator for functions that should be called synchronously from another thread
:param func: function to call
'''
def wrapper(self, *args, **kwargs):
'''
Actual wrapper for the synchronous function
'''
task = DataManagerTask(func, *args, **kwargs)
self.submit_task(task)
return task.get_results()
return wrapper | Decorator for functions that should be called synchronously from another thread
:param func: function to call |
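A self-contained sketch of the same synchronisation pattern, with `Task` and `Manager` as minimal stand-ins for the real `DataManagerTask` and data-manager class (their internals here are illustrative, not the actual implementation).
import queue
import threading

class Task:
    # Minimal stand-in for DataManagerTask: wraps a call and lets callers wait on it.
    def __init__(self, func, *args, **kwargs):
        self.func, self.args, self.kwargs = func, args, kwargs
        self._done = threading.Event()
        self._result = None

    def run(self, owner):
        self._result = self.func(owner, *self.args, **self.kwargs)
        self._done.set()

    def get_results(self):
        self._done.wait()
        return self._result

def synced(func):
    # Same shape as the decorator above: submit a task, block until its result is ready.
    def wrapper(self, *args, **kwargs):
        task = Task(func, *args, **kwargs)
        self.submit_task(task)
        return task.get_results()
    return wrapper

class Manager:
    def __init__(self):
        self._queue = queue.Queue()
        threading.Thread(target=self._worker, daemon=True).start()

    def _worker(self):
        while True:
            self._queue.get().run(self)

    def submit_task(self, task):
        self._queue.put(task)

    @synced
    def double(self, x):
        return 2 * x

print(Manager().double(21))  # prints 42, computed on the worker thread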
def validate_bool(b):
"""Convert b to a boolean or raise"""
if isinstance(b, six.string_types):
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
return False
else:
raise ValueError('Could not convert "%s" to boolean' % b) | Convert b to a boolean or raise |
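A quick usage sketch, assuming `validate_bool` (and its `six` dependency) is importable from the module above.
for raw in ('Yes', 'off', 1, False, 't'):
    print(raw, '->', validate_bool(raw))   # True, False, True, False, True

try:
    validate_bool('maybe')
except ValueError as err:
    print(err)                             # Could not convert "maybe" to boolean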
def trees_by_subpath(self, sub_path):
"""
Search trees by `sub_path` using ``Tree.path.startswith(sub_path)``
comparison.
Args:
sub_path (str): Part of the :attr:`.Tree.path` property of
:class:`.Tree`.
Returns:
set: Set of matching :class:`Tree` instances.
"""
matches = (
self.path_db[tree_path].keys()
for tree_path in self.path_db.iterkeys()
if tree_path.startswith(sub_path)
)
return set(sum(matches, [])) | Search trees by `sub_path` using ``Tree.path.startswith(sub_path)``
comparison.
Args:
sub_path (str): Part of the :attr:`.Tree.path` property of
:class:`.Tree`.
Returns:
set: Set of matching :class:`Tree` instances. |
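An illustrative call, assuming `storage` is an instance of the tree-handler class above with its `path_db` already populated; the path prefix is made up.
# Every stored Tree whose path starts with the given prefix is returned as a set.
matching = storage.trees_by_subpath('/pub/periodicals/')
for tree in matching:
    print(tree.path)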
def preprocess_source(base_dir=os.curdir):
"""
A special method to convert all source files to be compatible with the current
python version at installation time.
The source directory layout must look like this:
base_dir --+
|
+-- src (All sources are placed into this directory)
|
+-- preprocessed (Preprocessed sources are placed into this
| directory)
|
+-- setup.py
|
...
@return Preprocessed source directory
"""
source_path = os.path.join(base_dir, SOURCE_DIR)
destination_path = os.path.join(base_dir, PREPROCESSED_DIR)
# The 'build' and 'dist' folder sometimes will not update! So we need to
# remove them all !
shutil.rmtree(os.path.join(base_dir, 'build'), ignore_errors=True)
shutil.rmtree(os.path.join(base_dir, 'dist'), ignore_errors=True)
# Remove all unused directories
directories = []
directory_patterns = ['__pycache__', '*.egg-info']
for root, dirs, files in os.walk(destination_path):
for adir in dirs:
for pattern in directory_patterns:
if fnmatch.fnmatch(adir, pattern):
directories.append(os.path.join(root, adir))
break
for adir in directories:
shutil.rmtree(adir, ignore_errors=True)
if sys.version_info[0] >= 3:
# We wrote program implicated by version 3, if python version
# large or equal than 3, we need not change the sources.
return source_path
# Check and prepare 3to2 module.
try:
from lib3to2.main import main as lib3to2_main
except ImportError:
try:
from pip import main as pipmain
except:
from pip._internal import main as pipmain
pipmain(['install', '3to2'])
from lib3to2.main import main as lib3to2_main
# Remove old preprocessed sources.
if not os.path.exists(destination_path):
__copy_tree(source_path, destination_path)
lib3to2_main("lib3to2.fixes",
["-w", "-n", "--no-diffs"] + [destination_path])
else:
# Remove all files that only in right side
# Copy all files that only in left side to right side, then
# 3to2 on these files
files = []
dirs = []
cmp_result = filecmp.dircmp(source_path, destination_path)
dirs.append(cmp_result)
while len(dirs) > 0:
# Get the last one compare result
cmp_result = dirs[-1]
del dirs[-1]
# Append all sub-dirs compare results, so that we could
# continue our loop.
dirs.extend(list(cmp_result.subdirs.values()))
# Remove all files that only in right side
for file_name in cmp_result.right_only:
file_path = os.path.join(cmp_result.right, file_name)
if os.path.isdir(file_path):
shutil.rmtree(file_path, ignore_errors=True)
continue
# Only parse files.
try:
os.remove(file_path)
except:
pass
# Copy all files that only in left side to right side or
# different files, then 3to2 on these files
for file_name in (cmp_result.left_only + cmp_result.diff_files):
left_file_path = os.path.join(cmp_result.left, file_name)
right_file_path = os.path.join(cmp_result.right, file_name)
if os.path.isdir(left_file_path):
__copy_tree(left_file_path, right_file_path)
files.append(right_file_path)
continue
if not fnmatch.fnmatch(file_name, "*.py"):
continue
try:
os.remove(right_file_path)
except:
pass
shutil.copy2(left_file_path, right_file_path)
files.append(right_file_path)
if len(files) > 0:
lib3to2_main("lib3to2.fixes", ["-w", "-n", "--no-diffs"] + files)
return destination_path | A special method to convert all source files to be compatible with the current
python version at installation time.
The source directory layout must look like this:
base_dir --+
|
+-- src (All sources are placed into this directory)
|
+-- preprocessed (Preprocessed sources are placed into this
| directory)
|
+-- setup.py
|
...
@return Preprocessed source directory |
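A hedged sketch of how `preprocess_source` might be wired into a `setup.py` that follows the `src/` + `preprocessed/` layout described above; it assumes `preprocess_source` is defined in (or importable from) the setup script, and the package name is a placeholder.
from setuptools import setup, find_packages

# On Python 3 this returns the src/ directory untouched; on Python 2 it returns the
# preprocessed/ directory produced by running 3to2 over the changed files.
package_dir = preprocess_source()

setup(
    name='example-package',                  # placeholder name
    packages=find_packages(package_dir),
    package_dir={'': package_dir},
)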
def sync_imports( self, quiet = False ):
"""Return a context manager to control imports onto all the engines
in the underlying cluster. This method is used within a ``with`` statement.
Any imports should be done with no experiments running, otherwise the
method will block until the cluster is quiet. Generally imports will be one
of the first things done when connecting to a cluster. (But be careful
not to accidentally try to re-import if re-connecting to a running
cluster.)
:param quiet: if True, suppresses messages (defaults to False)
:returns: a context manager"""
self.open()
return self._client[:].sync_imports(quiet = quiet) | Return a context manager to control imports onto all the engines
in the underlying cluster. This method is used within a ``with`` statement.
Any imports should be done with no experiments running, otherwise the
method will block until the cluster is quiet. Generally imports will be one
of the first things done when connecting to a cluster. (But be careful
not to accidentally try to re-import if re-connecting to a running
cluster.)
:param quiet: if True, suppresses messages (defaults to False)
:returns: a context manager |
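Typical use, assuming `lab` is an already-configured instance of the cluster-lab class defining `sync_imports`; the imported modules are illustrative.
with lab.sync_imports():
    import numpy        # made available on every engine in the cluster
    import networkx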
def gradient(self):
"""
Derivative of the covariance matrix over the lower triangular, flat part of L.
It is equal to
∂K/∂Lᵢⱼ = ALᵀ + LAᵀ,
where A is an n×m matrix of zeros except for the (i, j) entry, which is 1.
Returns
-------
Lu : ndarray
Derivative of K over the lower-triangular, flat part of L.
"""
L = self.L
n = self.L.shape[0]
grad = {"Lu": zeros((n, n, n * self._L.shape[1]))}
for ii in range(self._L.shape[0] * self._L.shape[1]):
row = ii // self._L.shape[1]
col = ii % self._L.shape[1]
grad["Lu"][row, :, ii] = L[:, col]
grad["Lu"][:, row, ii] += L[:, col]
return grad | Derivative of the covariance matrix over the lower triangular, flat part of L.
It is equal to
∂K/∂Lᵢⱼ = ALᵀ + LAᵀ,
where A is an n×m matrix of zeros except for the (i, j) entry, which is 1.
Returns
-------
Lu : ndarray
Derivative of K over the lower-triangular, flat part of L. |
def clamp(inclusive_lower_bound: int,
inclusive_upper_bound: int,
value: int) -> int:
"""
Bound the given ``value`` between ``inclusive_lower_bound`` and
``inclusive_upper_bound``.
"""
if value <= inclusive_lower_bound:
return inclusive_lower_bound
elif value >= inclusive_upper_bound:
return inclusive_upper_bound
else:
return value | Bound the given ``value`` between ``inclusive_lower_bound`` and
``inclusive_upper_bound``. |
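A quick sketch of the bounds behaviour, assuming `clamp` is importable from the module above.
print(clamp(0, 255, 300))   # 255 -- capped at the inclusive upper bound
print(clamp(0, 255, -12))   # 0   -- raised to the inclusive lower bound
print(clamp(0, 255, 100))   # 100 -- already inside the range, returned unchanged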
def init(filename=ConfigPath):
"""Loads INI configuration into this module's attributes."""
section, parts = "DEFAULT", filename.rsplit(":", 1)
if len(parts) > 1 and os.path.isfile(parts[0]): filename, section = parts
if not os.path.isfile(filename): return
vardict, parser = globals(), configparser.RawConfigParser()
parser.optionxform = str # Force case-sensitivity on names
try:
def parse_value(raw):
try: return json.loads(raw) # Try to interpret as JSON
except ValueError: return raw # JSON failed, fall back to raw
txt = open(filename).read() # Add DEFAULT section if none present
if not re.search("\\[\\w+\\]", txt): txt = "[DEFAULT]\n" + txt
parser.readfp(StringIO.StringIO(txt), filename)
for k, v in parser.items(section): vardict[k] = parse_value(v)
except Exception:
logging.warn("Error reading config from %s.", filename, exc_info=True) | Loads INI configuration into this module's attributes. |
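A hedged sketch of the kind of INI file `init()` consumes: keys become module attributes and values are parsed as JSON where possible, so numbers, booleans and lists keep their types. The file name, section name and keys below are illustrative.
import textwrap

# Write an illustrative config file; keys map onto module-level attributes.
with open('example.conf', 'w') as fh:
    fh.write(textwrap.dedent('''\
        [service]
        LogLevel = "DEBUG"
        Port     = 8080
        Plugins  = ["sqlite", "csv"]
    '''))

# Load a specific section with the "path:section" form handled above
# (init is assumed importable from the module it configures):
# init("example.conf:service")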
def createFromSource(cls, vs, name=None):
''' returns a github component for any github url (including
git+ssh:// git+http:// etc. or None if this is not a Github URL.
For all of these we use the github api to grab a tarball, because
that's faster.
Normally version will be empty, unless the original url was of the
form: 'owner/repo @version' or 'url://...#version', which can be used
to grab a particular tagged version.
(Note that for github components we ignore the component name - it
doesn't have to match the github module name)
'''
return GithubComponent(vs.location, vs.spec, vs.semantic_spec, name) | returns a github component for any github url (including
git+ssh:// git+http:// etc. or None if this is not a Github URL.
For all of these we use the github api to grab a tarball, because
that's faster.
Normally version will be empty, unless the original url was of the
form: 'owner/repo @version' or 'url://...#version', which can be used
to grab a particular tagged version.
(Note that for github components we ignore the component name - it
doesn't have to match the github module name) |
def _concat_reps(self, kpop, max_var_multiple, quiet, **kwargs):
"""
Combine structure replicates into a single indfile,
returns nreps, ninds. Excludes reps with too high of
variance (set with max_variance_multiplier) to exclude
runs that did not converge.
"""
## make an output handle
outf = os.path.join(self.workdir,
"{}-K-{}.indfile".format(self.name, kpop))
## combine replicates and write to indfile
excluded = 0
reps = []
with open(outf, 'w') as outfile:
repfiles = glob.glob(
os.path.join(self.workdir,
self.name+"-K-{}-rep-*_f".format(kpop)))
## get result as a Rep object
for rep in repfiles:
result = Rep(rep, kpop=kpop)
reps.append(result)
## exclude results with variance NX above (min)
newreps = []
if len(reps) > 1:
min_var_across_reps = np.min([i.var_lnlik for i in reps])
else:
min_var_across_reps = reps[0].var_lnlik
## iterate over reps
for rep in reps:
## store result w/o filtering
if not max_var_multiple:
newreps.append(rep)
outfile.write(rep.stable)
## use max-var-multiple as a filter for convergence
else:
#print(
# rep.var_lnlik,
# min_var_across_reps,
# rep.var_lnlik / min_var_across_reps,
# max_var_multiple)
## e.g., repvar is 1.05X minvar. We keep it if maxvar <= 1.05
if (rep.var_lnlik / min_var_across_reps) <= max_var_multiple:
newreps.append(rep)
outfile.write(rep.stable)
else:
excluded += 1
return newreps, excluded | Combine structure replicates into a single indfile,
returns nreps, ninds. Excludes reps with too high of
variance (set with max_variance_multiplier) to exclude
runs that did not converge. |
def correct_structure(self, atol=1e-8):
"""Determine if the structure matches the standard primitive structure.
The standard primitive will be different between seekpath and pymatgen
high-symmetry paths, but this is handled by the specific subclasses.
Args:
atol (:obj:`float`, optional): Absolute tolerance used to compare
the input structure with the primitive standard structure.
Returns:
bool: ``True`` if the structure is the same as the standard
primitive, otherwise ``False``.
"""
return np.allclose(self.structure.lattice.matrix,
self.prim.lattice.matrix, atol=atol) | Determine if the structure matches the standard primitive structure.
The standard primitive will be different between seekpath and pymatgen
high-symmetry paths, but this is handled by the specific subclasses.
Args:
atol (:obj:`float`, optional): Absolute tolerance used to compare
the input structure with the primitive standard structure.
Returns:
bool: ``True`` if the structure is the same as the standard
primitive, otherwise ``False``. |
def _y_axis(self, draw_axes=True):
"""Override y axis to make it polar"""
if not self._y_labels or not self.show_y_labels:
return
axis = self.svg.node(self.nodes['plot'], class_="axis y web")
for label, r in reversed(self._y_labels):
major = r in self._y_labels_major
if not (self.show_minor_y_labels or major):
continue
guides = self.svg.node(
axis,
class_='%sguides' %
('logarithmic ' if self.logarithmic else '')
)
if self.show_y_guides:
self.svg.line(
guides, [self.view((r, theta)) for theta in self._x_pos],
close=True,
class_='%sguide line' % ('major ' if major else '')
)
x, y = self.view((r, self._x_pos[0]))
x -= 5
text = self.svg.node(
guides, 'text', x=x, y=y, class_='major' if major else ''
)
text.text = label
if self.y_label_rotation:
text.attrib[
'transform'
] = "rotate(%d %f %f)" % (self.y_label_rotation, x, y)
self.svg.node(
guides,
'title',
).text = self._y_format(r) | Override y axis to make it polar |
def retrieve(cls, *args, **kwargs):
"""Return parent method."""
return super(BankAccount, cls).retrieve(*args, **kwargs) | Return parent method. |
def transition_loop(n_states, prob):
'''Construct a self-loop transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p` for all i
- `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i`
This type of transition matrix is appropriate when states tend to be
locally stable, and there is no additional structure between different
states. This is primarily useful for de-noising frame-wise predictions.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_loop(3, 0.5)
array([[0.5 , 0.25, 0.25],
[0.25, 0.5 , 0.25],
[0.25, 0.25, 0.5 ]])
>>> librosa.sequence.transition_loop(3, [0.8, 0.5, 0.25])
array([[0.8 , 0.1 , 0.1 ],
[0.25 , 0.5 , 0.25 ],
[0.375, 0.375, 0.25 ]])
'''
if not isinstance(n_states, int) or n_states <= 1:
        raise ParameterError('n_states={} must be a positive integer > 1'.format(n_states))
    transition = np.empty((n_states, n_states), dtype=float)
    # if it's a float, make it a vector
    prob = np.asarray(prob, dtype=float)
if prob.ndim == 0:
prob = np.tile(prob, n_states)
if prob.shape != (n_states,):
raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))
if np.any(prob < 0) or np.any(prob > 1):
raise ParameterError('prob={} must have values in the range [0, 1]'.format(prob))
for i, prob_i in enumerate(prob):
transition[i] = (1. - prob_i) / (n_states - 1)
transition[i, i] = prob_i
return transition | Construct a self-loop transition matrix over `n_states`.
The transition matrix will have the following properties:
- `transition[i, i] = p` for all i
- `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i`
This type of transition matrix is appropriate when states tend to be
locally stable, and there is no additional structure between different
states. This is primarily useful for de-noising frame-wise predictions.
Parameters
----------
n_states : int > 1
The number of states
prob : float in [0, 1] or iterable, length=n_states
If a scalar, this is the probability of a self-transition.
If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition.
Returns
-------
transition : np.ndarray [shape=(n_states, n_states)]
The transition matrix
Examples
--------
>>> librosa.sequence.transition_loop(3, 0.5)
array([[0.5 , 0.25, 0.25],
[0.25, 0.5 , 0.25],
[0.25, 0.25, 0.5 ]])
>>> librosa.sequence.transition_loop(3, [0.8, 0.5, 0.25])
array([[0.8 , 0.1 , 0.1 ],
[0.25 , 0.5 , 0.25 ],
[0.375, 0.375, 0.25 ]]) |
def split_string(self, string):
""" Yields substrings for which the same escape code applies.
"""
self.actions = []
start = 0
# strings ending with \r are assumed to be ending in \r\n since
# \n is appended to output strings automatically. Accounting
# for that, here.
last_char = '\n' if len(string) > 0 and string[-1] == '\n' else None
string = string[:-1] if last_char is not None else string
for match in ANSI_OR_SPECIAL_PATTERN.finditer(string):
raw = string[start:match.start()]
substring = SPECIAL_PATTERN.sub(self._replace_special, raw)
if substring or self.actions:
yield substring
self.actions = []
start = match.end()
groups = filter(lambda x: x is not None, match.groups())
g0 = groups[0]
if g0 == '\a':
self.actions.append(BeepAction('beep'))
yield None
self.actions = []
elif g0 == '\r':
self.actions.append(CarriageReturnAction('carriage-return'))
yield None
self.actions = []
elif g0 == '\b':
self.actions.append(BackSpaceAction('backspace'))
yield None
self.actions = []
elif g0 == '\n' or g0 == '\r\n':
self.actions.append(NewLineAction('newline'))
yield g0
self.actions = []
else:
params = [ param for param in groups[1].split(';') if param ]
if g0.startswith('['):
# Case 1: CSI code.
try:
params = map(int, params)
except ValueError:
# Silently discard badly formed codes.
pass
else:
self.set_csi_code(groups[2], params)
elif g0.startswith(']'):
# Case 2: OSC code.
self.set_osc_code(params)
raw = string[start:]
substring = SPECIAL_PATTERN.sub(self._replace_special, raw)
if substring or self.actions:
yield substring
if last_char is not None:
self.actions.append(NewLineAction('newline'))
yield last_char | Yields substrings for which the same escape code applies. |
def generate_api_key(self):
"""
Creates and returns a new API Key/pass pair.
:returns: API key/pass pair in JSON format
"""
endpoint = '/'.join((self.server_url, '_api', 'v2', 'api_keys'))
resp = self.r_session.post(endpoint)
resp.raise_for_status()
return response_to_json_dict(resp) | Creates and returns a new API Key/pass pair.
:returns: API key/pass pair in JSON format |
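Hedged usage sketch: `client` is assumed to be an authenticated client instance exposing the method above, and the `'key'`/`'password'` fields shown are assumptions about the shape of the returned JSON.
key_pair = client.generate_api_key()
# Assumed response fields; check the actual payload returned by your server.
print(key_pair.get('key'), key_pair.get('password'))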
def resolveFilenameConflicts(self):
"""Goes through list of DPs to make sure that their destination names
do not clash. Adjust names as needed. Returns True if some conflicts were resolved.
"""
taken_names = set()
resolved = False
# iterate through items
for item, dp in self.getItemDPList():
# only apply this to saved DPs
if dp.policy not in ["remove", "ignore", "banish"]:
name0 = str(item.text(self.ColRename))
name = _makeUniqueFilename(taken_names, name0)
if name != name0:
item.setText(self.ColRename, name)
resolved = True
self.emit(SIGNAL("updated"))
return resolved | Goes through list of DPs to make sure that their destination names
do not clash. Adjust names as needed. Returns True if some conflicts were resolved. |
def list_active_vms(cwd=None):
'''
Return a list of machine names for active virtual machines on the host,
which are defined in the Vagrantfile at the indicated path.
CLI Example:
.. code-block:: bash
salt '*' vagrant.list_active_vms cwd=/projects/project_1
'''
vms = []
cmd = 'vagrant status'
reply = __salt__['cmd.shell'](cmd, cwd=cwd)
log.info('--->\n%s', reply)
for line in reply.split('\n'): # build a list of the text reply
tokens = line.strip().split()
if len(tokens) > 1:
if tokens[1] == 'running':
vms.append(tokens[0])
return vms | Return a list of machine names for active virtual machines on the host,
which are defined in the Vagrantfile at the indicated path.
CLI Example:
.. code-block:: bash
salt '*' vagrant.list_active_vms cwd=/projects/project_1 |
def rolling_restart(self, slave_batch_size=None,
slave_fail_count_threshold=None,
sleep_seconds=None,
stale_configs_only=None,
unupgraded_only=None,
restart_role_types=None,
restart_role_names=None):
"""
Rolling restart the roles of a service. The sequence is:
1. Restart all the non-slave roles
2. If slaves are present restart them in batches of size specified
3. Perform any post-command needed after rolling restart
@param slave_batch_size: Number of slave roles to restart at a time
Must be greater than 0. Default is 1.
For HDFS, this number should be less than the replication factor (default 3)
to ensure data availability during rolling restart.
@param slave_fail_count_threshold: The threshold for number of slave batches that
are allowed to fail to restart before the entire command is considered failed.
Must be >= 0. Default is 0.
@param sleep_seconds: Number of seconds to sleep between restarts of slave role batches.
Must be >=0. Default is 0.
@param stale_configs_only: Restart roles with stale configs only. Default is false.
@param unupgraded_only: Restart roles that haven't been upgraded yet. Default is false.
@param restart_role_types: Role types to restart. If not specified, all startable roles are restarted.
@param restart_role_names: List of specific roles to restart.
If none are specified, then all roles of specified role types are restarted.
@return: Reference to the submitted command.
@since: API v3
"""
args = dict()
if slave_batch_size:
args['slaveBatchSize'] = slave_batch_size
if slave_fail_count_threshold:
args['slaveFailCountThreshold'] = slave_fail_count_threshold
if sleep_seconds:
args['sleepSeconds'] = sleep_seconds
if stale_configs_only:
args['staleConfigsOnly'] = stale_configs_only
if unupgraded_only:
args['unUpgradedOnly'] = unupgraded_only
if restart_role_types:
args['restartRoleTypes'] = restart_role_types
if restart_role_names:
args['restartRoleNames'] = restart_role_names
return self._cmd('rollingRestart', data=args) | Rolling restart the roles of a service. The sequence is:
1. Restart all the non-slave roles
2. If slaves are present restart them in batches of size specified
3. Perform any post-command needed after rolling restart
@param slave_batch_size: Number of slave roles to restart at a time
Must be greater than 0. Default is 1.
For HDFS, this number should be less than the replication factor (default 3)
to ensure data availability during rolling restart.
@param slave_fail_count_threshold: The threshold for number of slave batches that
are allowed to fail to restart before the entire command is considered failed.
Must be >= 0. Default is 0.
@param sleep_seconds: Number of seconds to sleep between restarts of slave role batches.
Must be >=0. Default is 0.
@param stale_configs_only: Restart roles with stale configs only. Default is false.
@param unupgraded_only: Restart roles that haven't been upgraded yet. Default is false.
@param restart_role_types: Role types to restart. If not specified, all startable roles are restarted.
@param restart_role_names: List of specific roles to restart.
If none are specified, then all roles of specified role types are restarted.
@return: Reference to the submitted command.
@since: API v3 |
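An illustrative call, assuming `hdfs` is a service handle exposing the method above (e.g. obtained from a Cloudera Manager cluster object); the argument values and the `wait()` call on the returned command are assumptions for the sketch.
cmd = hdfs.rolling_restart(
    slave_batch_size=2,               # restart two slave roles (e.g. DataNodes) at a time
    sleep_seconds=30,                 # pause between slave batches
    stale_configs_only=True,          # only touch roles whose configuration changed
    restart_role_types=['DATANODE'],  # skip everything else
)
cmd.wait()                            # assumed: the returned command object can be waited on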
def _calculate_gas(owners: List[str], safe_setup_data: bytes, payment_token: str) -> int:
"""
Calculate gas manually, based on tests of previously deployed safes
:param owners: Safe owners
:param safe_setup_data: Data for proxy setup
:param payment_token: If payment token, we will need more gas to transfer and maybe storage if first time
:return: total gas needed for deployment
"""
base_gas = 205000 # Transaction base gas
# If we already have the token, we don't have to pay for storage, so it will be just 5K instead of 20K.
# The other 1K is for overhead of making the call
if payment_token != NULL_ADDRESS:
payment_token_gas = 55000
else:
payment_token_gas = 0
data_gas = 68 * len(safe_setup_data) # Data gas
gas_per_owner = 20000 # Magic number calculated by testing and averaging owners
return base_gas + data_gas + payment_token_gas + len(owners) * gas_per_owner | Calculate gas manually, based on tests of previously deployed safes
:param owners: Safe owners
:param safe_setup_data: Data for proxy setup
:param payment_token: If payment token, we will need more gas to transfer and maybe storage if first time
:return: total gas needed for deployment |
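A worked instance of the arithmetic above: two owners, 200 bytes of setup data, and no payment token.
owners = ['0x' + '1' * 40, '0x' + '2' * 40]   # two placeholder owner addresses
safe_setup_data = b'\x00' * 200               # 200 bytes of proxy setup data

base_gas = 205000                             # transaction base gas
payment_token_gas = 0                         # NULL_ADDRESS -> no token transfer cost
data_gas = 68 * len(safe_setup_data)          # 68 * 200 = 13600
gas_per_owner = 20000

total = base_gas + data_gas + payment_token_gas + len(owners) * gas_per_owner
print(total)                                  # 205000 + 13600 + 0 + 40000 = 258600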
def configure_logger(logger, filename, folder, log_level):
'''Configure logging behavior for the simulations.
'''
fmt = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
if folder is not None:
log_file = os.path.join(folder, filename)
hdl = logging.FileHandler(log_file)
hdl.setFormatter(fmt)
hdl.setLevel(log_level)
logger.addHandler(hdl)
shdl = logging.StreamHandler()
shdl.setLevel(log_level)
shdl.setFormatter(fmt)
logger.addHandler(shdl)
logger.setLevel(log_level) | Configure logging behavior for the simulations.
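Typical use, assuming `configure_logger` is importable from the module above and that the target folder already exists (the function itself does not create it).
import logging
import os

os.makedirs('logs', exist_ok=True)            # the helper expects the folder to exist
logger = logging.getLogger('simulation')
configure_logger(logger, 'sim.log', folder='logs', log_level=logging.DEBUG)
logger.info('simulation started')             # goes to logs/sim.log and to the console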
def sh3(cmd):
"""Execute command in a subshell, return stdout, stderr
If anything appears in stderr, print it out to sys.stderr"""
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True,
env=sub_environment())
out, err = p.communicate()
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip(), err.rstrip() | Execute command in a subshell, return stdout and stderr (stripped of trailing
whitespace); raise CalledProcessError if the command exits with a non-zero status.
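Quick usage sketch, assuming `sh3` (and its `sub_environment` helper) is importable from the module above; note that on Python 3 the returned streams are bytes.
from subprocess import CalledProcessError

out, err = sh3('echo hello && echo oops 1>&2')
print(out)    # b'hello' on Python 3 (str on Python 2)
print(err)    # b'oops'

# A failing command raises CalledProcessError with the exit status attached.
try:
    sh3('exit 3')
except CalledProcessError as exc:
    print(exc.returncode)   # 3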
def write_hash_file_for_path(path, recompute=False):
r""" Creates a hash file for each file in a path
CommandLine:
python -m utool.util_hash --test-write_hash_file_for_path
Example:
>>> # DISABLE_DOCTEST
>>> import os
>>> import utool as ut
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> path, _ = os.path.split(fpath)
>>> hash_fpath_list = write_hash_file_for_path(path)
>>> for hash_fpath in hash_fpath_list:
>>> assert os.path.exists(hash_fpath)
>>> ut.delete(hash_fpath)
"""
hash_fpath_list = []
for root, dname_list, fname_list in os.walk(path):
for fname in sorted(fname_list):
# fpath = os.path.join(path, fname)
fpath = os.path.join(root, fname)
hash_fpath = write_hash_file(fpath, recompute=recompute)
if hash_fpath is not None:
hash_fpath_list.append(hash_fpath)
return hash_fpath_list | r""" Creates a hash file for each file in a path
CommandLine:
python -m utool.util_hash --test-write_hash_file_for_path
Example:
>>> # DISABLE_DOCTEST
>>> import os
>>> import utool as ut
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> path, _ = os.path.split(fpath)
>>> hash_fpath_list = write_hash_file_for_path(path)
>>> for hash_fpath in hash_fpath_list:
>>> assert os.path.exists(hash_fpath)
>>> ut.delete(hash_fpath) |
def _emit_message(cls, message):
"""Print a message to STDOUT."""
sys.stdout.write(message)
sys.stdout.flush() | Print a message to STDOUT. |
def get_path(filename):
"""
Get absolute path for filename.
:param filename: file
:return: path
"""
path = abspath(filename) if os.path.isdir(filename) else dirname(abspath(filename))
return path | Get absolute path for filename.
:param filename: file
:return: path |
def getISAAssay(assayNum, studyNum, pathToISATABFile):
"""
This function returns an Assay object given the assay and study numbers in an ISA file
Typically, you should use the exploreISA function to check the contents
of the ISA file and retrieve the assay and study numbers you are interested in!
:param assayNum: The Assay number (notice it's not zero-based index).
:type assayNum: int
:param studyNum: The Study number (notice it's not zero-based index).
:type studyNum: int
:param pathToISATABFile: The path to the ISATAB file
:type pathToISATABFile: str
:raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'.
"""
from isatools import isatab
import copy
try:
isa = isatab.load(pathToISATABFile, skip_load_tables=True)
std = isa.studies[studyNum - 1]
return copy.deepcopy(std.assays[assayNum - 1])
except FileNotFoundError as err:
raise err | This function returns an Assay object given the assay and study numbers in an ISA file
Typically, you should use the exploreISA function to check the contents
of the ISA file and retrieve the assay and study numbers you are interested in!
:param assayNum: The Assay number (notice it's not zero-based index).
:type assayNum: int
:param studyNum: The Study number (notice it's not zero-based index).
:type studyNum: int
:param pathToISATABFile: The path to the ISATAB file
:type pathToISATABFile: str
:raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'. |
def oidcCredentials(self, *args, **kwargs):
"""
Get Taskcluster credentials given a suitable `access_token`
Given an OIDC `access_token` from a trusted OpenID provider, return a
set of Taskcluster credentials for use on behalf of the identified
user.
This method is typically not called with a Taskcluster client library
and does not accept Hawk credentials. The `access_token` should be
given in an `Authorization` header:
```
Authorization: Bearer abc.xyz
```
The `access_token` is first verified against the named
:provider, then passed to the provider's APIBuilder to retrieve a user
profile. That profile is then used to generate Taskcluster credentials
appropriate to the user. Note that the resulting credentials may or may
not include a `certificate` property. Callers should be prepared for either
alternative.
The given credentials will expire in a relatively short time. Callers should
monitor this expiration and refresh the credentials if necessary, by calling
this endpoint again, if they have expired.
This method gives output: ``v1/oidc-credentials-response.json#``
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["oidcCredentials"], *args, **kwargs) | Get Taskcluster credentials given a suitable `access_token`
Given an OIDC `access_token` from a trusted OpenID provider, return a
set of Taskcluster credentials for use on behalf of the identified
user.
This method is typically not called with a Taskcluster client library
and does not accept Hawk credentials. The `access_token` should be
given in an `Authorization` header:
```
Authorization: Bearer abc.xyz
```
The `access_token` is first verified against the named
:provider, then passed to the provider's APIBuilder to retrieve a user
profile. That profile is then used to generate Taskcluster credentials
appropriate to the user. Note that the resulting credentials may or may
not include a `certificate` property. Callers should be prepared for either
alternative.
The given credentials will expire in a relatively short time. Callers should
monitor this expiration and refresh the credentials if necessary, by calling
this endpoint again, if they have expired.
This method gives output: ``v1/oidc-credentials-response.json#``
This method is ``experimental`` |
def get_id(self):
'''
:returns: Object ID of associated app
:rtype: string
Returns the object ID of the app that the handler is currently
associated with.
'''
if self._dxid is not None:
return self._dxid
else:
return 'app-' + self._name + '/' + self._alias | :returns: Object ID of associated app
:rtype: string
Returns the object ID of the app that the handler is currently
associated with. |
def _render_having(having_conditions):
"""Render the having part of a query.
Parameters
----------
having_conditions : list
A ``list`` of ``dict``s to filter the rows
Returns
-------
str
A string that represents the "having" part of a query.
See Also
--------
render_query : Further clarification of `conditions` formatting.
"""
if not having_conditions:
return ""
rendered_conditions = []
for condition in having_conditions:
field = condition.get('field')
field_type = condition.get('type')
comparators = condition.get('comparators')
if None in (field, field_type, comparators) or not comparators:
logger.warn('Invalid condition passed in: %s' % condition)
continue
rendered_conditions.append(
_render_condition(field, field_type, comparators))
if not rendered_conditions:
return ""
return "HAVING %s" % (" AND ".join(rendered_conditions)) | Render the having part of a query.
Parameters
----------
having_conditions : list
A ``list`` of ``dict``s to filter the rows
Returns
-------
str
A string that represents the "having" part of a query.
See Also
--------
render_query : Further clarification of `conditions` formatting. |
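A sketch of the expected input shape; the keys inside each comparator dict are an assumption here (see render_query for the authoritative format), and the module's _render_condition helper is assumed to be in scope:
having_conditions = [
    {
        'field': 'total_clicks',
        'type': 'INTEGER',
        # comparator layout assumed for illustration only
        'comparators': [{'condition': '>=', 'negate': False, 'value': 10}],
    },
]
sql_fragment = _render_having(having_conditions)
# Expected to start with "HAVING " followed by the rendered condition.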
def problem(self):
"""
| Comment: For tickets of type "incident", the ID of the problem the incident is linked to
"""
if self.api and self.problem_id:
return self.api._get_problem(self.problem_id) | | Comment: For tickets of type "incident", the ID of the problem the incident is linked to |
def token_scan(cls, result_key, token):
"""
Define a property that is set to true if the given token is found in
the log file. Uses the __contains__ method of the log file.
"""
def _scan(self):
return token in self
cls.scan(result_key, _scan) | Define a property that is set to true if the given token is found in
the log file. Uses the __contains__ method of the log file. |
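A minimal sketch, assuming token_scan is callable as a plain function (in its original class it is likely a classmethod) and that the log-file class provides __contains__ and a scan() hook as described above; the class below is hypothetical:
class BuildLog(object):
    def __init__(self, text):
        self.text = text
    def __contains__(self, token):
        return token in self.text
    @classmethod
    def scan(cls, result_key, func):
        # simplified stand-in: expose the scan result as a read-only property
        setattr(cls, result_key, property(func))

token_scan(BuildLog, 'has_error', 'ERROR')            # register the token scan
log = BuildLog('step 1 ok\nERROR: step 2 failed')
print(log.has_error)                                  # True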
def n_executions(self):
"""
Queries and returns the number of past task executions.
"""
pipeline = self.tiger.connection.pipeline()
pipeline.exists(self.tiger._key('task', self.id))
pipeline.llen(self.tiger._key('task', self.id, 'executions'))
exists, n_executions = pipeline.execute()
if not exists:
raise TaskNotFound('Task {} not found.'.format(
self.id
))
return n_executions | Queries and returns the number of past task executions. |
def resubmit(self, job_ids = None, also_success = False, running_jobs = False, new_command=None, verbosity=0, keep_logs=False, **kwargs):
"""Re-submit jobs automatically"""
self.lock()
# iterate over all jobs
jobs = self.get_jobs(job_ids)
if new_command is not None:
if len(jobs) == 1:
jobs[0].set_command_line(new_command)
else:
logger.warn("Ignoring new command since no single job id was specified")
accepted_old_status = ('submitted', 'success', 'failure') if also_success else ('submitted', 'failure',)
for job in jobs:
# check if this job needs re-submission
if running_jobs or job.status in accepted_old_status:
grid_status = qstat(job.id, context=self.context)
if len(grid_status) != 0:
logger.warn("Deleting job '%d' since it was still running in the grid." % job.unique)
qdel(job.id, context=self.context)
# re-submit job to the grid
arguments = job.get_arguments()
arguments.update(**kwargs)
if ('queue' not in arguments or arguments['queue'] == 'all.q'):
for arg in ('hvmem', 'pe_opt', 'io_big'):
if arg in arguments:
del arguments[arg]
job.set_arguments(kwargs=arguments)
# delete old status and result of the job
if not keep_logs:
self.delete_logs(job)
job.submit()
if job.queue_name == 'local' and 'queue' not in arguments:
logger.warn("Re-submitting job '%s' locally (since no queue name is specified)." % job)
else:
deps = [dep.unique for dep in job.get_jobs_we_wait_for()]
logger.debug("Re-submitting job '%s' with dependencies '%s' to the grid." % (job, deps))
self._submit_to_grid(job, job.name, job.get_array(), deps, job.log_dir, verbosity, **arguments)
# commit after each job to avoid failures of not finding the job during execution in the grid
self.session.commit()
self.unlock() | Re-submit jobs automatically |
def make_display_lines(self):
"""
Generate the lines to display.
Note: concurrent terminal output from multiple threads is buggy and shifts the start position, so each line needs a leading \r
"""
self.screen_height, self.screen_width = self.linesnum() # number of rows the screen can display
display_lines = ['\r']
display_lines.append(self._title + '\r')
top = self.topline
bottom = self.topline + self.screen_height - 3
for index, i in enumerate(self._lines[top:bottom]):
# arrow marker for the current cursor line
if index == self.markline:
prefix = self._prefix_selected
i = color_func(self.c['LINE']['highlight'])(i)
else:
prefix = self._prefix_deselected
# currently selected channel
if index + self.topline == self.displayline:
suffix = self._suffix_selected
else:
suffix = self._suffix_deselected
line = '%s %s %s' % (prefix, i, suffix)
line = color_func(self.c['LINE']['line'])(line)
display_lines.append(line + '\r')
return_num = self.screen_height - 3 - len(self._lines)
for _ in range(return_num):
display_lines.append('\r')
self.display_lines = display_lines | Generate the lines to display.
Note: concurrent terminal output from multiple threads is buggy and shifts the start position, so each line needs a leading \r
def _decrypt(private_key, ciphertext, padding):
"""
Decrypts RSA ciphertext using a private key
:param private_key:
A PrivateKey object
:param ciphertext:
The ciphertext - a byte string
:param padding:
The padding mode to use, specified as a kSecPadding*Key value
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext
"""
if not isinstance(private_key, PrivateKey):
raise TypeError(pretty_message(
'''
private_key must be an instance of the PrivateKey class, not %s
''',
type_name(private_key)
))
if not isinstance(ciphertext, byte_cls):
raise TypeError(pretty_message(
'''
ciphertext must be a byte string, not %s
''',
type_name(ciphertext)
))
if not padding:
raise ValueError('padding must be specified')
cf_data = None
sec_transform = None
try:
cf_data = CFHelpers.cf_data_from_bytes(ciphertext)
error_pointer = new(CoreFoundation, 'CFErrorRef *')
sec_transform = Security.SecDecryptTransformCreate(
private_key.sec_key_ref,
error_pointer
)
handle_cf_error(error_pointer)
Security.SecTransformSetAttribute(
sec_transform,
Security.kSecPaddingKey,
padding,
error_pointer
)
handle_cf_error(error_pointer)
Security.SecTransformSetAttribute(
sec_transform,
Security.kSecTransformInputAttributeName,
cf_data,
error_pointer
)
handle_cf_error(error_pointer)
plaintext = Security.SecTransformExecute(sec_transform, error_pointer)
handle_cf_error(error_pointer)
return CFHelpers.cf_data_to_bytes(plaintext)
finally:
if cf_data:
CoreFoundation.CFRelease(cf_data)
if sec_transform:
CoreFoundation.CFRelease(sec_transform) | Decrypts RSA ciphertext using a private key
:param private_key:
A PrivateKey object
:param ciphertext:
The ciphertext - a byte string
:param padding:
The padding mode to use, specified as a kSecPadding*Key value
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by the OS crypto library
:return:
A byte string of the plaintext |
def get_all_player_ids(ids="shots"):
"""
Returns a pandas DataFrame containing the player IDs used in the
stats.nba.com API.
Parameters
----------
ids : { "shots" | "all_players" | "all_data" }, optional
Passing in "shots" returns a DataFrame that contains the player IDs of
all players who have shot chart data. It is the default parameter value.
Passing in "all_players" returns a DataFrame that contains
all the player IDs used in the stats.nba.com API.
Passing in "all_data" returns a DataFrame that contains all the data
accessed from the JSON at the following url:
http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=0&LeagueID=00&Season=2015-16
The column information for this DataFrame is as follows:
PERSON_ID: The player ID for that player
DISPLAY_LAST_COMMA_FIRST: The player's name.
ROSTERSTATUS: 0 means player is not on a roster, 1 means he's on a
roster
FROM_YEAR: The first year the player played.
TO_YEAR: The last year the player played.
PLAYERCODE: A code representing the player. Unsure of its use.
Returns
-------
df : pandas DataFrame
The pandas DataFrame object that contains the player IDs for the
stats.nba.com API.
"""
url = "http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=0&LeagueID=00&Season=2015-16"
# get the web page
response = requests.get(url, headers=HEADERS)
response.raise_for_status()
# access 'resultSets', which is a list containing the dict with all the data
# The 'header' key accesses the headers
headers = response.json()['resultSets'][0]['headers']
# The 'rowSet' key contains the player data along with their IDs
players = response.json()['resultSets'][0]['rowSet']
# Create dataframe with proper numeric types
df = pd.DataFrame(players, columns=headers)
# Dealing with different means of conversion for pandas 0.17.0 or 0.17.1
# and 0.15.0 or lower
if '0.17' in pd.__version__:
# alternative to convert_objects() to numeric to get rid of warning
# as convert_objects() is deprecated in pandas 0.17.0+
df = df.apply(pd.to_numeric, args=('ignore',))
else:
df = df.convert_objects(convert_numeric=True)
if ids == "shots":
df = df.query("(FROM_YEAR >= 2001) or (TO_YEAR >= 2001)")
df = df.reset_index(drop=True)
# just keep the player ids and names
df = df.iloc[:, 0:2]
return df
if ids == "all_players":
df = df.iloc[:, 0:2]
return df
if ids == "all_data":
return df
else:
er = "Invalid 'ids' value. It must be 'shots', 'all_shots', or 'all_data'."
raise ValueError(er) | Returns a pandas DataFrame containing the player IDs used in the
stats.nba.com API.
Parameters
----------
ids : { "shots" | "all_players" | "all_data" }, optional
Passing in "shots" returns a DataFrame that contains the player IDs of
all players who have shot chart data. It is the default parameter value.
Passing in "all_players" returns a DataFrame that contains
all the player IDs used in the stats.nba.com API.
Passing in "all_data" returns a DataFrame that contains all the data
accessed from the JSON at the following url:
http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=0&LeagueID=00&Season=2015-16
The column information for this DataFrame is as follows:
PERSON_ID: The player ID for that player
DISPLAY_LAST_COMMA_FIRST: The player's name.
ROSTERSTATUS: 0 means player is not on a roster, 1 means he's on a
roster
FROM_YEAR: The first year the player played.
TO_YEAR: The last year the player played.
PLAYERCODE: A code representing the player. Unsure of its use.
Returns
-------
df : pandas DataFrame
The pandas DataFrame object that contains the player IDs for the
stats.nba.com API. |
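A short usage sketch; it issues a live HTTP request to stats.nba.com, so network access (and possibly updated request headers) is assumed:
# Default: IDs and names of players with shot chart data (2001 onward).
shots_df = get_all_player_ids()
# Full ID/name table for every player in the stats.nba.com API.
all_players_df = get_all_player_ids(ids="all_players")
print(all_players_df.head())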
def get_gtf_db(gtf, in_memory=False):
"""
create a gffutils DB, in memory if we don't have write permissions
"""
db_file = gtf + ".db"
if file_exists(db_file):
return gffutils.FeatureDB(db_file)
if not os.access(os.path.dirname(db_file), os.W_OK | os.X_OK):
in_memory = True
db_file = ":memory:" if in_memory else db_file
if in_memory or not file_exists(db_file):
infer_extent = guess_infer_extent(gtf)
disable_extent = not infer_extent
db = gffutils.create_db(gtf, dbfn=db_file,
disable_infer_genes=disable_extent,
disable_infer_transcripts=disable_extent)
if in_memory:
return db
else:
return gffutils.FeatureDB(db_file) | create a gffutils DB, in memory if we don't have write permissions |
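A hedged usage sketch, assuming gffutils is installed and 'annotation.gtf' is a hypothetical GTF file:
db = get_gtf_db("annotation.gtf")           # writes annotation.gtf.db when the directory is writable
for gene in db.features_of_type("gene"):    # standard gffutils FeatureDB iteration
    print(gene.id, gene.start, gene.end)
    break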
def X_less(self):
"""Zoom out on the x-axis."""
self.parent.value('window_length',
self.parent.value('window_length') / 2)
self.parent.overview.update_position() | Zoom out on the x-axis. |
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256):
"""
Truncates a colormap, such that the new colormap consists of
``cmap[minval:maxval]``.
If maxval is smaller than minval, the truncated colormap will be reversed.
Args:
cmap (colormap): Colormap to be truncated
minval (float): Lower bound. Should be a float between 0 and 1.
maxval (float): Upper bound. Should be a float between 0 and 1
n (int): Number of colormap steps. Default is ``256``.
Returns:
colormap: A matplotlib colormap
http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
"""
cmap = get_cmap(cmap)
name = "%s-trunc-%.2g-%.2g" % (cmap.name, minval, maxval)
return colors.LinearSegmentedColormap.from_list(
name, cmap(np.linspace(minval, maxval, n))) | Truncates a colormap, such that the new colormap consists of
``cmap[minval:maxval]``.
If maxval is smaller than minval, the truncated colormap will be reversed.
Args:
cmap (colormap): Colormap to be truncated
minval (float): Lower bound. Should be a float between 0 and 1.
maxval (float): Upper bound. Should be a float between 0 and 1
n (int): Number of colormap steps. Default is ``256``.
Returns:
colormap: A matplotlib colormap
http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib |
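A small matplotlib usage sketch:
import numpy as np
import matplotlib.pyplot as plt

# Keep only the middle 60% of viridis.
trunc = truncate_colormap('viridis', minval=0.2, maxval=0.8)
plt.imshow(np.random.rand(10, 10), cmap=trunc)
plt.colorbar()
plt.show()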
def GetPlaylists(self, start, max_count, order, reversed):
"""Gets a set of playlists.
:param int start: The index of the first playlist to be fetched
(according to the ordering).
:param int max_count: The maximum number of playlists to fetch.
:param str order: The ordering that should be used.
:param bool reversed: Whether the order should be reversed.
"""
cv = convert2dbus
return self.iface.GetPlaylists(cv(start, 'u'),
cv(max_count, 'u'),
cv(order, 's'),
cv(reversed, 'b')) | Gets a set of playlists.
:param int start: The index of the first playlist to be fetched
(according to the ordering).
:param int max_count: The maximum number of playlists to fetch.
:param str order: The ordering that should be used.
:param bool reversed: Whether the order should be reversed. |
def add(self, subj: Node, pred: URIRef, obj: Node) -> "FHIRResource":
"""
Shortcut to rdflib add function
:param subj:
:param pred:
:param obj:
:return: self for chaining
"""
self._g.add((subj, pred, obj))
return self | Shortcut to rdflib add function
:param subj:
:param pred:
:param obj:
:return: self for chaining |
def _generate_ndarray_function_code(handle, name, func_name, signature_only=False):
"""Generate function for ndarray op by handle and function name."""
real_name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
key_var_num_args = ctypes.c_char_p()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
handle, ctypes.byref(real_name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(key_var_num_args),
ctypes.byref(ret_type)))
narg = int(num_args.value)
arg_names = [py_str(arg_names[i]) for i in range(narg)]
arg_types = [py_str(arg_types[i]) for i in range(narg)]
key_var_num_args = py_str(key_var_num_args.value)
ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
doc_str = _build_doc(name,
py_str(desc.value),
arg_names,
arg_types,
[py_str(arg_descs[i]) for i in range(narg)],
key_var_num_args,
ret_type)
dtype_name = None
arr_name = None
ndsignature = []
signature = []
ndarg_names = []
kwarg_names = []
for i in range(narg):
name, atype = arg_names[i], arg_types[i]
if name == 'dtype':
dtype_name = name
signature.append('%s=_Null'%name)
elif atype.startswith('NDArray') or atype.startswith('Symbol'):
assert not arr_name, \
"Op can only have one argument with variable " \
"size and it must be the last argument."
if atype.endswith('[]'):
ndsignature.append('*%s'%name)
arr_name = name
else:
ndsignature.append('%s=None'%name)
ndarg_names.append(name)
else:
signature.append('%s=_Null'%name)
kwarg_names.append(name)
signature.append('out=None')
signature.append('name=None')
signature.append('**kwargs')
signature = ndsignature + signature
code = []
if arr_name:
code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
if not signature_only:
code.append("""
ndargs = []
for i in {}:
assert isinstance(i, NDArrayBase), \\
"Positional arguments must have NDArray type, " \\
"but got %s"%str(i)
ndargs.append(i)""".format(arr_name))
if dtype_name is not None:
code.append("""
if '%s' in kwargs:
kwargs['%s'] = _np.dtype(kwargs['%s']).name"""%(
dtype_name, dtype_name, dtype_name))
code.append("""
_ = kwargs.pop('name', None)
out = kwargs.pop('out', None)
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
else:
code.append("""
def %s(%s):"""%(func_name, ', '.join(signature)))
if not signature_only:
code.append("""
ndargs = []
keys = list(kwargs.keys())
vals = list(kwargs.values())""")
# NDArray args
for name in ndarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if {name} is not None:
assert isinstance({name}, NDArrayBase), \\
"Argument {name} must have NDArray type, but got %s"%str({name})
ndargs.append({name})""".format(name=name))
# kwargs
for name in kwarg_names: # pylint: disable=redefined-argument-from-local
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(%s)"""%(name, name, name))
# dtype
if dtype_name is not None:
code.append("""
if %s is not _Null:
keys.append('%s')
vals.append(_np.dtype(%s).name)"""%(dtype_name, dtype_name, dtype_name))
if not signature_only:
code.append("""
return _imperative_invoke(%d, ndargs, keys, vals, out)"""%(
handle.value))
else:
code.append("""
return (0,)""")
doc_str_lines = _os.linesep+''.join([' '+s if s.strip() else s
for s in 'r"""{doc_str}"""'.format(doc_str=doc_str)
.splitlines(True)])
code.insert(1, doc_str_lines)
return ''.join(code), doc_str | Generate function for ndarray op by handle and function name. |
def sh2(cmd):
"""Execute command in a subshell, return stdout.
Stderr is unbuffered from the subshell.x"""
p = Popen(cmd, stdout=PIPE, shell=True, env=sub_environment())
out = p.communicate()[0]
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip() | Execute command in a subshell, return stdout.
Stderr is unbuffered from the subshell.
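A minimal usage sketch (the command is arbitrary):
# Capture stdout of a shell command; a non-zero exit raises CalledProcessError.
listing = sh2('ls -la /tmp')
print(listing)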
def deflections_from_grid(self, grid, tabulate_bins=1000):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile.
"""
@jit_integrand
def surface_density_integrand(x, kappa_radius, scale_radius, inner_slope):
return (3 - inner_slope) * (x + kappa_radius / scale_radius) ** (inner_slope - 4) * (1 - np.sqrt(1 - x * x))
def calculate_deflection_component(npow, index):
deflection_grid = 2.0 * self.kappa_s * self.axis_ratio * grid[:, index]
deflection_grid *= quad_grid(self.deflection_func, 0.0, 1.0,
grid, args=(npow, self.axis_ratio, minimum_log_eta, maximum_log_eta,
tabulate_bins, surface_density_integral),
epsrel=EllipticalGeneralizedNFW.epsrel)[0]
return deflection_grid
eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size = self.tabulate_integral(grid, tabulate_bins)
surface_density_integral = np.zeros((tabulate_bins,))
for i in range(tabulate_bins):
eta = 10. ** (minimum_log_eta + (i - 1) * bin_size)
integral = quad(surface_density_integrand, a=0.0, b=1.0, args=(eta, self.scale_radius,
self.inner_slope),
epsrel=EllipticalGeneralizedNFW.epsrel)[0]
surface_density_integral[i] = ((eta / self.scale_radius) ** (1 - self.inner_slope)) * \
(((1 + eta / self.scale_radius) ** (self.inner_slope - 3)) + integral)
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_profile(np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)) | Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
tabulate_bins : int
The number of bins to tabulate the inner integral of this profile. |
def flag(val):
"""Does the value look like an on/off flag?"""
if val == 1:
return True
elif val == 0:
return False
val = str(val)
if len(val) > 5:
return False
return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF') | Does the value look like an on/off flag? |
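A few illustrative calls, based directly on the accepted spellings above (note that the integer 0 takes the early branch and returns False):
for val in (1, 0, 'T', 'off', 'TRUE', 'yes', 'maybe'):
    print(val, flag(val))
# prints: 1 True, 0 False, T True, off True, TRUE True, yes False, maybe False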
def on_bindok(self, unused_frame):
"""
This method is invoked by pika when it receives the Queue.BindOk
response from RabbitMQ.
"""
self._logger.info('Queue bound')
while not self._stopping:
# perform the action that publishes on this client
self.producer(self)
self._logger.info("producer done") | This method is invoked by pika when it receives the Queue.BindOk
response from RabbitMQ. |
def get_traindata(self) -> np.ndarray:
"""
Pulls all available data and concatenates for model training
:return: 2d array of points
"""
traindata = None
for key, value in self.data.items():
if key not in ['__header__', '__version__', '__globals__']:
if traindata is None:
traindata = value[np.where(value[:, 4] != 0)]
else:
traindata = np.concatenate((traindata, value[np.where(value[:, 4] != 0)]))
return traindata | Pulls all available data and concatenates for model training
:return: 2d array of points |
def get_class_name(self):
"""
Return the class name of the field
:rtype: string
"""
if self.class_idx_value is None:
self.class_idx_value = self.CM.get_type(self.class_idx)
return self.class_idx_value | Return the class name of the field
:rtype: string |
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
urlparts = request.urlparts
return '{scheme}://{site}/{url}'.format(
scheme=urlparts.scheme,
site=get_site_name(request),
url=url.lstrip('/'),
) | Take a relative URL and return the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open” |
def _get_descriptors(self):
"""Returns three elements tuple with socket descriptors ready
for gevent.select.select
"""
rlist = []
wlist = []
xlist = []
for socket, flags in self.sockets.items():
if isinstance(socket, zmq.Socket):
rlist.append(socket.getsockopt(zmq.FD))
continue
elif isinstance(socket, int):
fd = socket
elif hasattr(socket, 'fileno'):
try:
fd = int(socket.fileno())
except:
raise ValueError('fileno() must return a valid integer fd')
else:
raise TypeError('Socket must be a 0MQ socket, an integer fd '
'or have a fileno() method: %r' % socket)
if flags & zmq.POLLIN:
rlist.append(fd)
if flags & zmq.POLLOUT:
wlist.append(fd)
if flags & zmq.POLLERR:
xlist.append(fd)
return (rlist, wlist, xlist) | Returns three elements tuple with socket descriptors ready
for gevent.select.select |
def zip(self, *others):
"""
Zip the items of this collection with one or more
other sequences, and wrap the result.
Unlike Python's zip, all sequences must be the same length.
Parameters:
others: One or more iterables or Collections
Returns:
A new collection.
Examples:
>>> c1 = Collection([Scalar(1), Scalar(2)])
>>> c2 = Collection([Scalar(3), Scalar(4)])
>>> c1.zip(c2).val()
[(1, 3), (2, 4)]
"""
args = [_unwrap(item) for item in (self,) + others]
ct = self.count()
if not all(len(arg) == ct for arg in args):
raise ValueError("Arguments are not all the same length")
return Collection(map(Wrapper.wrap, zip(*args))) | Zip the items of this collection with one or more
other sequences, and wrap the result.
Unlike Python's zip, all sequences must be the same length.
Parameters:
others: One or more iterables or Collections
Returns:
A new collection.
Examples:
>>> c1 = Collection([Scalar(1), Scalar(2)])
>>> c2 = Collection([Scalar(3), Scalar(4)])
>>> c1.zip(c2).val()
[(1, 3), (2, 4)] |
def has_permission(self, request, view):
"""Check list and create permissions based on sign and filters."""
if view.suffix == 'Instance':
return True
filter_and_actions = self._get_filter_and_actions(
request.query_params.get('sign'),
view.action,
'{}.{}'.format(
view.queryset.model._meta.app_label,
view.queryset.model._meta.model_name
)
)
if not filter_and_actions:
return False
if request.method == 'POST':
for key, value in request.data.iteritems():
# Do unicode conversion because value will always be a
# string
if (key in filter_and_actions['filters'] and not
unicode(filter_and_actions['filters'][key]) == unicode(value)):
return False
return True | Check list and create permissions based on sign and filters. |
def loop(self, timeout=1.0, max_packets=1):
"""Process network events.
This function must be called regularly to ensure communication with the
broker is carried out. It calls select() on the network socket to wait
for network events. If incoming data is present it will then be
processed. Outgoing commands, from e.g. publish(), are normally sent
immediately when their function is called, but this is not always
possible. loop() will also attempt to send any remaining outgoing
messages, which also includes commands that are part of the flow for
messages with QoS>0.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
Returns MQTT_ERR_SUCCESS on success.
Returns >0 on error.
A ValueError will be raised if timeout < 0"""
if timeout < 0.0:
raise ValueError('Invalid timeout.')
self._current_out_packet_mutex.acquire()
self._out_packet_mutex.acquire()
if self._current_out_packet is None and len(self._out_packet) > 0:
self._current_out_packet = self._out_packet.pop(0)
if self._current_out_packet:
wlist = [self.socket()]
else:
wlist = []
self._out_packet_mutex.release()
self._current_out_packet_mutex.release()
# sockpairR is used to break out of select() before the timeout, on a
# call to publish() etc.
rlist = [self.socket(), self._sockpairR]
try:
socklist = select.select(rlist, wlist, [], timeout)
except TypeError as e:
# Socket isn't the correct type; in all likelihood the connection is lost
return MQTT_ERR_CONN_LOST
except ValueError:
# Can occur if we just reconnected but rlist/wlist contain a -1 for
# some reason.
return MQTT_ERR_CONN_LOST
except:
return MQTT_ERR_UNKNOWN
if self.socket() in socklist[0]:
rc = self.loop_read(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
if self._sockpairR in socklist[0]:
# Stimulate output write even though we didn't ask for it, because
# at that point the publish or other command wasn't present.
socklist[1].insert(0, self.socket())
# Clear sockpairR - only ever a single byte written.
try:
self._sockpairR.recv(1)
except socket.error as err:
if err.errno != EAGAIN:
raise
if self.socket() in socklist[1]:
rc = self.loop_write(max_packets)
if rc or (self._ssl is None and self._sock is None):
return rc
return self.loop_misc() | Process network events.
This function must be called regularly to ensure communication with the
broker is carried out. It calls select() on the network socket to wait
for network events. If incoming data is present it will then be
processed. Outgoing commands, from e.g. publish(), are normally sent
immediately when their function is called, but this is not always
possible. loop() will also attempt to send any remaining outgoing
messages, which also includes commands that are part of the flow for
messages with QoS>0.
timeout: The time in seconds to wait for incoming/outgoing network
traffic before timing out and returning.
max_packets: Not currently used.
Returns MQTT_ERR_SUCCESS on success.
Returns >0 on error.
A ValueError will be raised if timeout < 0 |
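A hedged sketch of driving this loop from application code, assuming the paho-mqtt 1.x style client this appears to come from (newer 2.x releases change the Client constructor); broker host and topic are hypothetical:
import paho.mqtt.client as mqtt

client = mqtt.Client()
client.connect("broker.example.com", 1883, keepalive=60)   # hypothetical broker
client.subscribe("sensors/#")
while True:
    rc = client.loop(timeout=1.0)        # process one round of network events
    if rc != mqtt.MQTT_ERR_SUCCESS:
        client.reconnect()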
def main():
'''main entry'''
cli = docker.from_env()
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "pcv", ["pretty", "compose"])
except getopt.GetoptError as _:
print("Usage: docker-parse [--pretty|-p|--compose|-c] [containers]")
sys.exit(2)
if len(args) == 0:
containers = cli.containers.list(all=True)
else:
containers = map(lambda nm: cli.containers.get(nm), args)
as_compose = False
pretty = False
for opt, _ in opts:
if opt == '-v':
print(__version__)
sys.exit()
elif opt == '-p' or opt == '--pretty':
pretty = True
break
elif opt == '-c' or opt == '--compose':
as_compose = True
break
for container in containers:
info = container.attrs
# diff with image info to reduce information
image_info = cli.images.get(info['Config']['Image']).attrs
if as_compose:
output_compose(info, image_info)
else:
output_command(info, image_info, pretty) | main entry |
def _get_args_contents(self):
"""
Mimic the argument formatting behaviour of
ActionBase._execute_module().
"""
return ' '.join(
'%s=%s' % (key, shlex_quote(str(self.args[key])))
for key in self.args
) + ' ' | Mimic the argument formatting behaviour of
ActionBase._execute_module(). |
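The formatting it mimics can be reproduced standalone; a quick sketch, assuming shlex.quote stands in for shlex_quote and the args dict is hypothetical:
from shlex import quote as shlex_quote

args = {'path': '/tmp/demo file', 'state': 'present'}
line = ' '.join('%s=%s' % (key, shlex_quote(str(args[key]))) for key in args) + ' '
print(line)   # "path='/tmp/demo file' state=present " (note the trailing space)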
def area(self, chord_length=1e-4):
"""Find area enclosed by path.
Approximates any Arc segments in the Path with lines
approximately `chord_length` long, and returns the area enclosed
by the approximated Path. The default chord length is 1e-4. If Arc
segments are included in path, to ensure accurate results, make
sure this `chord_length` is set to a reasonable value (e.g. by
checking curvature).
Notes
-----
* Negative area results from clockwise (as opposed to
counter-clockwise) parameterization of the input Path.
To Contributors
---------------
This is one of many parts of `svgpathtools` that could be
improved by a noble soul implementing a piecewise-linear
approximation scheme for paths (one with controls to guarantee a
desired accuracy).
"""
def area_without_arcs(path):
area_enclosed = 0
for seg in path:
x = real(seg.poly())
dy = imag(seg.poly()).deriv()
integrand = x*dy
integral = integrand.integ()
area_enclosed += integral(1) - integral(0)
return area_enclosed
def seg2lines(seg):
"""Find piecewise-linear approximation of `seg`."""
num_lines = int(ceil(seg.length() / chord_length))
pts = [seg.point(t) for t in np.linspace(0, 1, num_lines+1)]
return [Line(pts[i], pts[i+1]) for i in range(num_lines)]
assert self.isclosed()
bezier_path_approximation = []
for seg in self:
if isinstance(seg, Arc):
bezier_path_approximation += seg2lines(seg)
else:
bezier_path_approximation.append(seg)
return area_without_arcs(Path(*bezier_path_approximation)) | Find area enclosed by path.
Approximates any Arc segments in the Path with lines
approximately `chord_length` long, and returns the area enclosed
by the approximated Path. The default chord length is 1e-4. If Arc
segments are included in path, to ensure accurate results, make
sure this `chord_length` is set to a reasonable value (e.g. by
checking curvature).
Notes
-----
* Negative area results from clockwise (as opposed to
counter-clockwise) parameterization of the input Path.
To Contributors
---------------
This is one of many parts of `svgpathtools` that could be
improved by a noble soul implementing a piecewise-linear
approximation scheme for paths (one with controls to guarantee a
desired accuracy). |
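A hedged usage sketch with svgpathtools, tracing the unit square counter-clockwise so the enclosed area comes out positive:
from svgpathtools import Path, Line

square = Path(Line(0 + 0j, 1 + 0j),
              Line(1 + 0j, 1 + 1j),
              Line(1 + 1j, 0 + 1j),
              Line(0 + 1j, 0 + 0j))
print(square.area())   # -> 1.0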
def modify(self, service_name, json, **kwargs):
"""Modify an AppNexus object"""
return self._send(requests.put, service_name, json, **kwargs) | Modify an AppNexus object |
def deserialize_profile(profile, key_prefix='', pop=False):
"""De-serialize user profile fields into concrete model fields."""
result = {}
if pop:
getter = profile.pop
else:
getter = profile.get
def prefixed(name):
"""Return name prefixed by `key_prefix`."""
return '%s%s' % (key_prefix, name)
for key in profile.keys():
val = getter(key)
if key == prefixed('name'):
result['full_name'] = val
else:
raise MeteorError(400, 'Bad profile key: %r' % key)
return result | De-serialize user profile fields into concrete model fields. |
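A quick illustration of the mapping (the profile dicts are hypothetical):
print(deserialize_profile({'name': 'Ada Lovelace'}))
# -> {'full_name': 'Ada Lovelace'}
# With a key prefix, only prefixed keys are recognised:
print(deserialize_profile({'user.name': 'Ada Lovelace'}, key_prefix='user.'))
# -> {'full_name': 'Ada Lovelace'}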
async def presentProof(self, proofRequest: ProofRequest) -> FullProof:
"""
Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
"""
claims, requestedProof = await self._findClaims(proofRequest)
proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof)
return proof | Presents a proof to the verifier.
:param proofRequest: description of a proof to be presented (revealed
attributes, predicates, timestamps for non-revocation)
:return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values) |
def process_stats(self, stats, prefix, metric_categories, nested_tags, tags, recursion_level=0):
"""
The XML will have Stat Nodes and Nodes that contain the metrics themselves
This code recursively goes through each Stat Node to properly set up tags
where each Stat will have a different tag key depending on the context.
"""
for child in stats:
if child.tag in metrics.METRIC_VALUE_FIELDS:
self.submit_metrics(child, prefix, tags)
elif child.tag in metrics.CATEGORY_FIELDS:
recursion_tags = tags + ["{}:{}".format(nested_tags.get(prefix)[recursion_level], child.get('name'))]
self.process_stats(child, prefix, metric_categories, nested_tags, recursion_tags, recursion_level + 1) | The XML will have Stat Nodes and Nodes that contain the metrics themselves
This code recursively goes through each Stat Node to properly set up tags
where each Stat will have a different tag key depending on the context. |
def depth(self, value):
""" Update ourself and any of our subcommands. """
for command in self.subcommands.values():
command.depth = value + 1
del command.argparser._defaults[self.arg_label_fmt % self._depth]
command.argparser._defaults[self.arg_label_fmt % value] = command
self._depth = value | Update ourself and any of our subcommands. |
def _make_pheno_assoc(
self, graph, gene_id, disorder_num, disorder_label, phene_key
):
"""
From the docs:
Brackets, "[ ]", indicate "nondiseases," mainly genetic variations
that lead to apparently abnormal laboratory test values
(e.g., dysalbuminemic euthyroidal hyperthyroxinemia).
Braces, "{ }", indicate mutations that contribute to susceptibility
to multifactorial disorders (e.g., diabetes, asthma) or to
susceptibility to infection (e.g., malaria).
A question mark, "?", before the phenotype name indicates that the
relationship between the phenotype and gene is provisional.
More details about this relationship are provided in the comment
field of the map and in the gene and phenotype OMIM entries.
Phene key:
The number in parentheses after the name of each disorder indicates
the following:
(1) the disorder was positioned by mapping of the wildtype gene;
(2) the disease phenotype itself was mapped;
(3) the molecular basis of the disorder is known;
(4) the disorder is a chromosome deletion or duplication syndrome.
reference: https://omim.org/help/faq#1_6
:param graph: graph object of type dipper.graph.Graph
:param gene_id: str, gene id as curie
:param gene_symbol: str, symbol
:param disorder_num: str, disorder id
:param disorder_label: str, disorder label
:param phene_key: int or str, 1-4, see docstring
:return:
"""
disorder_id = ':'.join(('OMIM', disorder_num))
rel_label = 'causes condition'
rel_id = self.globaltt[rel_label]
if disorder_label.startswith('['):
rel_id = self.globaltt['is marker for']
# rel_label = 'is a marker for'
elif disorder_label.startswith('{'):
rel_id = self.globaltt['contributes to']
# rel_label = 'contributes to'
elif disorder_label.startswith('?'):
# this is a questionable mapping! skip?
rel_id = self.globaltt['contributes to']
assoc = G2PAssoc(graph, self.name, gene_id, disorder_id, rel_id)
if phene_key is not None:
evidence = self.resolve(phene_key, False)
if evidence != phene_key:
assoc.add_evidence(evidence) # evidence is Found
assoc.add_association_to_graph() | From the docs:
Brackets, "[ ]", indicate "nondiseases," mainly genetic variations
that lead to apparently abnormal laboratory test values
(e.g., dysalbuminemic euthyroidal hyperthyroxinemia).
Braces, "{ }", indicate mutations that contribute to susceptibility
to multifactorial disorders (e.g., diabetes, asthma) or to
susceptibility to infection (e.g., malaria).
A question mark, "?", before the phenotype name indicates that the
relationship between the phenotype and gene is provisional.
More details about this relationship are provided in the comment
field of the map and in the gene and phenotype OMIM entries.
Phene key:
The number in parentheses after the name of each disorder indicates
the following:
(1) the disorder was positioned by mapping of the wildtype gene;
(2) the disease phenotype itself was mapped;
(3) the molecular basis of the disorder is known;
(4) the disorder is a chromosome deletion or duplication syndrome.
reference: https://omim.org/help/faq#1_6
:param graph: graph object of type dipper.graph.Graph
:param gene_id: str, gene id as curie
:param gene_symbol: str, symbol
:param disorder_num: str, disorder id
:param disorder_label: str, disorder label
:param phene_key: int or str, 1-4, see docstring
:return: |
def _recon_lcs(x, y):
"""
Returns the Longest Common Subsequence between x and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
sequence: LCS of x and y
"""
i, j = len(x), len(y)
table = _lcs(x, y)
def _recon(i, j):
"""private recon calculation"""
if i == 0 or j == 0:
return []
elif x[i - 1] == y[j - 1]:
return _recon(i - 1, j - 1) + [(x[i - 1], i)]
elif table[i - 1, j] > table[i, j - 1]:
return _recon(i - 1, j)
else:
return _recon(i, j - 1)
recon_tuple = tuple(map(lambda x: x[0], _recon(i, j)))
return recon_tuple | Returns the Longest Common Subsequence between x and y.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns:
sequence: LCS of x and y |
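A worked example, assuming the private _lcs helper referenced above is importable from the same module; the word sequences are hypothetical:
x = ('the', 'cat', 'sat', 'on', 'the', 'mat')
y = ('the', 'dog', 'sat', 'near', 'the', 'mat')
print(_recon_lcs(x, y))   # ('the', 'sat', 'the', 'mat')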
def get_instance(cls, dependencies=None):
"""
Return an instance for a contract name.
:param dependencies:
:return: Contract base instance
"""
assert cls is not ContractBase, 'ContractBase is not meant to be used directly.'
assert cls.CONTRACT_NAME, 'CONTRACT_NAME must be set to a valid keeper contract name.'
return cls(cls.CONTRACT_NAME, dependencies) | Return an instance for a contract name.
:param dependencies:
:return: Contract base instance |