code | docstring
---|---
def filter_by_analysis_period(self, analysis_period):
"""Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data
"""
_filtered_data = self.filter_by_months(analysis_period.months_int)
_filtered_data.header._analysis_period = analysis_period
return _filtered_data | Filter the Data Collection based on an analysis period.
Args:
analysis_period: A Ladybug analysis period
Return:
A new Data Collection with filtered data |
def runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope, offset in deferred:
self.scopeStack = scope
self.offset = offset
handler() | Run the callables in C{deferred} using their associated scope stack. |
def initialize_weights(self):
"""Randomly initializes the visible-to-hidden connections."""
n = self._outputSize
m = self._inputSize
self._Q = self._random.sample((n,m))
# Normalize the weights of each units
for i in range(n):
self._Q[i] /= np.sqrt( np.dot(self._Q[i], self._Q[i]) ) | Randomly initializes the visible-to-hidden connections. |
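A minimal numpy sketch of the same initialisation, with hypothetical sizes standing in for `self._inputSize`/`self._outputSize`: each row (one unit's incoming weights) is drawn at random and rescaled to unit length.

```python
import numpy as np

# Sketch only: `rng`, `n` and `m` are stand-ins for the class's private state.
rng = np.random.RandomState(42)
n, m = 4, 8
Q = rng.random_sample((n, m))
Q /= np.linalg.norm(Q, axis=1, keepdims=True)   # normalise each row to unit norm
print(np.linalg.norm(Q, axis=1))                # -> array of ones
```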
def discard(self, value):
"""Remove element *value* from the set if it is present."""
# Raise TypeError if value is not hashable
hash(value)
self.redis.srem(self.key, self._pickle(value)) | Remove element *value* from the set if it is present. |
def _check_d1_characters(name):
# type: (bytes) -> None
'''
A function to check that a name only uses d1 characters as defined by ISO9660.
Parameters:
name - The name to check.
Returns:
Nothing.
'''
bytename = bytearray(name)
for char in bytename:
if char not in _allowed_d1_characters:
raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must consist of characters A-Z, 0-9, and _') | A function to check that a name only uses d1 characters as defined by ISO9660.
Parameters:
name - The name to check.
Returns:
Nothing. |
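A rough standalone sketch of the same check; the real allowed set lives in the module's private `_allowed_d1_characters`, so the set below is an assumption taken from the error message (A-Z, 0-9 and '_').

```python
import string

# Assumed allowed set; the library's actual _allowed_d1_characters may differ.
ALLOWED_D1 = set((string.ascii_uppercase + string.digits + '_').encode())

def check_d1(name: bytes) -> None:
    # Iterating a bytearray yields ints, matching membership tests on a set of byte values.
    for char in bytearray(name):
        if char not in ALLOWED_D1:
            raise ValueError('ISO9660 filenames must consist of characters A-Z, 0-9, and _')

check_d1(b'README_1')       # passes
# check_d1(b'readme')       # would raise: lowercase letters are not d1 characters
```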
def controller(self):
"""
Check if multiple controllers are connected.
:returns: Return the controller_id of the active controller.
:rtype: string
"""
if hasattr(self, 'controller_id'):
if len(self.controller_info['controllers']) > 1:
raise TypeError(
'Only one controller per account is supported.'
)
return self.controller_id
raise AttributeError('No controllers assigned to this account.') | Check if multiple controllers are connected.
:returns: Return the controller_id of the active controller.
:rtype: string |
def _get_bucket(self, bucket_name):
'''get a bucket based on a bucket name. If it doesn't exist, create it.
Parameters
==========
bucket_name: the name of the bucket to get (or create). It should
not contain google, and should be all lowercase with -
or underscores.
'''
# Case 1: The bucket already exists
try:
bucket = self._bucket_service.get_bucket(bucket_name)
# Case 2: The bucket needs to be created
except google.cloud.exceptions.NotFound:
bucket = self._bucket_service.create_bucket(bucket_name)
# Case 3: The bucket name is already taken
except:
bot.error('Cannot get or create %s' % bucket_name)
sys.exit(1)
return bucket | get a bucket based on a bucket name. If it doesn't exist, create it.
Parameters
==========
bucket_name: the name of the bucket to get (or create). It should
not contain google, and should be all lowercase with -
or underscores. |
def listen(self, event):
"""Request that the Controller listen for and dispatch an event.
Note: Even if the module that requested the listening is later
unloaded, the Controller will continue to dispatch the event, there
just might not be anything that cares about it. That's okay.
"""
if event in self.registered:
# Already listening to this event
return
def handler(client, *args):
return self.process_event(event, client, args)
self.client.add_handler(event, handler)
self.registered.add(event)
_log.debug("Controller is now listening for '%s' events", event) | Request that the Controller listen for and dispatch an event.
Note: Even if the module that requested the listening is later
unloaded, the Controller will continue to dispatch the event, there
just might not be anything that cares about it. That's okay. |
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
observed=False, mutated=False, validate=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values
If validate, then check for key/level overlaps
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
# TODO: These if-block and else-block are almost same.
# MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError('No group keys passed!')
else:
raise ValueError('multiple levels only valid with '
'MultiIndex')
if isinstance(level, str):
if obj.index.name != level:
raise ValueError('level name {} is not the name of the '
'index'.format(level))
elif level > 0 or level < -1:
raise ValueError(
'level > 0 or level < -1 only valid with MultiIndex')
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
# are same in this section.
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, [], obj
else:
return grouper, {key.key}, obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
# In the future, a tuple key will always mean an actual key,
# not an iterable of keys. In the meantime, we attempt to provide
# a warning. We can assume that the user wanted a list of keys when
# the key is not in the index. We just have to be careful with
# unhashable elements of `key`. Any unhashable elements implies that
# they wanted a list of keys.
# https://github.com/pandas-dev/pandas/issues/18314
is_tuple = isinstance(key, tuple)
all_hashable = is_tuple and is_hashable(key)
if is_tuple:
if ((all_hashable and key not in obj and set(key).issubset(obj))
or not all_hashable):
# column names ('a', 'b') -> ['a', 'b']
# arrays like (a, b) -> [a, b]
msg = ("Interpreting tuple 'by' as a list of keys, rather than "
"a single key. Use 'by=[...]' instead of 'by=(...)'. In "
"the future, a tuple will always mean a single key.")
warnings.warn(msg, FutureWarning, stacklevel=5)
key = list(key)
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
# is this an index replacement?
if (not any_callable and not any_arraylike and not any_groupers and
match_axis_length and level is None):
if isinstance(obj, DataFrame):
all_in_columns_index = all(g in obj.columns or g in
obj.index.names for g in keys)
elif isinstance(obj, Series):
all_in_columns_index = all(g in obj.index.names for g in keys)
if not all_in_columns_index:
keys = [com.asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
obj._check_label_or_level_ambiguity(gpr)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
("Length of grouper ({len_gpr}) and axis ({len_axis})"
" must be same length"
.format(len_gpr=len(gpr), len_axis=obj.shape[axis])))
# create the Grouping
# allow us to pass the actual Grouping as the gpr
ping = (Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis)
if not isinstance(gpr, Grouping) else gpr)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj | create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values
If validate, then check for key/level overlaps |
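For context, a couple of the user-facing spellings that end up being resolved by this routine; the data below is purely illustrative.

```python
import pandas as pd

df = pd.DataFrame({"city": ["NY", "NY", "LA"], "sales": [1, 2, 3]}).set_index("city")
print(df.groupby(level="city")["sales"].sum())      # key comes from an index level
print(df.groupby(df["sales"] > 1)["sales"].sum())    # key is an array-like grouper
```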
def _prodterm_prime(lexer):
"""Return a product term' expression, eliminates left recursion."""
tok = next(lexer)
# '&' FACTOR PRODTERM'
if isinstance(tok, OP_and):
factor = _factor(lexer)
prodterm_prime = _prodterm_prime(lexer)
if prodterm_prime is None:
return factor
else:
return ('and', factor, prodterm_prime)
# null
else:
lexer.unpop_token(tok)
return None | Return a product term' expression, eliminates left recursion. |
def get_wcs(self, data_x, data_y):
"""Return (re_deg, dec_deg) for the (data_x, data_y) position
based on any WCS associated with the loaded image.
"""
img = self.fitsimage.get_image()
ra, dec = img.pixtoradec(data_x, data_y)
return ra, dec | Return (ra_deg, dec_deg) for the (data_x, data_y) position
based on any WCS associated with the loaded image. |
def set_table_cb(self, viewer, table):
"""Display the given table object."""
self.clear()
tree_dict = OrderedDict()
# Extract data as astropy table
a_tab = table.get_data()
# Fill masked values, if applicable
try:
a_tab = a_tab.filled()
except Exception: # Just use original table
pass
# This is to get around table widget not sorting numbers properly
i_fmt = '{{0:0{0}d}}'.format(len(str(len(a_tab))))
# Table header with units
columns = [('Row', '_DISPLAY_ROW')]
for c in a_tab.columns.values():
col_str = '{0:^s}\n{1:^s}'.format(c.name, str(c.unit))
columns.append((col_str, c.name))
self.widget.setup_table(columns, 1, '_DISPLAY_ROW')
# Table contents
for i, row in enumerate(a_tab, 1):
bnch = Bunch.Bunch(zip(row.colnames, row.as_void()))
i_str = i_fmt.format(i)
bnch['_DISPLAY_ROW'] = i_str
tree_dict[i_str] = bnch
self.widget.set_tree(tree_dict)
# Resize column widths
n_rows = len(tree_dict)
if n_rows < self.settings.get('max_rows_for_col_resize', 5000):
self.widget.set_optimal_column_widths()
self.logger.debug('Resized columns for {0} row(s)'.format(n_rows))
tablename = table.get('name', 'NoName')
self.logger.debug('Displayed {0}'.format(tablename)) | Display the given table object. |
def disable_svc_notifications(self, service):
"""Disable notifications for a service
Format of the line that triggers function call::
DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.notifications_enabled:
service.modified_attributes |= \
DICT_MODATTR["MODATTR_NOTIFICATIONS_ENABLED"].value
service.notifications_enabled = False
self.send_an_element(service.get_update_status_brok()) | Disable notifications for a service
Format of the line that triggers function call::
DISABLE_SVC_NOTIFICATIONS;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None |
def list_resources(self, device_id):
"""List all resources registered to a connected device.
.. code-block:: python
>>> for r in api.list_resources(device_id):
print(r.name, r.observable, r.uri)
None,True,/3/0/1
Update,False,/5/0/3
...
:param str device_id: The ID of the device (Required)
:returns: A list of :py:class:`Resource` objects for the device
:rtype: list
"""
api = self._get_api(mds.EndpointsApi)
return [Resource(r) for r in api.get_endpoint_resources(device_id)] | List all resources registered to a connected device.
.. code-block:: python
>>> for r in api.list_resources(device_id):
print(r.name, r.observable, r.uri)
None,True,/3/0/1
Update,False,/5/0/3
...
:param str device_id: The ID of the device (Required)
:returns: A list of :py:class:`Resource` objects for the device
:rtype: list |
def run_model(self, model_run, run_url):
"""Run model by sending message to RabbitMQ queue containing the
run and experiment identifiers. Messages are persistent to ensure that
a worker will process the run request at some point.
Throws an EngineException if communication with the server fails.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information
"""
# Open connection to RabbitMQ server. Will raise an exception if the
# server is not running. In this case we raise an EngineException to
# allow caller to delete model run.
try:
credentials = pika.PlainCredentials(self.user, self.password)
con = pika.BlockingConnection(pika.ConnectionParameters(
host=self.host,
port=self.port,
virtual_host=self.virtual_host,
credentials=credentials
))
channel = con.channel()
channel.queue_declare(queue=self.queue, durable=True)
except pika.exceptions.AMQPError as ex:
err_msg = str(ex)
if err_msg == '':
err_msg = 'unable to connect to RabbitMQ: ' + self.user + '@'
err_msg += self.host + ':' + str(self.port)
err_msg += self.virtual_host + ' ' + self.queue
raise EngineException(err_msg, 500)
# Create model run request
request = RequestFactory().get_request(model_run, run_url)
# Send request
channel.basic_publish(
exchange='',
routing_key=self.queue,
body=json.dumps(request.to_dict()),
properties=pika.BasicProperties(
delivery_mode = 2, # make message persistent
)
)
con.close() | Run model by sending message to RabbitMQ queue containing the
run and experiment identifiers. Messages are persistent to ensure that
a worker will process the run request at some point.
Throws an EngineException if communication with the server fails.
Parameters
----------
model_run : ModelRunHandle
Handle to model run
run_url : string
URL for model run information |
def warning(self, *msg):
"""
Prints a warning
"""
label = colors.yellow("WARNING")
self._msg(label, *msg) | Prints a warning |
def endpoints(self):
"""
Gets the Endpoints API client.
Returns:
Endpoints:
"""
if not self.__endpoints:
self.__endpoints = Endpoints(self.__connection)
return self.__endpoints | Gets the Endpoints API client.
Returns:
Endpoints: |
def lithospheric_stress(step, trench, ridge, time):
"""calculate stress in the lithosphere"""
timestep = step.isnap
base_lith = step.geom.rcmb + 1 - 0.105
stressfld = step.fields['sII'][0, :, :, 0]
stressfld = np.ma.masked_where(step.geom.r_mesh[0] < base_lith, stressfld)
# stress integration in the lithosphere
dzm = (step.geom.r_coord[1:] - step.geom.r_coord[:-1])
stress_lith = np.sum((stressfld[:, 1:] * dzm.T), axis=1)
ph_coord = step.geom.p_coord # probably doesn't need alias
# plot stress in the lithosphere
fig, axis, _, _ = field.plot_scalar(step, 'sII', stressfld,
cmap='plasma_r', vmin=0, vmax=300)
# Annotation with time and step
axis.text(1., 0.9, str(round(time, 0)) + ' My', transform=axis.transAxes)
axis.text(1., 0.1, str(timestep), transform=axis.transAxes)
misc.saveplot(fig, 'lith', timestep)
# velocity
vphi = step.fields['v2'][0, :, :, 0]
vph2 = 0.5 * (vphi + np.roll(vphi, 1, 0)) # interpolate to the same phi
# position of continents
concfld = step.fields['c'][0, :, :, 0]
if step.sdat.par['boundaries']['air_layer']:
# we are a bit below the surface; delete "-some number"
# to be just below
dsa = step.sdat.par['boundaries']['air_thickness']
# depth to detect the continents
indcont = np.argmin(abs((1 - dsa) - step.geom.r_coord)) - 10
else:
# depth to detect continents
indcont = -1
if step.sdat.par['boundaries']['air_layer'] and\
not step.sdat.par['continents']['proterozoic_belts']:
continents = np.ma.masked_where(
np.logical_or(concfld[:-1, indcont] < 3,
concfld[:-1, indcont] > 4),
concfld[:-1, indcont])
elif step.sdat.par['boundaries']['air_layer'] and\
step.sdat.par['continents']['proterozoic_belts']:
continents = np.ma.masked_where(
np.logical_or(concfld[:-1, indcont] < 3,
concfld[:-1, indcont] > 5),
concfld[:-1, indcont])
elif step.sdat.par['tracersin']['tracers_weakcrust']:
continents = np.ma.masked_where(
concfld[:-1, indcont] < 3, concfld[:-1, indcont])
else:
continents = np.ma.masked_where(
concfld[:-1, indcont] < 2, concfld[:-1, indcont])
# masked array, only continents are true
continentsall = continents / continents
# plot integrated stress in the lithosphere
fig0, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
ax1.plot(ph_coord[:-1], vph2[:-1, -1], label='Vel')
ax1.axhline(y=0, xmin=0, xmax=2 * np.pi,
color='black', ls='solid', alpha=0.2)
ax1.set_ylabel("Velocity")
ax1.text(0.95, 1.07, str(round(time, 0)) + ' My',
transform=ax1.transAxes)
ax1.text(0.01, 1.07, str(round(step.geom.ti_ad, 8)),
transform=ax1.transAxes)
intstr_scale = step.sdat.scales.stress * step.sdat.scales.length / 1.e12
ax2.plot(ph_coord, stress_lith * intstr_scale, color='k', label='Stress')
ax2.set_ylabel(r"Integrated stress [$TN\,m^{-1}$]")
plot_plate_limits(ax1, ridge, trench, conf.plates.vmin,
conf.plates.vmax)
plot_plate_limits(ax2, ridge, trench, conf.plates.stressmin,
conf.plates.lstressmax)
ax1.set_xlim(0, 2 * np.pi)
ax1.set_title(timestep)
ax1.fill_between(
ph_coord[:-1], continentsall * conf.plates.vmin,
conf.plates.vmax, facecolor='#8b6914', alpha=0.2)
ax1.set_ylim(conf.plates.vmin, conf.plates.vmax)
ax2.fill_between(
ph_coord[:-1], continentsall * conf.plates.stressmin,
conf.plates.lstressmax, facecolor='#8b6914', alpha=0.2)
ax2.set_ylim(conf.plates.stressmin, conf.plates.lstressmax)
misc.saveplot(fig0, 'svelslith', timestep) | calculate stress in the lithosphere |
def collect(self):
"""Publish all mdstat metrics."""
def traverse(d, metric_name=''):
"""
Traverse the given nested dict using depth-first search.
If a value is reached it will be published with a metric name
consisting of the hierarchically concatenated keys
of its branch.
"""
for key, value in d.iteritems():
if isinstance(value, dict):
if metric_name == '':
metric_name_next = key
else:
metric_name_next = metric_name + '.' + key
traverse(value, metric_name_next)
else:
metric_name_finished = metric_name + '.' + key
self.publish_gauge(
name=metric_name_finished,
value=value,
precision=1
)
md_state = self._parse_mdstat()
traverse(md_state, '') | Publish all mdstat metrics. |
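A standalone sketch of the depth-first flattening `traverse` performs, written for Python 3 (`items()` instead of `iteritems()`) and with a made-up mdstat-style dict.

```python
def flatten(d, prefix=''):
    # Depth-first walk; leaf values are yielded under dot-joined key paths.
    for key, value in d.items():
        name = key if not prefix else prefix + '.' + key
        if isinstance(value, dict):
            yield from flatten(value, name)
        else:
            yield name, value

md_state = {'md0': {'status': {'blocks': 1024, 'superblock_version': 1.2}}}
print(dict(flatten(md_state)))
# {'md0.status.blocks': 1024, 'md0.status.superblock_version': 1.2}
```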
def aggregationToMonthsSeconds(interval):
"""
Return the number of months and seconds from an aggregation dict that
represents a date and time.
Interval is a dict that contains one or more of the following keys: 'years',
'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'milliseconds',
'microseconds'.
For example:
::
aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}) ==
{'months':12, 'seconds':14400.000042}
:param interval: (dict) The aggregation interval representing a date and time
:returns: (dict) number of months and seconds in the interval:
``{months': XX, 'seconds': XX}``. The seconds is
a floating point that can represent resolutions down to a
microsecond.
"""
seconds = interval.get('microseconds', 0) * 0.000001
seconds += interval.get('milliseconds', 0) * 0.001
seconds += interval.get('seconds', 0)
seconds += interval.get('minutes', 0) * 60
seconds += interval.get('hours', 0) * 60 * 60
seconds += interval.get('days', 0) * 24 * 60 * 60
seconds += interval.get('weeks', 0) * 7 * 24 * 60 * 60
months = interval.get('months', 0)
months += 12 * interval.get('years', 0)
return {'months': months, 'seconds': seconds} | Return the number of months and seconds from an aggregation dict that
represents a date and time.
Interval is a dict that contains one or more of the following keys: 'years',
'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'milliseconds',
'microseconds'.
For example:
::
aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}) ==
{'months':12, 'seconds':14400.000042}
:param interval: (dict) The aggregation interval representing a date and time
:returns: (dict) number of months and seconds in the interval:
``{months': XX, 'seconds': XX}``. The seconds is
a floating point that can represent resolutions down to a
microsecond. |
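A quick usage check against the docstring's example, assuming the function above is in scope; the conversion is pure arithmetic, so it is easy to verify by hand.

```python
print(aggregationToMonthsSeconds({'years': 1, 'hours': 4, 'microseconds': 42}))
# -> {'months': 12, 'seconds': 14400.000042} (up to floating-point rounding)
```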
def address_checksum_and_decode(addr: str) -> Address:
""" Accepts a string address and turns it into binary.
Makes sure that the string address provided is 0x prefixed and
checksummed according to EIP55 specification
"""
if not is_0x_prefixed(addr):
raise InvalidAddress('Address must be 0x prefixed')
if not is_checksum_address(addr):
raise InvalidAddress('Address must be EIP55 checksummed')
addr_bytes = decode_hex(addr)
assert len(addr_bytes) in (20, 0)
return Address(addr_bytes) | Accepts a string address and turns it into binary.
Makes sure that the string address provided is 0x prefixed and
checksummed according to EIP55 specification |
def contour(z, x=None, y=None, v=5, xlbl=None, ylbl=None, title=None,
cfntsz=10, lfntsz=None, intrp='bicubic', alpha=0.5, cmap=None,
vmin=None, vmax=None, fgsz=None, fgnm=None, fig=None, ax=None):
"""
Contour plot of a 2D surface. If a figure object is specified then the
plot is drawn in that figure, and ``fig.show()`` is not called. The
figure is closed on key entry 'q'.
Parameters
----------
z : array_like
2d array of data to plot
x : array_like, optional (default None)
Values for x-axis of the plot
y : array_like, optional (default None)
Values for y-axis of the plot
v : int or sequence of ints, optional (default 5)
An int specifies the number of contours to plot, and a sequence
specifies the specific contour levels to plot.
xlbl : string, optional (default None)
Label for x-axis
ylbl : string, optional (default None)
Label for y-axis
title : string, optional (default None)
Figure title
cfntsz : int or None, optional (default 10)
Contour label font size. No contour labels are displayed if
set to 0 or None.
lfntsz : int, optional (default None)
Axis label font size. The default font size is used if set to None.
intrp : string, optional (default 'bicubic')
Specify type of interpolation used to display image underlying
contours (see ``interpolation`` parameter of
:meth:`matplotlib.axes.Axes.imshow`)
alpha : float, optional (default 0.5)
Underlying image display alpha value
cmap : :class:`matplotlib.colors.Colormap`, optional (default None)
Colour map for surface. If none specified, defaults to cm.coolwarm
vmin, vmax : float, optional (default None)
Set upper and lower bounds for the colour map (see the corresponding
parameters of :meth:`matplotlib.axes.Axes.imshow`)
fgsz : tuple (width,height), optional (default None)
Specify figure dimensions in inches
fgnm : integer, optional (default None)
Figure number of figure
fig : :class:`matplotlib.figure.Figure` object, optional (default None)
Draw in specified figure instead of creating one
ax : :class:`matplotlib.axes.Axes` object, optional (default None)
Plot in specified axes instead of current axes of figure
Returns
-------
fig : :class:`matplotlib.figure.Figure` object
Figure object for this figure
ax : :class:`matplotlib.axes.Axes` object
Axes object for this plot
"""
figp = fig
if fig is None:
fig = plt.figure(num=fgnm, figsize=fgsz)
fig.clf()
ax = fig.gca()
elif ax is None:
ax = fig.gca()
if cmap is None:
cmap = cm.coolwarm
if x is None:
x = np.arange(z.shape[1])
else:
x = np.array(x)
if y is None:
y = np.arange(z.shape[0])
else:
y = np.array(y)
xg, yg = np.meshgrid(x, y)
cntr = ax.contour(xg, yg, z, v, colors='black')
if cfntsz is not None and cfntsz > 0:
plt.clabel(cntr, inline=True, fontsize=cfntsz)
im = ax.imshow(z, origin='lower', interpolation=intrp, aspect='auto',
extent=[x.min(), x.max(), y.min(), y.max()], cmap=cmap,
vmin=vmin, vmax=vmax, alpha=alpha)
ax.fmt_xdata = lambda x: "{: .2f}".format(x)
ax.fmt_ydata = lambda x: "{: .2f}".format(x)
if title is not None:
ax.set_title(title)
if xlbl is not None:
ax.set_xlabel(xlbl, fontsize=lfntsz)
if ylbl is not None:
ax.set_ylabel(ylbl, fontsize=lfntsz)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.2)
plt.colorbar(im, ax=ax, cax=cax)
attach_keypress(fig)
attach_zoom(ax)
if have_mpldc:
mpldc.datacursor()
if figp is None:
fig.show()
return fig, ax | Contour plot of a 2D surface. If a figure object is specified then the
plot is drawn in that figure, and ``fig.show()`` is not called. The
figure is closed on key entry 'q'.
Parameters
----------
z : array_like
2d array of data to plot
x : array_like, optional (default None)
Values for x-axis of the plot
y : array_like, optional (default None)
Values for y-axis of the plot
v : int or sequence of ints, optional (default 5)
An int specifies the number of contours to plot, and a sequence
specifies the specific contour levels to plot.
xlbl : string, optional (default None)
Label for x-axis
ylbl : string, optional (default None)
Label for y-axis
title : string, optional (default None)
Figure title
cfntsz : int or None, optional (default 10)
Contour label font size. No contour labels are displayed if
set to 0 or None.
lfntsz : int, optional (default None)
Axis label font size. The default font size is used if set to None.
intrp : string, optional (default 'bicubic')
Specify type of interpolation used to display image underlying
contours (see ``interpolation`` parameter of
:meth:`matplotlib.axes.Axes.imshow`)
alpha : float, optional (default 0.5)
Underlying image display alpha value
cmap : :class:`matplotlib.colors.Colormap`, optional (default None)
Colour map for surface. If none specified, defaults to cm.coolwarm
vmin, vmax : float, optional (default None)
Set upper and lower bounds for the colour map (see the corresponding
parameters of :meth:`matplotlib.axes.Axes.imshow`)
fgsz : tuple (width,height), optional (default None)
Specify figure dimensions in inches
fgnm : integer, optional (default None)
Figure number of figure
fig : :class:`matplotlib.figure.Figure` object, optional (default None)
Draw in specified figure instead of creating one
ax : :class:`matplotlib.axes.Axes` object, optional (default None)
Plot in specified axes instead of current axes of figure
Returns
-------
fig : :class:`matplotlib.figure.Figure` object
Figure object for this figure
ax : :class:`matplotlib.axes.Axes` object
Axes object for this plot |
def check_honeypot(func=None, field_name=None):
"""
Check request.POST for valid honeypot field.
Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if
not specified.
"""
# hack to reverse arguments if called with str param
if isinstance(func, six.string_types):
func, field_name = field_name, func
def decorated(func):
def inner(request, *args, **kwargs):
response = verify_honeypot_value(request, field_name)
if response:
return response
else:
return func(request, *args, **kwargs)
return wraps(func, assigned=available_attrs(func))(inner)
if func is None:
def decorator(func):
return decorated(func)
return decorator
return decorated(func) | Check request.POST for valid honeypot field.
Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if
not specified. |
def _gen_delta_per_sec(self, path, value_delta, time_delta, multiplier,
prettyname, device):
"""
Calculates the difference between two points, and scales it to per second.
"""
if time_delta < 0:
return
value = (value_delta / time_delta) * multiplier
# Only publish if there is any data.
# This helps keep unused metrics out of Graphite
if value > 0.0:
self._replace_and_publish(path, prettyname, value, device) | Calculates the difference between two points, and scales it to per second.
def azimuth(lons1, lats1, lons2, lats2):
"""
Calculate the azimuth between two points or two collections of points.
Parameters are the same as for :func:`geodetic_distance`.
Implements an "alternative formula" from
http://williams.best.vwh.net/avform.htm#Crs
:returns:
Azimuth as an angle between direction to north from first point and
direction to the second point measured clockwise in decimal degrees.
"""
lons1, lats1, lons2, lats2 = _prepare_coords(lons1, lats1, lons2, lats2)
cos_lat2 = numpy.cos(lats2)
true_course = numpy.degrees(numpy.arctan2(
numpy.sin(lons1 - lons2) * cos_lat2,
numpy.cos(lats1) * numpy.sin(lats2)
- numpy.sin(lats1) * cos_lat2 * numpy.cos(lons1 - lons2)
))
return (360 - true_course) % 360 | Calculate the azimuth between two points or two collections of points.
Parameters are the same as for :func:`geodetic_distance`.
Implements an "alternative formula" from
http://williams.best.vwh.net/avform.htm#Crs
:returns:
Azimuth as an angle between direction to north from first point and
direction to the second point measured clockwise in decimal degrees. |
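A standalone numerical check of the formula, using plain numpy and two made-up points (coordinates converted to radians by hand, which `_prepare_coords` otherwise handles).

```python
import numpy as np

lon1, lat1, lon2, lat2 = map(np.radians, (0.0, 0.0, 10.0, 10.0))
true_course = np.degrees(np.arctan2(
    np.sin(lon1 - lon2) * np.cos(lat2),
    np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lon1 - lon2),
))
print((360 - true_course) % 360)   # ~44.6 degrees, i.e. roughly north-east
```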
def __locate_scubainit(self):
'''Determine path to scubainit binary
'''
pkg_path = os.path.dirname(__file__)
self.scubainit_path = os.path.join(pkg_path, 'scubainit')
if not os.path.isfile(self.scubainit_path):
raise ScubaError('scubainit not found at "{}"'.format(self.scubainit_path)) | Determine path to scubainit binary |
def GetStartTime(self, problems=problems_module.default_problem_reporter):
"""Return the first time of the trip. TODO: For trips defined by frequency
return the first time of the first trip."""
cursor = self._schedule._connection.cursor()
cursor.execute(
'SELECT arrival_secs,departure_secs FROM stop_times WHERE '
'trip_id=? ORDER BY stop_sequence LIMIT 1', (self.trip_id,))
(arrival_secs, departure_secs) = cursor.fetchone()
if arrival_secs != None:
return arrival_secs
elif departure_secs != None:
return departure_secs
else:
problems.InvalidValue('departure_time', '',
'The first stop_time in trip %s is missing '
'times.' % self.trip_id) | Return the first time of the trip. TODO: For trips defined by frequency
return the first time of the first trip. |
def sample(self, N=1):
"""Sample N trajectories from the posterior.
Note
----
Performs the forward step in case it has not been performed.
"""
if not self.filt:
self.forward()
paths = np.empty((len(self.filt), N), dtype=int)
paths[-1, :] = rs.multinomial(self.filt[-1], M=N)
log_trans = np.log(self.hmm.trans_mat)
for t, f in reversed(list(enumerate(self.filt[:-1]))):
for n in range(N):
probs = rs.exp_and_normalise(log_trans[:, paths[t + 1, n]] + np.log(f))
paths[t, n] = rs.multinomial_once(probs)
return paths | Sample N trajectories from the posterior.
Note
----
Performs the forward step in case it has not been performed. |
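A minimal sketch of the backward-sampling step this method performs, assuming a two-state HMM with hypothetical filtering distributions already computed (the real code works in log space via `rs.exp_and_normalise`).

```python
import numpy as np

rng = np.random.default_rng(0)
trans = np.array([[0.9, 0.1],            # trans[i, j] = P(x_{t+1}=j | x_t=i)
                  [0.2, 0.8]])
filt = [np.array([0.6, 0.4]),            # P(x_t | y_{1:t}) for t = 0, 1
        np.array([0.3, 0.7])]

path = np.empty(len(filt), dtype=int)
path[-1] = rng.choice(2, p=filt[-1])     # draw the final state from the last filter
for t in range(len(filt) - 2, -1, -1):
    # re-weight the filter at t by the transition into the state already drawn
    w = filt[t] * trans[:, path[t + 1]]
    path[t] = rng.choice(2, p=w / w.sum())
print(path)
```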
def StrIndexOf(input_string, substring, startIndex, bitlength):
"""
Return the index of the substring within the concrete value of the
input_string, searching from startIndex.
:param input_string: the string we want to check
:param substring: the substring we want to find the index
:param startIndex: the index to start searching at
:param bitlength: bitlength of the bitvector representing the index of the substring
:return BVV: index of the substring in bit-vector representation or -1 in bitvector representation
"""
try:
s = input_string.value
t = substring.value
i = startIndex.value
return BVV(i + s[i:].index(t), bitlength)
except ValueError:
return BVV(-1, bitlength) | Return the index of the substring within the concrete value of the
input_string, searching from startIndex.
:param input_string: the string we want to check
:param substring: the substring we want to find the index
:param startIndex: the index to start searching at
:param bitlength: bitlength of the bitvector representing the index of the substring
:return BVV: index of the substring in bit-vector representation or -1 in bitvector representation |
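The concrete semantics mirrored in plain Python, with made-up values; the function above just wraps the same computation in a bit-vector of the requested width.

```python
s, t, start = "hello world", "world", 3
try:
    result = start + s[start:].index(t)   # 6
except ValueError:
    result = -1                            # substring not found after startIndex
print(result)
```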
def add_fluctuations(hdf5_file, N_columns, N_processes):
"""This procedure organizes the addition of small fluctuations on top of
a matrix of similarities at 'hdf5_file' across 'N_processes'
different processes. Each of those processes is an instance of the
class 'Fluctuations_Worker' defined elsewhere in this module.
"""
random_state = np.random.RandomState(0)
slice_queue = multiprocessing.JoinableQueue()
pid_list = []
for i in range(N_processes):
worker = Fluctuations_worker(hdf5_file,
'/aff_prop_group/similarities', random_state,
N_columns, slice_queue)
worker.daemon = True
worker.start()
pid_list.append(worker.pid)
for rows_slice in chunk_generator(N_columns, 4 * N_processes):
slice_queue.put(rows_slice)
slice_queue.join()
slice_queue.close()
terminate_processes(pid_list)
gc.collect() | This procedure organizes the addition of small fluctuations on top of
a matrix of similarities at 'hdf5_file' across 'N_processes'
different processes. Each of those processes is an instance of the
class 'Fluctuations_Worker' defined elsewhere in this module. |
def accuracy_helper(egg, match='exact', distance='euclidean',
features=None):
"""
Computes proportion of words recalled
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by scipy.spatial.distance.cdist.
Returns
----------
prop_recalled : numpy array
proportion of words recalled
"""
def acc(lst):
return len([i for i in np.unique(lst) if i>=0])/(egg.list_length)
opts = dict(match=match, distance=distance, features=features)
if match == 'exact':
opts.update({'features' : 'item'})
recmat = recall_matrix(egg, **opts)
if match in ['exact', 'best']:
result = [acc(lst) for lst in recmat]
elif match == 'smooth':
result = np.mean(recmat, axis=1)
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.nanmean(result, axis=0) | Computes proportion of words recalled
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
distance function supported by scipy.spatial.distance.cdist.
Returns
----------
prop_recalled : numpy array
proportion of words recalled |
def align_bam(in_bam, ref_file, names, align_dir, data):
"""Perform direct alignment of an input BAM file with BWA using pipes.
This avoids disk IO by piping between processes:
- samtools sort of input BAM to queryname
- bedtools conversion to interleaved FASTQ
- bwa-mem alignment
- samtools conversion to BAM
- samtools sort to coordinate
"""
config = data["config"]
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
samtools = config_utils.get_program("samtools", config)
bedtools = config_utils.get_program("bedtools", config)
resources = config_utils.get_resources("samtools", config)
num_cores = config["algorithm"].get("num_cores", 1)
# adjust memory for samtools since used for input and output
max_mem = config_utils.adjust_memory(resources.get("memory", "1G"),
3, "decrease").upper()
if not utils.file_exists(out_file):
with tx_tmpdir(data) as work_dir:
with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
bwa_cmd = _get_bwa_mem_cmd(data, out_file, ref_file, "-")
tx_out_prefix = os.path.splitext(tx_out_file)[0]
prefix1 = "%s-in1" % tx_out_prefix
cmd = ("unset JAVA_HOME && "
"{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
"| {bedtools} bamtofastq -i /dev/stdin -fq /dev/stdout -fq2 /dev/stdout "
"| {bwa_cmd} | ")
cmd = cmd.format(**locals()) + tobam_cl
do.run(cmd, "bwa mem alignment from BAM: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
return out_file | Perform direct alignment of an input BAM file with BWA using pipes.
This avoids disk IO by piping between processes:
- samtools sort of input BAM to queryname
- bedtools conversion to interleaved FASTQ
- bwa-mem alignment
- samtools conversion to BAM
- samtools sort to coordinate |
def _CallWindowsNetCommand(parameters):
'''
Call Windows NET command, used to acquire/configure network services settings.
:param parameters: list of command line parameters
:return: command output
'''
import subprocess
popen = subprocess.Popen(["net"] + parameters, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdoutdata, stderrdata = popen.communicate()
if stderrdata:
raise OSError("Failed on call net.exe: %s" % stderrdata)
return stdoutdata | Call Windows NET command, used to acquire/configure network services settings.
:param parameters: list of command line parameters
:return: command output |
def register(self, token, regexp):
"""Register a token.
Args:
token (Token): the token class to register
regexp (str): the regexp for that token
"""
self._tokens.append((token, re.compile(regexp))) | Register a token.
Args:
token (Token): the token class to register
regexp (str): the regexp for that token |
def turn_right(self, angle_degrees, rate=RATE):
"""
Turn to the right, staying on the spot
:param angle_degrees: How far to turn (degrees)
:param rate: The turning speed (degrees/second)
:return:
"""
flight_time = angle_degrees / rate
self.start_turn_right(rate)
time.sleep(flight_time)
self.stop() | Turn to the right, staying on the spot
:param angle_degrees: How far to turn (degrees)
:param rate: The turning speed (degrees/second)
:return: |
def walk_dependencies(root, visitor):
"""
Call visitor on root and all dependencies reachable from it in breadth
first order.
Args:
root (component): component function or class
visitor (function): signature is `func(component, parent)`. The
call on root is `visitor(root, None)`.
"""
def visit(parent, visitor):
for d in get_dependencies(parent):
visitor(d, parent)
visit(d, visitor)
visitor(root, None)
visit(root, visitor) | Call visitor on root and all dependencies reachable from it in breadth
first order.
Args:
root (component): component function or class
visitor (function): signature is `func(component, parent)`. The
call on root is `visitor(root, None)`. |
def activities(self, *args, **kwargs):
"""Retrieve activities belonging to this scope.
See :class:`pykechain.Client.activities` for available parameters.
"""
if self._client.match_app_version(label='wim', version='<2.0.0', default=True):
return self._client.activities(*args, scope=self.id, **kwargs)
else:
return self._client.activities(*args, scope_id=self.id, **kwargs) | Retrieve activities belonging to this scope.
See :class:`pykechain.Client.activities` for available parameters. |
def delete(self, ids):
"""
Method to delete vlan's by their ids
:param ids: Identifiers of vlan's
:return: None
"""
url = build_uri_with_ids('api/v3/vlan/%s/', ids)
return super(ApiVlan, self).delete(url) | Method to delete vlan's by their ids
:param ids: Identifiers of vlan's
:return: None |
def execute(self):
""""Run Checkstyle on all found non-synthetic source files."""
python_tgts = self.context.targets(
lambda tgt: isinstance(tgt, (PythonTarget))
)
if not python_tgts:
return 0
interpreter_cache = PythonInterpreterCache.global_instance()
with self.invalidated(self.get_targets(self._is_checked)) as invalidation_check:
failure_count = 0
tgts_by_compatibility, _ = interpreter_cache.partition_targets_by_compatibility(
[vt.target for vt in invalidation_check.invalid_vts]
)
for filters, targets in tgts_by_compatibility.items():
sources = self.calculate_sources([tgt for tgt in targets])
if sources:
allowed_interpreters = set(interpreter_cache.setup(filters=filters))
if not allowed_interpreters:
raise TaskError('No valid interpreters found for targets: {}\n(filters: {})'
.format(targets, filters))
interpreter = min(allowed_interpreters)
failure_count += self.checkstyle(interpreter, sources)
if failure_count > 0 and self.get_options().fail:
raise TaskError('{} Python Style issues found. You may try `./pants fmt <targets>`'
.format(failure_count))
return failure_count | Run Checkstyle on all found non-synthetic source files. |
def add_details(file_name, title, artist, album, lyrics=""):
'''
Adds the details to song
'''
tags = EasyMP3(file_name)
tags["title"] = title
tags["artist"] = artist
tags["album"] = album
tags.save()
tags = ID3(file_name)
uslt_output = USLT(encoding=3, lang=u'eng', desc=u'desc', text=lyrics)
tags["USLT::'eng'"] = uslt_output
tags.save(file_name)
log.log("> Adding properties")
log.log_indented("[*] Title: %s" % title)
log.log_indented("[*] Artist: %s" % artist)
log.log_indented("[*] Album: %s " % album) | Adds the details to song |
def lower_camel(string, prefix='', suffix=''):
"""
Generate a camel-case identifier.
Useful for unit test methods.
Takes a string, prefix, and optional suffix.
`prefix` can be set to `''`, though be careful - without a prefix, the
function will throw `InvalidIdentifier` when your string starts with a
number.
Example:
>>> lower_camel("User can login", prefix='test')
'testUserCanLogin'
"""
return require_valid(append_underscore_if_keyword(''.join(
word.lower() if index == 0 else upper_case_first_char(word)
for index, word in enumerate(en.words(' '.join([prefix, string, suffix]))))
)) | Generate a camel-case identifier.
Useful for unit test methods.
Takes a string, prefix, and optional suffix.
`prefix` can be set to `''`, though be careful - without a prefix, the
function will throw `InvalidIdentifier` when your string starts with a
number.
Example:
>>> lower_camel("User can login", prefix='test')
'testUserCanLogin' |
def __send_command(
self, name, args=None, withcontent=False, extralines=None,
nblines=-1):
"""Send a command to the server.
If args is not empty, we concatenate the given command with
the content of this list. If extralines is not empty, they are
sent one by one to the server. (CRLF are automatically
appended to them)
We wait for a response just after the command has been sent.
:param name: the command to send
:param args: a list of arguments for this command
:param withcontent: tells the function to return the server's response
or not
:param extralines: a list of extra lines to send after the command
:param nblines: the number of response lines to read (all by default)
:returns: a tuple of the form (code, data[, response])
"""
tosend = name.encode("utf-8")
if args:
tosend += b" " + b" ".join(self.__prepare_args(args))
self.__dprint(b"Command: " + tosend)
self.sock.sendall(tosend + CRLF)
if extralines:
for l in extralines:
self.sock.sendall(l + CRLF)
code, data, content = self.__read_response(nblines)
if isinstance(code, six.binary_type):
code = code.decode("utf-8")
if isinstance(data, six.binary_type):
data = data.decode("utf-8")
if withcontent:
return (code, data, content)
return (code, data) | Send a command to the server.
If args is not empty, we concatenate the given command with
the content of this list. If extralines is not empty, they are
sent one by one to the server. (CRLF are automatically
appended to them)
We wait for a response just after the command has been sent.
:param name: the command to send
:param args: a list of arguments for this command
:param withcontent: tells the function to return the server's response
or not
:param extralines: a list of extra lines to send after the command
:param nblines: the number of response lines to read (all by default)
:returns: a tuple of the form (code, data[, response]) |
def avail_sizes(call=None):
'''
Return a dict of all available VM sizes on the cloud provider with
relevant data.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
conn = get_conn()
sizes = conn.fixed_server_flavors()
return sizes | Return a dict of all available VM sizes on the cloud provider with
relevant data. |
def log_likelihood_pairwise(data, params):
"""Compute the log-likelihood of model parameters."""
loglik = 0
for winner, loser in data:
loglik -= np.logaddexp(0, -(params[winner] - params[loser]))
return loglik | Compute the log-likelihood of model parameters. |
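An illustrative call, assuming the function above is in scope: `data` is a sequence of (winner, loser) index pairs and `params` holds one strength per item, Bradley-Terry style. The values are made up.

```python
import numpy as np

data = [(0, 1), (0, 2), (2, 1)]
params = np.array([0.5, -0.3, 0.0])
print(log_likelihood_pairwise(data, params))   # a negative float; closer to 0 is better
```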
def read(self, size=None):
""" Read a specified number of bytes from the file descriptor
This method emulates the normal file descriptor's ``read()`` method and
restricts the total number of bytes readable.
If file descriptor is not present (e.g., ``close()`` method had been
called), ``ValueError`` is raised.
If ``size`` is omitted, or ``None``, or any other falsy value, read
will be done up to the remaining length (constructor's ``length``
argument minus the bytes that have been read previously).
This method internally invokes the file descriptor's ``read()`` method,
and the method must accept a single integer positional argument.
"""
if not self.fd:
raise ValueError('I/O on closed file')
if not size:
size = self.remaining
size = min([self.remaining, size])
if not size:
return ''
data = self.fd.read(size)
self.remaining -= size
return data | Read a specified number of bytes from the file descriptor
This method emulates the normal file descriptor's ``read()`` method and
restricts the total number of bytes readable.
If file descriptor is not present (e.g., ``close()`` method had been
called), ``ValueError`` is raised.
If ``size`` is omitted, or ``None``, or any other falsy value, read
will be done up to the remaining length (constructor's ``length``
argument minus the bytes that have been read previously).
This method internally invokes the file descriptor's ``read()`` method,
and the method must accept a single integer positional argument. |
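A minimal sketch of the same length-capping idea around a plain file object; the class above additionally guards against a closed descriptor in the same way.

```python
import io

class CappedReader:
    def __init__(self, fd, length):
        self.fd, self.remaining = fd, length

    def read(self, size=None):
        if not self.fd:
            raise ValueError('I/O on closed file')
        size = min(self.remaining, size or self.remaining)
        if not size:
            return ''
        data = self.fd.read(size)
        self.remaining -= size
        return data

r = CappedReader(io.StringIO("abcdefgh"), 5)
print(r.read(3), r.read())   # 'abc' 'de' -- never more than 5 characters in total
```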
def file_root_name(name):
"""
Returns the root name of a file from a full file path.
It will not raise an error if the result is empty, but a warning will be
issued.
"""
base = os.path.basename(name)
root = os.path.splitext(base)[0]
if not root:
warning = 'file_root_name returned an empty root name from \"{0}\"'
log.warning(warning.format(name))
return root | Returns the root name of a file from a full file path.
It will not raise an error if the result is empty, but a warning will be
issued. |
def synthesize_software_module_info(modules, module_types):
"""
This function takes as input a dictionary of `modules` (mapping module IDs
to :class:`~openag.models.SoftwareModule` objects) and a dictionary of
`module_types` (mapping module type IDs to
:class:`~openag.models.FirmwareModuleType` objects). For each module, it
synthesizes the information in that module and the corresponding module
type and returns all the results in a dictionary keyed on the ID of the
module.
"""
res = {}
for mod_id, mod_info in modules.items():
mod_info = dict(mod_info)
mod_type = module_types[mod_info["type"]]
# Directly copy any fields only defined on the type
mod_info["package"] = mod_type["package"]
mod_info["executable"] = mod_type["executable"]
if not "categories" in mod_info:
mod_info["categories"] = mod_type.get(
"categories", all_categories
)
mod_info["inputs"] = mod_type["inputs"]
mod_info["outputs"] = mod_type["outputs"]
# Update the arguments
mod_info["arguments"] = process_args(
mod_id, mod_info.get("arguments", []), mod_type["arguments"]
)
# Update the parameters
mod_info["parameters"] = process_params(
mod_id, mod_info.get("parameters", {}), mod_type["parameters"]
)
res[mod_id] = mod_info
return res | This function takes as input a dictionary of `modules` (mapping module IDs
to :class:`~openag.models.SoftwareModule` objects) and a dictionary of
`module_types` (mapping module type IDs to
:class:`~openag.models.FirmwareModuleType` objects). For each module, it
synthesizes the information in that module and the corresponding module
type and returns all the results in a dictionary keyed on the ID of the
module. |
def setupArgparse():
"""Sets up argparse module to create command line options and parse them.
Uses the argparse module to add arguments to the command line for
faradayio-cli. Once the arguments are added and parsed the arguments are
returned
Returns:
argparse.Namespace: Populated namespace of arguments
"""
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument("callsign", help="Callsign of radio")
parser.add_argument("id", type=int, help="ID number radio")
# Optional arguments
parser.add_argument("-l", "--loopback", action="store_true",
help="Use software loopback serial port")
parser.add_argument("-p", "--port", default="/dev/ttyUSB0",
help="Physical serial port of radio")
# Parse and return arguments
return parser.parse_args() | Sets up argparse module to create command line options and parse them.
Uses the argparse module to add arguments to the command line for
faradayio-cli. Once the arguments are added and parsed the arguments are
returned
Returns:
argparse.Namespace: Populated namespace of arguments |
def update_wish_list_by_id(cls, wish_list_id, wish_list, **kwargs):
"""Update WishList
Update attributes of WishList
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True)
>>> result = thread.get()
:param async bool
:param str wish_list_id: ID of wishList to update. (required)
:param WishList wish_list: Attributes of wishList to update. (required)
:return: WishList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs)
else:
(data) = cls._update_wish_list_by_id_with_http_info(wish_list_id, wish_list, **kwargs)
return data | Update WishList
Update attributes of WishList
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_wish_list_by_id(wish_list_id, wish_list, async=True)
>>> result = thread.get()
:param async bool
:param str wish_list_id: ID of wishList to update. (required)
:param WishList wish_list: Attributes of wishList to update. (required)
:return: WishList
If the method is called asynchronously,
returns the request thread. |
def is_job_config(config):
"""
Check whether given dict of config is job config
"""
try:
# Every job has name
if config['config']['job']['name'] is not None:
return True
except KeyError:
return False
except TypeError:
return False
except IndexError:
return False
return False | Check whether given dict of config is job config |
def update(self, iterable={}, **kwargs):
"""
Recursively updates self with a given iterable.
TODO: rewrite this ugly stuff
"""
def _merge(a, *args):
for key, value in itertools.chain(*args):
if key in a and isinstance(value, (dict, Conf)):
value = _merge(a[key], value.items())
a[key] = value
return a
# adopt iterable sequence to unified interface: (key, value)
if isinstance(iterable, (dict, Conf)):
iterable = iterable.items()
# iterate and update values
_merge(self._data, iterable, kwargs.items()) | Recursively updates self with a given iterable.
TODO: rewrite this ugly stuff |
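A standalone sketch of the recursive merge semantics described above, on plain dicts with made-up data: nested dicts are merged key by key rather than replaced wholesale.

```python
import itertools

def merge(a, *mappings, **kwargs):
    for key, value in itertools.chain(*[m.items() for m in mappings], kwargs.items()):
        if key in a and isinstance(value, dict):
            value = merge(a[key], value)   # descend instead of overwriting
        a[key] = value
    return a

cfg = {'db': {'host': 'localhost', 'port': 5432}}
merge(cfg, {'db': {'port': 6432}}, debug=True)
print(cfg)   # {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}
```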
def _load_enums(root):
"""Returns {name: Enum}"""
out = collections.OrderedDict()
for elem in root.findall('enums/enum'):
name = elem.attrib['name']
value = elem.attrib['value']
comment = elem.get('comment')
out[name] = Enum(name, value, comment)
return out | Returns {name: Enum} |
def alter_edge(self, from_index, to_index,
new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError("Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
))
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]['weight'] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop] | Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return: |
def cmd(send, msg, args):
"""Gets a random Reddit post.
Syntax: {command} [subreddit]
"""
if msg and not check_exists(msg):
send("Non-existant subreddit.")
return
subreddit = msg if msg else None
send(random_post(subreddit, args['config']['api']['bitlykey'])) | Gets a random Reddit post.
Syntax: {command} [subreddit] |
def query(self, coords, return_sigma=False):
"""
Returns r-band extinction, A_r, at the given coordinates. Can also
return uncertainties.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
return_sigma (Optional[:obj:`bool`]): If ``True``, returns the uncertainty in
extinction as well. Defaults to ``False``.
Returns:
Extinction in the r-band at the specified coordinates, in mags.
The shape of the output depends on whether :obj:`coords` contains
distances.
If :obj:`coords` does not specify distance(s), then the shape of the
output begins with :obj:`coords.shape`. If :obj:`coords` does specify
distance(s), then the shape of the output begins with
``coords.shape + ([number of distance bins],)``.
"""
n_coords_ret = coords.shape[0]
# Determine if distance has been requested
has_dist = hasattr(coords.distance, 'kpc')
d = coords.distance.kpc if has_dist else None
# Convert coordinates to pixel indices
pix_idx = self._coords2idx(coords)
# Determine which coordinates are out of bounds
mask_idx = (pix_idx == self._n_pix)
if np.any(mask_idx):
pix_idx[mask_idx] = 0
# Which distances to extract
if has_dist:
d = coords.distance.kpc
dist_idx_ceil = np.searchsorted(self._dists, d)
ret = np.empty((n_coords_ret,), dtype='f8')
if return_sigma:
sigma_ret = np.empty((n_coords_ret,), dtype='f8')
# d < d(nearest distance slice)
idx_near = (dist_idx_ceil == 0) & ~mask_idx
print('d < d(nearest): {:d}'.format(np.sum(idx_near)))
if np.any(idx_near):
a = d[idx_near] / self._dists[0]
ret[idx_near] = a[:] * self._A[pix_idx[idx_near], 0]
if return_sigma:
sigma_ret[idx_near] = a[:] * self._sigma_A[pix_idx[idx_near], 0]
# d > d(farthest distance slice)
idx_far = (dist_idx_ceil == self._n_dists) & ~mask_idx
print('d > d(farthest): {:d}'.format(np.sum(idx_far)))
if np.any(idx_far):
ret[idx_far] = self._A[pix_idx[idx_far], -1]
if return_sigma:
sigma_ret[idx_far] = self._sigma_A[pix_idx[idx_far], -1]
# d(nearest distance slice) < d < d(farthest distance slice)
idx_btw = ~idx_near & ~idx_far & ~mask_idx
print('d(nearest) < d < d(farthest): {:d}'.format(np.sum(idx_btw)))
if np.any(idx_btw):
d_ceil = self._dists[dist_idx_ceil[idx_btw]]
d_floor = self._dists[dist_idx_ceil[idx_btw]-1]
a = (d_ceil - d[idx_btw]) / (d_ceil - d_floor)
ret[idx_btw] = (
(1.-a[:]) * self._A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]]
+ a[:] * self._A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1])
if return_sigma:
w0 = (1.-a)**2
w1 = a**2
norm = 1. / (w0 + w1)
w0 *= norm
w1 *= norm
sigma_ret[idx_btw] = np.sqrt(
w0 * self._sigma_A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]]**2
+ w1 * self._sigma_A[pix_idx[idx_btw], dist_idx_ceil[idx_btw]-1]**2
)
else:
# TODO: Harmonize order of distances & samples with Bayestar.
ret = self._A[pix_idx, :]
if return_sigma:
sigma_ret = self._sigma_A[pix_idx, :]
if np.any(mask_idx):
ret[mask_idx] = np.nan
if return_sigma:
sigma_ret[mask_idx] = np.nan
if return_sigma:
return ret, sigma_ret
return ret | Returns r-band extinction, A_r, at the given coordinates. Can also
return uncertainties.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.
return_sigma (Optional[:obj:`bool`]): If ``True``, returns the uncertainty in
extinction as well. Defaults to ``False``.
Returns:
Extinction in the r-band at the specified coordinates, in mags.
The shape of the output depends on whether :obj:`coords` contains
distances.
If :obj:`coords` does not specify distance(s), then the shape of the
output begins with :obj:`coords.shape`. If :obj:`coords` does specify
distance(s), then the shape of the output begins with
``coords.shape + ([number of distance bins],)``. |
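The distance handling above reduces to a linear interpolation between the two distance slices that bracket the query distance; a standalone numpy sketch of that weighting, with made-up slice values:

import numpy as np

dists = np.array([0.5, 1.0, 2.0, 4.0])   # kpc, distance slice centres (illustrative)
A = np.array([0.1, 0.3, 0.6, 0.9])       # extinction per slice for one pixel (illustrative)

d = 1.5                                   # query distance in kpc
i = np.searchsorted(dists, d)             # index of the first slice at or beyond d
a = (dists[i] - d) / (dists[i] - dists[i - 1])
A_interp = (1.0 - a) * A[i] + a * A[i - 1]
print(A_interp)                           # 0.45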
def makepipecomponent(idf, pname):
"""make a pipe component
generate inlet outlet names"""
apipe = idf.newidfobject("Pipe:Adiabatic".upper(), Name=pname)
apipe.Inlet_Node_Name = "%s_inlet" % (pname,)
apipe.Outlet_Node_Name = "%s_outlet" % (pname,)
return apipe | make a pipe component
generate inlet outlet names |
def _match_files_flat_hierarchy(self, text_files, audio_files):
"""
Match audio and text files in flat hierarchies.
Two files match if their names,
once removed the file extension,
are the same.
Examples: ::
foo/text/a.txt foo/audio/a.mp3 => match: ["a", "foo/text/a.txt", "foo/audio/a.mp3"]
foo/text/a.txt foo/audio/b.mp3 => no match
foo/res/c.txt foo/res/c.mp3 => match: ["c", "foo/res/c.txt", "foo/res/c.mp3"]
foo/res/d.txt foo/res/e.mp3 => no match
:param list text_files: the entries corresponding to text files
:param list audio_files: the entries corresponding to audio files
:rtype: list of lists (see above)
"""
self.log(u"Matching files in flat hierarchy")
self.log([u"Text files: '%s'", text_files])
self.log([u"Audio files: '%s'", audio_files])
d_text = {}
d_audio = {}
for text_file in text_files:
text_file_no_ext = gf.file_name_without_extension(text_file)
d_text[text_file_no_ext] = text_file
self.log([u"Added text file '%s' to key '%s'", text_file, text_file_no_ext])
for audio_file in audio_files:
audio_file_no_ext = gf.file_name_without_extension(audio_file)
d_audio[audio_file_no_ext] = audio_file
self.log([u"Added audio file '%s' to key '%s'", audio_file, audio_file_no_ext])
tasks = []
for key in d_text.keys():
self.log([u"Examining text key '%s'", key])
if key in d_audio:
self.log([u"Key '%s' is also in audio", key])
tasks.append([key, d_text[key], d_audio[key]])
self.log([u"Added pair ('%s', '%s')", d_text[key], d_audio[key]])
return tasks | Match audio and text files in flat hierarchies.
Two files match if their names,
once removed the file extension,
are the same.
Examples: ::
foo/text/a.txt foo/audio/a.mp3 => match: ["a", "foo/text/a.txt", "foo/audio/a.mp3"]
foo/text/a.txt foo/audio/b.mp3 => no match
foo/res/c.txt foo/res/c.mp3 => match: ["c", "foo/res/c.txt", "foo/res/c.mp3"]
foo/res/d.txt foo/res/e.mp3 => no match
:param list text_files: the entries corresponding to text files
:param list audio_files: the entries corresponding to audio files
:rtype: list of lists (see above) |
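The same stem-matching idea, stripped of the aeneas logging machinery, as a standalone sketch:

import os

def stem(path):
    return os.path.splitext(os.path.basename(path))[0]

text_files = ["foo/text/a.txt", "foo/res/c.txt", "foo/res/d.txt"]
audio_files = ["foo/audio/a.mp3", "foo/res/c.mp3", "foo/res/e.mp3"]

audio_by_stem = {stem(a): a for a in audio_files}
tasks = []
for t in text_files:
    key = stem(t)
    if key in audio_by_stem:
        tasks.append([key, t, audio_by_stem[key]])
print(tasks)
# [['a', 'foo/text/a.txt', 'foo/audio/a.mp3'], ['c', 'foo/res/c.txt', 'foo/res/c.mp3']]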
def parse_requested_expands(query_key, request):
"""
Extracts the value of the expand query string parameter from a request.
Supports comma separated lists.
:param query_key: The name of the query string parameter.
:param request: Request instance.
:return: List of strings representing the values of the expand query string parameter.
"""
requested_expands = []
for key, val in request.params.items():
if key == query_key:
requested_expands += val.split(',')
return requested_expands | Extracts the value of the expand query string parameter from a request.
Supports comma separated lists.
:param query_key: The name of the query string parameter.
:param request: Request instance.
:return: List of strings representing the values of the expand query string parameter. |
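A minimal sketch of calling it with a hypothetical request stub (only the .params mapping of the real framework request object is needed):

class FakeRequest:
    """Hypothetical stand-in exposing only the .params mapping."""
    def __init__(self, params):
        self.params = params

req = FakeRequest({'expand': 'author,comments', 'page': '2'})
print(parse_requested_expands('expand', req))   # ['author', 'comments']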
def _scalar_power(self, f, p, out):
"""Compute ``p``-th power of ``f`` for ``p`` scalar."""
# Avoid infinite recursions by making a copy of the function
f_copy = f.copy()
def pow_posint(x, n):
"""Power function for positive integer ``n``, out-of-place."""
if isinstance(x, np.ndarray):
y = x.copy()
return ipow_posint(y, n)
else:
return x ** n
def ipow_posint(x, n):
"""Power function for positive integer ``n``, in-place."""
if n == 1:
return x
elif n % 2 == 0:
x *= x
return ipow_posint(x, n // 2)
else:
tmp = x.copy()
x *= x
ipow_posint(x, n // 2)
x *= tmp
return x
def power_oop(x, **kwargs):
"""Power out-of-place evaluation function."""
if p == 0:
return self.one()
elif p == int(p) and p >= 1:
return np.asarray(pow_posint(f_copy(x, **kwargs), int(p)),
dtype=self.scalar_out_dtype)
else:
result = np.power(f_copy(x, **kwargs), p)
return result.astype(self.scalar_out_dtype)
out._call_out_of_place = power_oop
decorator = preload_first_arg(out, 'in-place')
out._call_in_place = decorator(_default_in_place)
out._call_has_out = out._call_out_optional = False
return out | Compute ``p``-th power of ``f`` for ``p`` scalar. |
def find_pulls(self, testpulls=None):
"""Finds a list of new pull requests that need to be processed.
:arg testpulls: a list of tserver.FakePull instances so we can test the code
functionality without making live requests to github.
"""
#We check all the repositories installed for new (open) pull requests.
#If any exist, we check the pull request number against our archive to
#see if we have to do anything for it.
result = {}
for lname, repo in self.repositories.items():
if lname not in self.archive:
raise ValueError("Trying to find pull requests for a repository "
"that hasn't been installed. Use server.install().")
if self.runnable is not None and lname not in self.runnable:
#We just ignore this repository completely and don't even bother
#performing a live check on github.
continue
pulls = testpulls if testpulls is not None else repo.repo.get_pulls("open")
result[lname] = []
for pull in pulls:
newpull = True
if pull.snumber in self.archive[lname]:
#Check the status of that pull request processing. If it was
#successful, we just ignore this open pull request; it is
#obviously waiting to be merged in.
if self.archive[lname][pull.snumber]["completed"] == True:
newpull = False
if newpull:
#Add the pull request to the list that needs to be processed.
#We don't add the request to the archive yet because the
#processing step hasn't happened yet.
result[lname].append(PullRequest(self, repo, pull, testpulls is not None))
return result | Finds a list of new pull requests that need to be processed.
:arg testpulls: a list of tserver.FakePull instances so we can test the code
functionality without making live requests to github. |
def toggle_buttons(self):
"""Turn buttons on and off."""
all_time_on = self.all_time.get_value()
all_chan_on = self.all_chan.get_value()
self.times['beg'].setEnabled(not all_time_on)
self.times['end'].setEnabled(not all_time_on)
self.idx_chan.setEnabled(not all_chan_on) | Turn buttons on and off. |
def get_selinux_status():
"""
get SELinux status of host
:return: string, one of Enforcing, Permissive, Disabled
"""
getenforce_command_exists()
# alternatively, we could read directly from /sys/fs/selinux/{enforce,status}, but status is
# empty (why?) and enforce doesn't tell whether SELinux is disabled or not
o = run_cmd(["getenforce"], return_output=True).strip() # libselinux-utils
logger.debug("SELinux is %r", o)
return o | get SELinux status of host
:return: string, one of Enforcing, Permissive, Disabled |
def update(self, *args, **kwargs):
"""
Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None
"""
for next_dict in chain(args, (kwargs, )):
for k, v in next_dict.items():
self[k] = v | Equivalent to the python dict update method.
Update the dictionary with the key/value pairs from other, overwriting
existing keys.
Args:
other (dict): The source of key value pairs to add to headers
Keyword Args:
All keyword arguments are stored in header directly
Returns:
None |
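A standalone sketch of the same merge order using itertools.chain: positional dicts apply left to right, keyword arguments last.

from itertools import chain

target = {}
args = ({'Accept': 'text/html'}, {'Accept': 'application/json'})
kwargs = {'X-Request-Id': 'abc123'}

for next_dict in chain(args, (kwargs,)):
    for k, v in next_dict.items():
        target[k] = v

print(target)   # {'Accept': 'application/json', 'X-Request-Id': 'abc123'}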
def to_designspace_instances(self):
"""Write instance data from self.font to self.designspace."""
for instance in self.font.instances:
if self.minimize_glyphs_diffs or (
is_instance_active(instance)
and _is_instance_included_in_family(self, instance)
):
_to_designspace_instance(self, instance) | Write instance data from self.font to self.designspace. |
def insured_losses(losses, deductible, insured_limit):
"""
:param losses: an array of ground-up loss ratios
:param float deductible: the deductible limit in fraction form
:param float insured_limit: the insured limit in fraction form
Compute insured losses for the given asset and losses, from the point
of view of the insurance company. For instance:
>>> insured_losses(numpy.array([3, 20, 101]), 5, 100)
array([ 0, 15, 95])
- if the loss is 3 (< 5) the company does not pay anything
- if the loss is 20 the company pays 20 - 5 = 15
- if the loss is 101 the company pays 100 - 5 = 95
"""
return numpy.piecewise(
losses,
[losses < deductible, losses > insured_limit],
[0, insured_limit - deductible, lambda x: x - deductible]) | :param losses: an array of ground-up loss ratios
:param float deductible: the deductible limit in fraction form
:param float insured_limit: the insured limit in fraction form
Compute insured losses for the given asset and losses, from the point
of view of the insurance company. For instance:
>>> insured_losses(numpy.array([3, 20, 101]), 5, 100)
array([ 0, 15, 95])
- if the loss is 3 (< 5) the company does not pay anything
- if the loss is 20 the company pays 20 - 5 = 15
- if the loss is 101 the company pays 100 - 5 = 95 |
def on_data(self, raw_data):
"""Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection.
"""
data = json.loads(raw_data)
message_type = data['meta'].get('type')
prepare_method = 'prepare_%s' % (message_type)
args = getattr(self, prepare_method, self.prepare_fallback)(data.get('data'))
method_name = 'on_%s' % (message_type,)
func = getattr(self, method_name, self.on_fallback)
func(*args, meta=StreamingMeta.from_response_data(data.get('meta'), self.api)) | Called when raw data is received from connection.
Override this method if you wish to manually handle
the stream data. Return False to stop stream and close connection. |
def atomic_output_file(dest_path, make_parents=False, backup_suffix=None, suffix=".partial.%s"):
"""
A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present.
"""
if dest_path == os.devnull:
# Handle the (probably rare) case of writing to /dev/null.
yield dest_path
else:
tmp_path = ("%s" + suffix) % (dest_path, new_uid())
if make_parents:
make_parent_dirs(tmp_path)
yield tmp_path
# Note this is not in a finally block, so that result won't be renamed to final location
# in case of abnormal exit.
if not os.path.exists(tmp_path):
raise IOError("failure in writing file '%s': target file '%s' missing" % (dest_path, tmp_path))
if backup_suffix:
move_to_backup(dest_path, backup_suffix=backup_suffix)
# If the target already exists, and is a directory, it has to be removed.
if os.path.isdir(dest_path):
shutil.rmtree(dest_path)
shutil.move(tmp_path, dest_path) | A context manager for convenience in writing a file or directory in an atomic way. Set up
a temporary name, then rename it after the operation is done, optionally making a backup of
the previous file or directory, if present. |
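A hedged usage sketch, assuming the @contextmanager decoration and the module helpers (new_uid, make_parent_dirs) this function relies on are available:

with atomic_output_file("out/data.json", make_parents=True) as tmp_path:
    with open(tmp_path, "w") as f:
        f.write('{"ok": true}\n')
# out/data.json exists only now; an exception inside the block leaves any
# previous version untouched, since the rename is skipped on abnormal exit.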
def stupid_hack(most=10, wait=None):
"""Return a random time between 1 - 10 Seconds."""
# Stupid Hack For Public Cloud so it is not overwhelmed with API requests.
if wait is not None:
time.sleep(wait)
else:
time.sleep(random.randrange(1, most)) | Return a random time between 1 - 10 Seconds. |
def read_pl_dataset(infile):
"""
Description:
Read from disk a Plackett-Luce dataset.
Parameters:
infile: open file object from which to read the dataset
"""
m, n = [int(i) for i in infile.readline().split(',')]
gamma = np.array([float(f) for f in infile.readline().split(',')])
if len(gamma) != m:
infile.close()
raise ValueError("malformed file: len(gamma) != m")
votes = []
i = 0
for line in infile:
vote = [int(v) for v in line.split(',')]
if len(vote) != m:
infile.close()
raise ValueError("malformed file: len(vote) != m")
votes.append(vote)
i += 1
infile.close()
if i != n:
raise ValueError("malformed file: number of votes != n")
return (gamma, np.array(votes)) | Description:
Read from disk a Plackett-Luce dataset.
Parameters:
infile: open file object from which to read the dataset |
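A hedged sketch of the expected on-disk layout, fed through io.StringIO: first line "m,n", second line the m gamma values, then n comma-separated votes of length m.

import io

text = ("3,2\n"
        "0.5,0.3,0.2\n"
        "0,1,2\n"
        "2,0,1\n")

gamma, votes = read_pl_dataset(io.StringIO(text))
print(gamma)   # [0.5 0.3 0.2]
print(votes)   # [[0 1 2]
               #  [2 0 1]]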
def get_livestate(self):
"""Get the SatelliteLink live state.
The live state is a tuple information containing a state identifier and a message, where:
state is:
- 0 for an up and running satellite
- 1 if the satellite is not reachable
- 2 if the satellite is dead
- 3 else (not active)
:return: tuple
"""
livestate = 0
if self.active:
if not self.reachable:
livestate = 1
elif not self.alive:
livestate = 2
else:
livestate = 3
livestate_output = "%s/%s is %s" % (self.type, self.name, [
"up and running.",
"warning because not reachable.",
"critical because not responding.",
"not active by configuration."
][livestate])
return (livestate, livestate_output) | Get the SatelliteLink live state.
The live state is a tuple information containing a state identifier and a message, where:
state is:
- 0 for an up and running satellite
- 1 if the satellite is not reachable
- 2 if the satellite is dead
- 3 else (not active)
:return: tuple |
def validate(self, validator=None, skip_relations=False):
"""Validate a GTFS
:param validator: a ValidationReport
:param (bool) skip_relations: skip validation of relations between entities (e.g. stop_times to stops)
:return:
"""
validator = validation.make_validator(validator)
self.log('Loading...')
self.preload()
# required
required = [
'agency',
'stops',
'routes',
'trips',
'stop_times',
'calendar'
]
for f in required:
self.log("Validating required file: %s"%f)
data = self.read(f)
for i in data:
i.validate(validator=validator)
if skip_relations is False:
i.validate_feed(validator=validator)
# optional
optional = [
'calendar_dates',
'fare_attributes',
'fare_rules',
'shapes',
'frequencies',
'transfers',
'feed_info'
]
for f in optional:
self.log("Validating optional file: %s"%f)
try:
data = self.read(f)
except KeyError:
data = []
for i in data:
i.validate(validator=validator)
if skip_relations is False:
i.validate_feed(validator=validator)
return validator | Validate a GTFS
:param validator: a ValidationReport
:param (bool) skip_relations: skip validation of relations between entities (e.g. stop_times to stops)
:return: |
def delete_feed(self, pid):
"""Delete a feed, identified by its local id.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local identifier of your feed you want to delete
"""
logger.info("delete_feed(pid=\"%s\") [lid=%s]", pid, self.__lid)
return self.__delete_point(R_FEED, pid) | Delete a feed, identified by its local id.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local identifier of your feed you want to delete |
def save(self) -> None:
"""Saves model to the save_path, provided in config. The directory is
already created by super().__init__, which is called in __init__ of this class"""
path = str(self.save_path.absolute())
log.info('[saving model to {}]'.format(path))
self._net.save(path) | Saves model to the save_path, provided in config. The directory is
already created by super().__init__, which is called in __init__ of this class |
def from_url(cls, url, **kwargs):
"""Create a client from a url."""
url = urllib3.util.parse_url(url)
if url.host:
kwargs.setdefault('host', url.host)
if url.port:
kwargs.setdefault('port', url.port)
if url.scheme == 'https':
kwargs.setdefault('connection_class', urllib3.HTTPSConnectionPool)
return cls(**kwargs) | Create a client from a url. |
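For reference, a standalone look at what urllib3.util.parse_url extracts from such a URL:

import urllib3

url = urllib3.util.parse_url('https://search.example.com:9200')
print(url.scheme, url.host, url.port)   # https search.example.com 9200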
def LockRetryWrapper(self,
subject,
retrywrap_timeout=1,
retrywrap_max_timeout=10,
blocking=True,
lease_time=None):
"""Retry a DBSubjectLock until it succeeds.
Args:
subject: The subject which the lock applies to.
retrywrap_timeout: How long to wait before retrying the lock.
retrywrap_max_timeout: The maximum time to wait for a retry until we
raise.
blocking: If False, raise on first lock failure.
lease_time: lock lease time in seconds.
Returns:
The DBSubjectLock object
Raises:
DBSubjectLockError: If the maximum retry count has been reached.
"""
timeout = 0
while timeout < retrywrap_max_timeout:
try:
return self.DBSubjectLock(subject, lease_time=lease_time)
except DBSubjectLockError:
if not blocking:
raise
stats_collector_instance.Get().IncrementCounter("datastore_retries")
time.sleep(retrywrap_timeout)
timeout += retrywrap_timeout
raise DBSubjectLockError("Retry number exceeded.") | Retry a DBSubjectLock until it succeeds.
Args:
subject: The subject which the lock applies to.
retrywrap_timeout: How long to wait before retrying the lock.
retrywrap_max_timeout: The maximum time to wait for a retry until we
raise.
blocking: If False, raise on first lock failure.
lease_time: lock lease time in seconds.
Returns:
The DBSubjectLock object
Raises:
DBSubjectLockError: If the maximum retry count has been reached. |
def parse_uci(self, uci: str) -> Move:
"""
Parses the given move in UCI notation.
Supports both Chess960 and standard UCI notation.
The returned move is guaranteed to be either legal or a null move.
:raises: :exc:`ValueError` if the move is invalid or illegal in the
current position (but not a null move).
"""
move = Move.from_uci(uci)
if not move:
return move
move = self._to_chess960(move)
move = self._from_chess960(self.chess960, move.from_square, move.to_square, move.promotion, move.drop)
if not self.is_legal(move):
raise ValueError("illegal uci: {!r} in {}".format(uci, self.fen()))
return move | Parses the given move in UCI notation.
Supports both Chess960 and standard UCI notation.
The returned move is guaranteed to be either legal or a null move.
:raises: :exc:`ValueError` if the move is invalid or illegal in the
current position (but not a null move). |
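A hedged usage sketch with python-chess, the library this method belongs to:

import chess

board = chess.Board()
move = board.parse_uci("e2e4")        # parsed and checked for legality
board.push(move)

board.parse_uci("0000")               # null move: allowed, never raises
# board.parse_uci("e2e5")             # would raise ValueError: illegal uci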
def add_prefix_from_pool(arg, opts):
""" Add prefix using from-pool to NIPAP
"""
args = {}
# sanity checking
if 'from-pool' in opts:
res = Pool.list({ 'name': opts['from-pool'] })
if len(res) == 0:
print("No pool named '%s' found." % opts['from-pool'], file=sys.stderr)
sys.exit(1)
args['from-pool'] = res[0]
if 'family' not in opts:
print("ERROR: You have to specify the address family.", file=sys.stderr)
sys.exit(1)
if opts['family'] == 'ipv4':
afis = [4]
elif opts['family'] == 'ipv6':
afis = [6]
elif opts['family'] == 'dual-stack':
afis = [4, 6]
if 'prefix_length' in opts:
print("ERROR: 'prefix_length' can not be specified for 'dual-stack' assignment", file=sys.stderr)
sys.exit(1)
else:
print("ERROR: 'family' must be one of: %s" % " ".join(valid_families), file=sys.stderr)
sys.exit(1)
if 'prefix_length' in opts:
args['prefix_length'] = int(opts['prefix_length'])
for afi in afis:
p = _prefix_from_opts(opts)
if opts.get('vrf_rt') is None:
# if no VRF is specified use the pools implied VRF
p.vrf = args['from-pool'].vrf
else:
# use the specified VRF
p.vrf = get_vrf(opts.get('vrf_rt'), abort=True)
# set type to default type of pool unless already set
if p.type is None:
if args['from-pool'].default_type is None:
print("ERROR: Type not specified and no default-type specified for pool: %s" % opts['from-pool'], file=sys.stderr)
p.type = args['from-pool'].default_type
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr)
return
p.avps[key] = value
args['family'] = afi
try:
p.save(args)
except NipapError as exc:
print("Could not add prefix to NIPAP: %s" % str(exc), file=sys.stderr)
sys.exit(1)
if p.type == 'host':
print("Host %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.node or p.description))
else:
print("Network %s added to %s: %s" % (p.display_prefix,
vrf_format(p.vrf), p.description))
if opts.get('add-hosts') is not None:
if p.type != 'assignment':
print("ERROR: Not possible to add hosts to non-assignment", file=sys.stderr)
sys.exit(1)
for host in opts.get('add-hosts').split(','):
h_opts = {
'from-prefix': p.prefix,
'vrf_rt': p.vrf.rt,
'type': 'host',
'node': host
}
add_prefix({}, h_opts, {}) | Add prefix using from-pool to NIPAP |
def _parse_vars_tbl(self, var_tbl):
"""Parse a table of variable bindings (dictionary with key = variable name)"""
# Find the length of each variable to infer T
T = self._check_forward_mode_input_dict(var_tbl)
# The shape of X based on T and m
shape = (T, 1)
# Initialize X to zeros in the correct shape
X = np.zeros(shape)
X[:,0] = var_tbl[self.var_name]
return X | Parse a table of variable bindings (dictionary with key = variable name) |
def _find_proj_root():
# type: () -> Optional[str]
""" Find the project path by going up the file tree.
This will look in the current directory and upwards for the pelconf file
(.yaml or .py)
"""
proj_files = frozenset(('pelconf.py', 'pelconf.yaml'))
curr = os.getcwd()
while curr.startswith('/') and len(curr) > 1:
if proj_files & frozenset(os.listdir(curr)):
return curr
else:
curr = os.path.dirname(curr)
return None | Find the project path by going up the file tree.
This will look in the current directory and upwards for the pelconf file
(.yaml or .py) |
def enclosing_frame(frame=None, level=2):
"""Get an enclosing frame that skips decorator code"""
frame = frame or sys._getframe(level)
while frame.f_globals.get('__name__') == __name__: frame = frame.f_back
return frame | Get an enclosing frame that skips decorator code |
def save_file(self, obj): # pylint: disable=too-many-branches
"""Save a file"""
try:
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
except ImportError:
import io as pystringIO # pylint: disable=reimported
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj is sys.stdout:
return self.save_reduce(getattr, (sys, 'stdout'), obj=obj)
if obj is sys.stderr:
return self.save_reduce(getattr, (sys, 'stderr'), obj=obj)
if obj is sys.stdin:
raise pickle.PicklingError("Cannot pickle standard input")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading")
name = obj.name
try:
fsize = os.stat(name).st_size
except OSError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name)
if obj.closed:
#create an empty closed string io
retval = pystringIO.StringIO("")
retval.close()
elif not fsize: #empty file
retval = pystringIO.StringIO("")
try:
tmpfile = file(name)
tst = tmpfile.read(1)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
tmpfile.close()
if tst != '':
raise pickle.PicklingError(
"Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
else:
try:
tmpfile = file(name)
contents = tmpfile.read()
tmpfile.close()
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval = pystringIO.StringIO(contents)
curloc = obj.tell()
retval.seek(curloc)
retval.name = name
self.save(retval)
self.memoize(obj) | Save a file |
def checkpoint(self, message, header=None, delay=0, **kwargs):
"""Send a message to the current recipe destination. This can be used to
keep a state for longer processing tasks.
:param delay: Delay transport of message by this many seconds
"""
if not self.transport:
raise ValueError(
"This RecipeWrapper object does not contain "
"a reference to a transport object."
)
if not self.recipe_step:
raise ValueError(
"This RecipeWrapper object does not contain "
"a recipe with a selected step."
)
kwargs["delay"] = delay
self._send_to_destination(
self.recipe_pointer, header, message, kwargs, add_path_step=False
) | Send a message to the current recipe destination. This can be used to
keep a state for longer processing tasks.
:param delay: Delay transport of message by this many seconds |
def do_struct(self, subcmd, opts, message):
"""${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list}
"""
client = MdClient(self.maildir, filesystem=self.filesystem)
as_json = getattr(opts, "json", False)
client.getstruct(message, as_json=as_json, stream=self.stdout) | ${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list} |
def feature_enabled(self, feature_name):
"""
Indicates whether the specified feature is enabled for the CPC of this
partition.
The HMC must generally support features, and the specified feature must
be available for the CPC.
For a list of available features, see section "Features" in the
:term:`HMC API`, or use the :meth:`feature_info` method.
Authorization requirements:
* Object-access permission to this partition.
Parameters:
feature_name (:term:`string`): The name of the feature.
Returns:
bool: `True` if the feature is enabled, or `False` if the feature is
disabled (but available).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`ValueError`: The specified feature is not available for the
CPC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
feature_list = self.prop('available-features-list', None)
if feature_list is None:
raise ValueError("Firmware features are not supported on CPC %s" %
self.manager.cpc.name)
for feature in feature_list:
if feature['name'] == feature_name:
break
else:
raise ValueError("Firmware feature %s is not available on CPC %s" %
(feature_name, self.manager.cpc.name))
return feature['state'] | Indicates whether the specified feature is enabled for the CPC of this
partition.
The HMC must generally support features, and the specified feature must
be available for the CPC.
For a list of available features, see section "Features" in the
:term:`HMC API`, or use the :meth:`feature_info` method.
Authorization requirements:
* Object-access permission to this partition.
Parameters:
feature_name (:term:`string`): The name of the feature.
Returns:
bool: `True` if the feature is enabled, or `False` if the feature is
disabled (but available).
Raises:
:exc:`ValueError`: Features are not supported on the HMC.
:exc:`ValueError`: The specified feature is not available for the
CPC.
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` |
def file_loc():
"""Return file and line number"""
import sys
import inspect
try:
raise Exception
except:
file_ = '.../' + '/'.join((inspect.currentframe().f_code.co_filename.split('/'))[-3:])
line_ = sys.exc_info()[2].tb_frame.f_back.f_lineno
return "{}:{}".format(file_, line_) | Return file and line number |
def setup_panel_params(self, coord):
"""
Calculate the x & y range & breaks information for each panel
Parameters
----------
coord : coord
Coordinate
"""
if not self.panel_scales_x:
raise PlotnineError('Missing an x scale')
if not self.panel_scales_y:
raise PlotnineError('Missing a y scale')
self.panel_params = []
cols = ['SCALE_X', 'SCALE_Y']
for i, j in self.layout[cols].itertuples(index=False):
i, j = i-1, j-1
params = coord.setup_panel_params(
self.panel_scales_x[i],
self.panel_scales_y[j])
self.panel_params.append(params) | Calculate the x & y range & breaks information for each panel
Parameters
----------
coord : coord
Coordinate |
def as_error(self) :
"fills in and returns an Error object that reports the specified error name and message."
result = dbus.Error.init()
result.set(self.args[0], self.args[1])
return \
result | fills in and returns an Error object that reports the specified error name and message. |
def get_for_model(self, obj):
"""Returns the tags for a specific model/content type."""
qs = Tag.objects.language(get_language())
qs = qs.filter(
tagged_items__content_type=ctype_models.ContentType.objects.get_for_model(obj)) # NOQA
return qs.distinct() | Returns the tags for a specific model/content type. |
def delete_pool(name):
"""Delete pool."""
try:
pool = pool_api.delete_pool(name=name)
except AirflowException as err:
_log.error(err)
response = jsonify(error="{}".format(err))
response.status_code = err.status_code
return response
else:
return jsonify(pool.to_json()) | Delete pool. |
def serialize(script_string):
'''
str -> bytearray
'''
string_tokens = script_string.split()
serialized_script = bytearray()
for token in string_tokens:
if token == 'OP_CODESEPARATOR' or token == 'OP_PUSHDATA4':
raise NotImplementedError('{} is a bad idea.'.format(token))
if token in riemann.network.CODE_TO_INT_OVERWRITE:
serialized_script.extend(
[riemann.network.CODE_TO_INT_OVERWRITE[token]])
elif token in CODE_TO_INT:
serialized_script.extend([CODE_TO_INT[token]])
else:
token_bytes = bytes.fromhex(token)
if len(token_bytes) <= 75:
op = 'OP_PUSH_{}'.format(len(token_bytes))
serialized_script.extend([CODE_TO_INT[op]])
serialized_script.extend(token_bytes)
elif len(token_bytes) > 75 and len(token_bytes) <= 255:
op = 'OP_PUSHDATA1'
serialized_script.extend([CODE_TO_INT[op]])
serialized_script.extend(utils.i2le(len(token_bytes)))
serialized_script.extend(token_bytes)
elif len(token_bytes) > 255 and len(token_bytes) <= 1000:
op = 'OP_PUSHDATA2'
serialized_script.extend([CODE_TO_INT[op]])
serialized_script.extend(
utils.i2le_padded(len(token_bytes), 2))
serialized_script.extend(token_bytes)
else:
raise NotImplementedError(
'Hex string too long to serialize.')
return serialized_script | str -> bytearray |
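The push-opcode selection above follows the standard Bitcoin script encoding; a standalone sketch of just that rule (opcode bytes 0x4c and 0x4d are OP_PUSHDATA1 and OP_PUSHDATA2):

def push_prefix(data: bytes) -> bytes:
    """Return the opcode prefix that precedes `data` in a serialized script."""
    if len(data) <= 75:
        return bytes([len(data)])                                # direct OP_PUSH_<n>
    if len(data) <= 255:
        return bytes([0x4c, len(data)])                          # OP_PUSHDATA1 + 1-byte length
    if len(data) <= 1000:
        return bytes([0x4d]) + len(data).to_bytes(2, 'little')   # OP_PUSHDATA2 + 2-byte LE length
    raise NotImplementedError('too long for this sketch')

print(push_prefix(b'\x00' * 20).hex())   # '14'  (a 20-byte hash160 push)
print(push_prefix(b'\x00' * 80).hex())   # '4c50'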
def get_substrates(self, material_id, number=50, orient=None):
"""
Get a substrate list for a material id. The list is in order of
increasing elastic energy if a elastic tensor is available for
the material_id. Otherwise the list is in order of increasing
matching area.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
orient (list) : substrate orientation to look for
number (int) : number of substrates to return;
n=0 returns all available matches
Returns:
list of dicts with substrate matches
"""
req = "/materials/{}/substrates?n={}".format(material_id, number)
if orient:
req += "&orient={}".format(" ".join(map(str, orient)))
return self._make_request(req) | Get a substrate list for a material id. The list is in order of
increasing elastic energy if an elastic tensor is available for
the material_id. Otherwise the list is in order of increasing
matching area.
Args:
material_id (str): Materials Project material_id, e.g. 'mp-123'.
orient (list) : substrate orientation to look for
number (int) : number of substrates to return;
n=0 returns all available matches
Returns:
list of dicts with substrate matches |
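A hedged usage sketch with the legacy pymatgen MPRester client; the import path, material id, and orientation are illustrative, and a valid Materials Project API key is required.

from pymatgen.ext.matproj import MPRester

with MPRester("YOUR_API_KEY") as mpr:
    matches = mpr.get_substrates("mp-2534", number=5, orient=[0, 0, 1])
    for match in matches:
        print(match)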
def main():
"""Define the CLI inteface/commands."""
arguments = docopt(__doc__)
cfg_filename = pkg_resources.resource_filename(
'knowledge_base',
'config/virtuoso.ini'
)
kb = KnowledgeBase(cfg_filename)
# the user has issued a `find` command
if arguments["find"]:
search_string = arguments["<search_string>"]
try:
urn = CTS_URN(search_string)
match = kb.get_resource_by_urn(str(urn))
show_result(match, verbose=True)
return
except BadCtsUrnSyntax as e:
pass
except IndexError as e:
raise e
print("\nNo records with this CTS URN!\n")
return
try:
matches = kb.search(search_string)
print("\nSearching for \"%s\" yielded %s results" % (
search_string,
len(matches)
))
print_results(matches)
return
except SparqlReaderException as e:
print("\nWildcard word needs at least 4 leading characters")
# the user has issued an `add` command
elif arguments["add"]:
input_urn = arguments["--to"]
# first let's check if it's a valid URN
try:
urn = CTS_URN(input_urn)
except Exception as e:
print("The provided URN ({}) is invalid!".format(input_urn))
return
try:
resource = kb.get_resource_by_urn(urn)
assert resource is not None
except ResourceNotFound:
print("The KB does not contain a resource identified by {}".format(
urn
))
return
print(arguments)
#if arguments[""]
pass | Define the CLI interface/commands. |
def one(self, command, params=None):
"""
Returns the first row of the response obtained via query
> db.query('SELECT * FROM users WHERE id=:id', {"id":MY_USER_ID})
:param command: SQL query
:param params: Parameters for prepared statements
:rtype: dict
"""
dr = self.query(command, params)
if dr['rows']:
return dr['rows'][0]
else:
return None | Returns the first row of the response obtained via query
> db.query('SELECT * FROM users WHERE id=:id', {"id":MY_USER_ID})
:param command: SQL query
:param params: Parameters for prepared statements
:rtype: dict |
def create_actor_delaunay(pts, color, **kwargs):
""" Creates a VTK actor for rendering triangulated plots using Delaunay triangulation.
Keyword Arguments:
* ``d3d``: flag to choose between Delaunay2D (``False``) and Delaunay3D (``True``). *Default: False*
:param pts: points
:type pts: vtkFloatArray
:param color: actor color
:type color: list
:return: a VTK actor
:rtype: vtkActor
"""
# Keyword arguments
array_name = kwargs.get('name', "")
array_index = kwargs.get('index', 0)
use_delaunay3d = kwargs.get("d3d", False)
# Create points
points = vtk.vtkPoints()
points.SetData(pts)
# Create a PolyData object and add points
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
# Apply Delaunay triangulation on the poly data object
triangulation = vtk.vtkDelaunay3D() if use_delaunay3d else vtk.vtkDelaunay2D()
triangulation.SetInputData(polydata)
# Map triangulated surface to the graphics primitives
mapper = vtk.vtkDataSetMapper()
mapper.SetInputConnection(triangulation.GetOutputPort())
mapper.SetArrayName(array_name)
mapper.SetArrayId(array_index)
# Create an actor and set its properties
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(*color)
# Return the actor
return actor | Creates a VTK actor for rendering triangulated plots using Delaunay triangulation.
Keyword Arguments:
* ``d3d``: flag to choose between Delaunay2D (``False``) and Delaunay3D (``True``). *Default: False*
:param pts: points
:type pts: vtkFloatArray
:param color: actor color
:type color: list
:return: a VTK actor
:rtype: vtkActor |
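A hedged usage sketch of feeding points in via VTK's numpy bridge (assumes the vtk Python bindings; the point cloud here is random):

import numpy as np
import vtk
from vtk.util import numpy_support

pts = np.random.rand(100, 3).astype(np.float32)          # x, y, z triples
vtk_pts = numpy_support.numpy_to_vtk(pts, deep=True)     # -> vtkFloatArray with 3 components

actor = create_actor_delaunay(vtk_pts, color=[1.0, 0.5, 0.0], d3d=False)
# The actor can then be added to a vtkRenderer as usual.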
def subdevicenames(self) -> Tuple[str, ...]:
"""A |tuple| containing the (sub)device names.
Property |NetCDFVariableFlat.subdevicenames| clarifies which
row of |NetCDFVariableAgg.array| contains which time series.
For 0-dimensional series like |lland_inputs.Nied|, the plain
device names are returned
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
>>> from hydpy.core.netcdftools import NetCDFVariableFlat
>>> ncvar = NetCDFVariableFlat('input_nied', isolate=False, timeaxis=1)
>>> for element in elements:
... nied1 = element.model.sequences.inputs.nied
... ncvar.log(nied1, nied1.series)
>>> ncvar.subdevicenames
('element1', 'element2', 'element3')
For higher dimensional sequences like |lland_fluxes.NKor|, an
additional suffix defines the index of the respective subdevice.
For example, the third row of |NetCDFVariableAgg.array| contains
the time series of the first hydrological response unit of the
second element:
>>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=1)
>>> for element in elements:
... nkor1 = element.model.sequences.fluxes.nkor
... ncvar.log(nkor1, nkor1.series)
>>> ncvar.subdevicenames[1:3]
('element2_0', 'element2_1')
"""
stats: List[str] = collections.deque()
for devicename, seq in self.sequences.items():
if seq.NDIM:
temp = devicename + '_'
for prod in self._product(seq.shape):
stats.append(temp + '_'.join(str(idx) for idx in prod))
else:
stats.append(devicename)
return tuple(stats) | A |tuple| containing the (sub)device names.
Property |NetCDFVariableFlat.subdevicenames| clarifies which
row of |NetCDFVariableAgg.array| contains which time series.
For 0-dimensional series like |lland_inputs.Nied|, the plain
device names are returned
>>> from hydpy.core.examples import prepare_io_example_1
>>> nodes, elements = prepare_io_example_1()
>>> from hydpy.core.netcdftools import NetCDFVariableFlat
>>> ncvar = NetCDFVariableFlat('input_nied', isolate=False, timeaxis=1)
>>> for element in elements:
... nied1 = element.model.sequences.inputs.nied
... ncvar.log(nied1, nied1.series)
>>> ncvar.subdevicenames
('element1', 'element2', 'element3')
For higher dimensional sequences like |lland_fluxes.NKor|, an
additional suffix defines the index of the respective subdevice.
For example, the third row of |NetCDFVariableAgg.array| contains
the time series of the first hydrological response unit of the
second element:
>>> ncvar = NetCDFVariableFlat('flux_nkor', isolate=False, timeaxis=1)
>>> for element in elements:
... nkor1 = element.model.sequences.fluxes.nkor
... ncvar.log(nkor1, nkor1.series)
>>> ncvar.subdevicenames[1:3]
('element2_0', 'element2_1') |
def get_array_for_fit(observables: dict, track_pt_bin: int, jet_pt_bin: int) -> histogram.Histogram1D:
""" Get a Histogram1D associated with the selected jet and track pt bins.
This is often used to retrieve data for fitting.
Args:
observables (dict): The observables from which the hist should be retrieved.
track_pt_bin (int): Track pt bin of the desired hist.
jet_pt_bin (int): Jet pt bin of the desired hist.
Returns:
Histogram1D: Converted TH1 or uproot histogram.
Raises:
ValueError: If the requested observable couldn't be found.
"""
for name, observable in observables.items():
if observable.track_pt_bin == track_pt_bin and observable.jet_pt_bin == jet_pt_bin:
return histogram.Histogram1D.from_existing_hist(observable.hist)
raise ValueError("Cannot find fit with jet pt bin {jet_pt_bin} and track pt bin {track_pt_bin}") | Get a Histogram1D associated with the selected jet and track pt bins.
This is often used to retrieve data for fitting.
Args:
observables (dict): The observables from which the hist should be retrieved.
track_pt_bin (int): Track pt bin of the desired hist.
jet_pt_bin (int): Jet pt bin of the desired hist.
Returns:
Histogram1D: Converted TH1 or uproot histogram.
Raises:
ValueError: If the requested observable couldn't be found. |
def get(self, *args, **kwargs):
"""Retrieve a collection of objects"""
self.before_get(args, kwargs)
qs = QSManager(request.args, self.schema)
objects_count, objects = self.get_collection(qs, kwargs)
schema_kwargs = getattr(self, 'get_schema_kwargs', dict())
schema_kwargs.update({'many': True})
self.before_marshmallow(args, kwargs)
schema = compute_schema(self.schema,
schema_kwargs,
qs,
qs.include)
result = schema.dump(objects).data
view_kwargs = request.view_args if getattr(self, 'view_kwargs', None) is True else dict()
add_pagination_links(result,
objects_count,
qs,
url_for(self.view, _external=True, **view_kwargs))
result.update({'meta': {'count': objects_count}})
final_result = self.after_get(result)
return final_result | Retrieve a collection of objects |
def upload_image(vol, img, offset, parallel=1,
manual_shared_memory_id=None, manual_shared_memory_bbox=None, manual_shared_memory_order='F'):
"""Upload img to vol with offset. This is the primary entry point for uploads."""
global NON_ALIGNED_WRITE
if not np.issubdtype(img.dtype, np.dtype(vol.dtype).type):
raise ValueError('The uploaded image data type must match the volume data type. volume: {}, image: {}'.format(vol.dtype, img.dtype))
(is_aligned, bounds, expanded) = check_grid_aligned(vol, img, offset)
if is_aligned:
upload_aligned(vol, img, offset, parallel=parallel,
manual_shared_memory_id=manual_shared_memory_id, manual_shared_memory_bbox=manual_shared_memory_bbox,
manual_shared_memory_order=manual_shared_memory_order)
return
elif vol.non_aligned_writes == False:
msg = NON_ALIGNED_WRITE.format(mip=vol.mip, chunk_size=vol.chunk_size, offset=vol.voxel_offset, got=bounds, check=expanded)
raise AlignmentError(msg)
# Upload the aligned core
retracted = bounds.shrink_to_chunk_size(vol.underlying, vol.voxel_offset)
core_bbox = retracted.clone() - bounds.minpt
if not core_bbox.subvoxel():
core_img = img[ core_bbox.to_slices() ]
upload_aligned(vol, core_img, retracted.minpt, parallel=parallel,
manual_shared_memory_id=manual_shared_memory_id, manual_shared_memory_bbox=manual_shared_memory_bbox,
manual_shared_memory_order=manual_shared_memory_order)
# Download the shell, paint, and upload
all_chunks = set(chunknames(expanded, vol.bounds, vol.key, vol.underlying))
core_chunks = set(chunknames(retracted, vol.bounds, vol.key, vol.underlying))
shell_chunks = all_chunks.difference(core_chunks)
def shade_and_upload(img3d, bbox):
# decode is returning non-writable chunk
# we're throwing them away so safe to write
img3d.setflags(write=1)
shade(img3d, bbox, img, bounds)
single_process_upload(vol, img3d, (( Vec(0,0,0), Vec(*img3d.shape[:3]), bbox.minpt, bbox.maxpt),), n_threads=0)
download_multiple(vol, shell_chunks, fn=shade_and_upload) | Upload img to vol with offset. This is the primary entry point for uploads. |
def tuning_config(tuner, inputs, job_name=None):
"""Export Airflow tuning config from an estimator
Args:
tuner (sagemaker.tuner.HyperparameterTuner): The tuner to export tuning config from.
inputs: Information about the training data. Please refer to the ``fit()`` method of
the associated estimator in the tuner, as this can take any of the following forms:
* (str) - The S3 location where training data is saved.
* (dict[str, str] or dict[str, sagemaker.session.s3_input]) - If using multiple channels for
training data, you can specify a dict mapping channel names
to strings or :func:`~sagemaker.session.s3_input` objects.
* (sagemaker.session.s3_input) - Channel configuration for S3 data sources that can provide
additional information about the training dataset. See :func:`sagemaker.session.s3_input`
for full details.
* (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
Amazon :class:~`Record` objects serialized and stored in S3.
For use with an estimator for an Amazon algorithm.
* (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
:class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is
a different channel of training data.
job_name (str): Specify a tuning job name if needed.
Returns:
dict: Tuning config that can be directly used by SageMakerTuningOperator in Airflow.
"""
train_config = training_base_config(tuner.estimator, inputs)
hyperparameters = train_config.pop('HyperParameters', None)
s3_operations = train_config.pop('S3Operations', None)
if hyperparameters and len(hyperparameters) > 0:
tuner.static_hyperparameters = \
{utils.to_str(k): utils.to_str(v) for (k, v) in hyperparameters.items()}
if job_name is not None:
tuner._current_job_name = job_name
else:
base_name = tuner.base_tuning_job_name or utils.base_name_from_image(tuner.estimator.train_image())
tuner._current_job_name = utils.name_from_base(base_name, tuner.TUNING_JOB_NAME_MAX_LENGTH, True)
for hyperparameter_name in tuner._hyperparameter_ranges.keys():
tuner.static_hyperparameters.pop(hyperparameter_name, None)
train_config['StaticHyperParameters'] = tuner.static_hyperparameters
tune_config = {
'HyperParameterTuningJobName': tuner._current_job_name,
'HyperParameterTuningJobConfig': {
'Strategy': tuner.strategy,
'HyperParameterTuningJobObjective': {
'Type': tuner.objective_type,
'MetricName': tuner.objective_metric_name,
},
'ResourceLimits': {
'MaxNumberOfTrainingJobs': tuner.max_jobs,
'MaxParallelTrainingJobs': tuner.max_parallel_jobs,
},
'ParameterRanges': tuner.hyperparameter_ranges(),
},
'TrainingJobDefinition': train_config
}
if tuner.metric_definitions is not None:
tune_config['TrainingJobDefinition']['AlgorithmSpecification']['MetricDefinitions'] = \
tuner.metric_definitions
if tuner.tags is not None:
tune_config['Tags'] = tuner.tags
if s3_operations is not None:
tune_config['S3Operations'] = s3_operations
return tune_config | Export Airflow tuning config from an estimator
Args:
tuner (sagemaker.tuner.HyperparameterTuner): The tuner to export tuning config from.
inputs: Information about the training data. Please refer to the ``fit()`` method of
the associated estimator in the tuner, as this can take any of the following forms:
* (str) - The S3 location where training data is saved.
* (dict[str, str] or dict[str, sagemaker.session.s3_input]) - If using multiple channels for
training data, you can specify a dict mapping channel names
to strings or :func:`~sagemaker.session.s3_input` objects.
* (sagemaker.session.s3_input) - Channel configuration for S3 data sources that can provide
additional information about the training dataset. See :func:`sagemaker.session.s3_input`
for full details.
* (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
Amazon :class:~`Record` objects serialized and stored in S3.
For use with an estimator for an Amazon algorithm.
* (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
:class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is
a different channel of training data.
job_name (str): Specify a tuning job name if needed.
Returns:
dict: Tuning config that can be directly used by SageMakerTuningOperator in Airflow. |
def _add_trits(left, right):
# type: (int, int) -> int
"""
Adds two individual trits together.
The result is always a single trit.
"""
res = left + right
return res if -2 < res < 2 else (res < 0) - (res > 0) | Adds two individual trits together.
The result is always a single trit. |
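A quick exhaustive check of the wrap-around behaviour: only 1 + 1 and -1 + -1 leave the single-trit range, and they wrap to -1 and 1 respectively (mod-3 arithmetic):

for left in (-1, 0, 1):
    for right in (-1, 0, 1):
        print(left, right, '->', _add_trits(left, right))
# 1 1 -> -1 and -1 -1 -> 1; every other pair is the plain sum.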