def get_live_data_flat_binary(self):
"""
Gets the live data packet in flatbuffer binary format. You'll need to do something like
GameTickPacket.GetRootAsGameTickPacket(binary, 0) to get the data out.
This is a temporary method designed to keep the integration test working. It returns the raw bytes
of the flatbuffer so that it can be stored in a file. We can get rid of this once we have a first-class
data recorder that lives inside the core dll.
"""
byte_buffer = self.game.UpdateLiveDataPacketFlatbuffer()
if byte_buffer.size >= 4: # GetRootAsGameTickPacket gets angry if the size is less than 4
# We're counting on this copying the data over to a new memory location so that the original
# pointer can be freed safely.
proto_string = ctypes.string_at(byte_buffer.ptr, byte_buffer.size)
self.game.Free(byte_buffer.ptr) # Avoid a memory leak
self.game_status(None, RLBotCoreStatus.Success)
return proto_string
def getReverseRankMaps(self):
"""
Returns a list of dictionaries, one for each preference, that associates each position in
the ranking with a list of integer representations of the candidates ranked at that
position.
"""
reverseRankMaps = []
for preference in self.preferences:
reverseRankMaps.append(preference.getReverseRankMap())
return reverseRankMaps
def function(self, addr=None, name=None, create=False, syscall=False, plt=None):
"""
Get a function object from the function manager.
Pass either `addr` or `name` with the appropriate values.
:param int addr: Address of the function.
:param str name: Name of the function.
:param bool create: Whether to create the function or not if the function does not exist.
:param bool syscall: True to create the function as a syscall, False otherwise.
:param bool or None plt: True to find the PLT stub, False to find a non-PLT stub, None to disable this
restriction.
:return: The Function instance, or None if the function is not found and create is False.
:rtype: Function or None
"""
if addr is not None:
try:
f = self._function_map.get(addr)
if plt is None or f.is_plt == plt:
return f
except KeyError:
if create:
# the function is not found
f = self._function_map[addr]
if name is not None:
f.name = name
if syscall:
f.is_syscall = True
return f
elif name is not None:
for func in self._function_map.values():
if func.name == name:
if plt is None or func.is_plt == plt:
return func
return None
def set_stop_chars_left(self, stop_chars):
"""
Set stop characters for text to the left of the TLD.
Stop characters are used when determining end of URL.
:param set stop_chars: set of characters
:raises: TypeError
"""
if not isinstance(stop_chars, set):
raise TypeError("stop_chars should be type set "
"but {} was given".format(type(stop_chars)))
self._stop_chars_left = stop_chars
self._stop_chars = self._stop_chars_left | self._stop_chars_right
def color_is_forced(**envars):
''' Look for clues in environment, e.g.:
- https://bixense.com/clicolors/
Arguments:
envars: Additional environment variables to check for
equality, i.e. ``MYAPP_COLOR_FORCED='1'``
Returns:
Bool: Forced
'''
result = env.CLICOLOR_FORCE and env.CLICOLOR_FORCE != '0'
log.debug('%s (CLICOLOR_FORCE=%s)', result, env.CLICOLOR_FORCE or '')
for name, value in envars.items():
envar = getattr(env, name)
if envar.value == value:
result = True
log.debug('%s == %r: %r', name, value, result)
return result
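A rough standalone sketch of the same check using os.environ directly, since the env helper object is not shown here; the MYAPP_COLOR_FORCED name is only an illustration.

import os

def color_is_forced_sketch(**envars):
    # CLICOLOR_FORCE set to anything other than '' or '0' forces color on.
    force = os.environ.get('CLICOLOR_FORCE', '')
    result = bool(force) and force != '0'
    # Any extra variable that equals its expected value also forces color.
    for name, value in envars.items():
        if os.environ.get(name) == value:
            result = True
    return result

# e.g. color_is_forced_sketch(MYAPP_COLOR_FORCED='1')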
def set_tag(self, tag):
'''
Sets the tag.
If the Entity belongs to the world it will check for tag conflicts.
'''
if self._world:
if self._world.get_entity_by_tag(tag):
raise NonUniqueTagError(tag)
self._tag = tag
def is_letter(uni_char):
"""Determine whether the given Unicode character is a Unicode letter"""
category = Category.get(uni_char)
return (category == Category.UPPERCASE_LETTER or
category == Category.LOWERCASE_LETTER or
category == Category.TITLECASE_LETTER or
category == Category.MODIFIER_LETTER or
category == Category.OTHER_LETTER)
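A comparable standalone check can be sketched with the standard library's unicodedata module, which exposes the same two-letter general-category codes; this is illustrative only and does not use the Category helper referenced above.

import unicodedata

def is_letter_stdlib(uni_char):
    # Lu/Ll/Lt/Lm/Lo are the uppercase, lowercase, titlecase, modifier and other letter categories.
    return unicodedata.category(uni_char) in {"Lu", "Ll", "Lt", "Lm", "Lo"}

print(is_letter_stdlib("A"), is_letter_stdlib("3"))  # True False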
def parse_note(cls, note):
"""Parse string annotation into object reference with optional name."""
if isinstance(note, tuple):
if len(note) != 2:
raise ValueError('tuple annotations must be length 2')
return note
try:
match = cls.re_note.match(note)
except TypeError:
# Note is not a string. Support any Python object as a note.
return note, None
return match.groups()
def update_template(self, template_id, template_dict):
"""
Updates a template
:param template_id: the template id
:param template_dict: dict
:return: dict
"""
return self._create_put_request(
resource=TEMPLATES,
billomat_id=template_id,
send_data=template_dict
)
def _compile_seriesflow(self):
"""Post power flow computation of series device flow"""
string = '"""\n'
for device, pflow, series in zip(self.devices, self.pflow,
self.series):
if pflow and series:
string += 'system.' + device + '.seriesflow(system.dae)\n'
string += '\n'
string += '"""'
self.seriesflow = compile(eval(string), '', 'exec')
def put_text(self, key, text):
"""Put the text into the storage associated with the key."""
with open(key, "w") as fh:
fh.write(text)
def lookup_casstype(casstype):
"""
Given a Cassandra type as a string (possibly including parameters), hand
back the CassandraType class responsible for it. If a name is not
recognized, a custom _UnrecognizedType subclass will be created for it.
Example:
>>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)')
<class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'>
"""
if isinstance(casstype, (CassandraType, CassandraTypeType)):
return casstype
try:
return parse_casstype_args(casstype)
except (ValueError, AssertionError, IndexError) as e:
raise ValueError("Don't know how to parse type string %r: %s" % (casstype, e)) | Given a Cassandra type as a string (possibly including parameters), hand
back the CassandraType class responsible for it. If a name is not
recognized, a custom _UnrecognizedType subclass will be created for it.
Example:
>>> lookup_casstype('org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.Int32Type)')
<class 'cassandra.cqltypes.MapType(UTF8Type, Int32Type)'> |
def error_values_summary(error_values, **summary_df_kwargs):
"""Get summary statistics about calculation errors, including estimated
implementation errors.
Parameters
----------
error_values: pandas DataFrame
Of format output by run_list_error_values (look at it for more
details).
summary_df_kwargs: dict, optional
See pandas_functions.summary_df docstring for more details.
Returns
-------
df: pandas DataFrame
Table showing means and standard deviations of results and diagnostics
for the different runs. Also contains estimated numerical uncertainties
on results.
"""
df = pf.summary_df_from_multi(error_values, **summary_df_kwargs)
# get implementation stds
imp_std, imp_std_unc, imp_frac, imp_frac_unc = \
nestcheck.error_analysis.implementation_std(
df.loc[('values std', 'value')],
df.loc[('values std', 'uncertainty')],
df.loc[('bootstrap std mean', 'value')],
df.loc[('bootstrap std mean', 'uncertainty')])
df.loc[('implementation std', 'value'), df.columns] = imp_std
df.loc[('implementation std', 'uncertainty'), df.columns] = imp_std_unc
df.loc[('implementation std frac', 'value'), :] = imp_frac
df.loc[('implementation std frac', 'uncertainty'), :] = imp_frac_unc
# Get implementation RMSEs (calculated using the values RMSE instead of
# values std)
if 'values rmse' in set(df.index.get_level_values('calculation type')):
imp_rmse, imp_rmse_unc, imp_frac, imp_frac_unc = \
nestcheck.error_analysis.implementation_std(
df.loc[('values rmse', 'value')],
df.loc[('values rmse', 'uncertainty')],
df.loc[('bootstrap std mean', 'value')],
df.loc[('bootstrap std mean', 'uncertainty')])
df.loc[('implementation rmse', 'value'), df.columns] = imp_rmse
df.loc[('implementation rmse', 'uncertainty'), df.columns] = \
imp_rmse_unc
df.loc[('implementation rmse frac', 'value'), :] = imp_frac
df.loc[('implementation rmse frac', 'uncertainty'), :] = imp_frac_unc
# Return only the calculation types we are interested in, in order
calcs_to_keep = ['true values', 'values mean', 'values std',
'values rmse', 'bootstrap std mean',
'implementation std', 'implementation std frac',
'implementation rmse', 'implementation rmse frac',
'thread ks pvalue mean', 'bootstrap ks distance mean',
'bootstrap energy distance mean',
'bootstrap earth mover distance mean']
df = pd.concat([df.xs(calc, level='calculation type', drop_level=False) for
calc in calcs_to_keep if calc in
df.index.get_level_values('calculation type')])
return df
def formatPathExpressions(seriesList):
"""
Returns a comma-separated list of unique path expressions.
"""
pathExpressions = sorted(set([s.pathExpression for s in seriesList]))
return ','.join(pathExpressions)
def add(self, item):
"""
Add a file to the manifest.
:param item: The pathname to add. This can be relative to the base.
"""
if not item.startswith(self.prefix):
item = os.path.join(self.base, item)
self.files.add(os.path.normpath(item))
def get_socket(host, port, timeout=None):
"""
Return a socket.
:param str host: the hostname to connect to
:param int port: the port number to connect to
:param timeout: if specified, set the socket timeout
"""
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not None:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except error:
if sock is not None:
sock.close()
raise error
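A minimal usage sketch, assuming the helper above is importable; example.org:80 is just a placeholder endpoint.

from socket import error

try:
    sock = get_socket('example.org', 80, timeout=5.0)
    sock.close()
except error as exc:
    print('connection failed:', exc)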
def get_max_id(self, object_type, role):
"""Get the highest used ID."""
if object_type == 'user':
objectclass = 'posixAccount'
ldap_attr = 'uidNumber'
elif object_type == 'group': # pragma: no cover
objectclass = 'posixGroup'
ldap_attr = 'gidNumber'
else:
raise ldap_tools.exceptions.InvalidResult('Unknown object type')
minID, maxID = Client.__set_id_boundary(role)
filter = [
"(objectclass={})".format(objectclass), "({}>={})".format(ldap_attr, minID)
]
if maxID is not None:
filter.append("({}<={})".format(ldap_attr, maxID))
id_list = self.search(filter, [ldap_attr])
if id_list == []:
id = minID
else:
if object_type == 'user':
id = max([i.uidNumber.value for i in id_list]) + 1
elif object_type == 'group':
id = max([i.gidNumber.value for i in id_list]) + 1
else:
raise ldap_tools.exceptions.InvalidResult('Unknown object')
return id
def _replace_numeric_markers(operation, string_parameters):
"""
Replaces qname, format, and numeric markers in the given operation, from
the string_parameters list.
Raises ProgrammingError on wrong number of parameters or bindings
when using qmark. There is no error checking on numeric parameters.
"""
def replace_markers(marker, op, parameters):
param_count = len(parameters)
marker_index = 0
start_offset = 0
while True:
found_offset = op.find(marker, start_offset)
if not found_offset > -1:
break
if marker_index < param_count:
op = op[:found_offset]+op[found_offset:].replace(marker, parameters[marker_index], 1)
start_offset = found_offset + len(parameters[marker_index])
marker_index += 1
else:
raise ProgrammingError("Incorrect number of bindings "
"supplied. The current statement uses "
"%d or more, and there are %d "
"supplied." % (marker_index + 1,
param_count))
if marker_index != 0 and marker_index != param_count:
raise ProgrammingError("Incorrect number of bindings "
"supplied. The current statement uses "
"%d or more, and there are %d supplied." %
(marker_index + 1, param_count))
return op
# replace qmark parameters and format parameters
operation = replace_markers('?', operation, string_parameters)
operation = replace_markers(r'%s', operation, string_parameters)
# replace numbered parameters
# Go through them backwards so smaller numbers don't replace
# parts of larger ones
for index in range(len(string_parameters), 0, -1):
operation = operation.replace(':' + str(index),
string_parameters[index - 1])
return operation
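A quick illustrative call, assuming the parameters have already been rendered as SQL string literals at this stage of the driver:

sql = 'SELECT * FROM users WHERE name = ? AND age > ?'
print(_replace_numeric_markers(sql, ["'alice'", '30']))
# SELECT * FROM users WHERE name = 'alice' AND age > 30

sql_numbered = 'SELECT * FROM users WHERE name = :1 AND age > :2'
print(_replace_numeric_markers(sql_numbered, ["'alice'", '30']))
# SELECT * FROM users WHERE name = 'alice' AND age > 30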
def infer_year(date):
"""Given a datetime-like object or string infer the year.
Parameters
----------
date : datetime-like object or str
Input date
Returns
-------
int
Examples
--------
>>> infer_year('2000')
2000
>>> infer_year('2000-01')
2000
>>> infer_year('2000-01-31')
2000
>>> infer_year(datetime.datetime(2000, 1, 1))
2000
>>> infer_year(np.datetime64('2000-01-01'))
2000
>>> infer_year(DatetimeNoLeap(2000, 1, 1))
2000
>>>
"""
if isinstance(date, str):
# Look for a string that begins with four numbers; the first four
# numbers found are the year.
pattern = r'(?P<year>\d{4})'
result = re.match(pattern, date)
if result:
return int(result.groupdict()['year'])
else:
raise ValueError('Invalid date string provided: {}'.format(date))
elif isinstance(date, np.datetime64):
return date.item().year
else:
return date.year
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
"""
# see comments on convert_gradient_to_tensor
stitched = common_layers.convert_gradient_to_tensor(
tf.concat(expert_out, 0))
if multiply_by_gates:
stitched *= tf.expand_dims(self._nonzero_gates, 1)
combined = tf.unsorted_segment_sum(stitched, self._batch_index,
tf.shape(self._gates)[0])
return combined
def cancel_broadcast(self, broadcast_guid):
'''
Cancel a broadcast specified by guid
'''
subpath = 'broadcasts/%s/update' % broadcast_guid
broadcast = {'status': 'CANCELED'}
bcast_dict = self._call(subpath, method='POST', data=broadcast,
content_type='application/json')
return bcast_dict
def get_image(roi_rec, short, max_size, mean, std):
"""
read, resize, transform image, return im_tensor, im_info, gt_boxes
roi_rec should have keys: ["image", "boxes", "gt_classes", "flipped"]
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
im = imdecode(roi_rec['image'])
if roi_rec["flipped"]:
im = im[:, ::-1, :]
im, im_scale = resize(im, short, max_size)
height, width = im.shape[:2]
im_info = np.array([height, width, im_scale], dtype=np.float32)
im_tensor = transform(im, mean, std)
# gt boxes: (x1, y1, x2, y2, cls)
if roi_rec['gt_classes'].size > 0:
gt_inds = np.where(roi_rec['gt_classes'] != 0)[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roi_rec['boxes'][gt_inds, :]
gt_boxes[:, 4] = roi_rec['gt_classes'][gt_inds]
# scale gt_boxes
gt_boxes[:, 0:4] *= im_scale
else:
gt_boxes = np.empty((0, 5), dtype=np.float32)
return im_tensor, im_info, gt_boxes
def get_inters(r, L, R_cut):
'''
Return points within a given cut-off of each other,
in a periodic system.
Uses a cell-list.
Parameters
----------
r: array, shape (n, d) where d is one of (2, 3).
A set of n point coordinates.
Coordinates are assumed to lie in [-L / 2, L / 2].
L: float.
Bounds of the system.
R_cut: float.
The maximum distance within which to consider points to lie
near each other.
Returns
-------
inters: array, shape (n, n)
Indices of the nearby points.
For each particle indexed by the first axis,
the second axis lists each index of a nearby point.
intersi: array, shape (n,)
Total number of nearby points.
This array should be used to index `inters`, as for point `i`,
elements in `inters[i]` beyond `intersi[i]` have no well-defined value.
'''
if r.shape[1] == 2:
_cell_list.cell_list_2d.make_inters(r.T, L, R_cut)
elif r.shape[1] == 3:
_cell_list.cell_list_3d.make_inters(r.T, L, R_cut)
else:
print('Warning: cell list not implemented in this dimension, falling '
'back to direct computation')
return get_inters_direct(r, L, R_cut)
return _parse_inters()
def send_message(self, number, content):
"""
Send message
:param str number: phone number with cc (country code)
:param str content: body text of the message
"""
outgoing_message = TextMessageProtocolEntity(content.encode("utf-8") if sys.version_info >= (3, 0)
else content, to=self.normalize_jid(number))
self.toLower(outgoing_message)
return outgoing_message
def clearLocatorCache(self, login, tableName):
"""
Parameters:
- login
- tableName
"""
self.send_clearLocatorCache(login, tableName)
self.recv_clearLocatorCache()
def _check_token(self):
""" Simple Mercedes me API.
"""
need_token = (self._token_info is None or
self.auth_handler.is_token_expired(self._token_info))
if need_token:
new_token = \
self.auth_handler.refresh_access_token(
self._token_info['refresh_token'])
# skip when refresh failed
if new_token is None:
return
self._token_info = new_token
self._auth_header = {"content-type": "application/json",
"Authorization": "Bearer {}".format(
self._token_info.get('access_token'))}
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
_logger.debug('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self.EXCHANGE_TYPE,
durable=True,
passive=True)
def chart_part(self):
"""
The |ChartPart| object containing the chart in this graphic frame.
"""
rId = self._element.chart_rId
chart_part = self.part.related_parts[rId]
return chart_part
def _make_writeable(filename):
"""
Make sure that the file is writeable. Useful if our source is
read-only.
"""
import stat
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
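The same owner-write-bit idiom as a one-off sketch, with a placeholder path:

import os
import stat

path = 'example.txt'  # placeholder path
mode = stat.S_IMODE(os.stat(path).st_mode)
os.chmod(path, mode | stat.S_IWUSR)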
def _change_color(self, event):
"""Respond to motion of the hsv cursor."""
h = self.bar.get()
self.square.set_hue(h)
(r, g, b), (h, s, v), sel_color = self.square.get()
self.red.set(r)
self.green.set(g)
self.blue.set(b)
self.hue.set(h)
self.saturation.set(s)
self.value.set(v)
self.hexa.delete(0, "end")
self.hexa.insert(0, sel_color.upper())
if self.alpha_channel:
self.alphabar.set_color((r, g, b))
self.hexa.insert('end',
("%2.2x" % self.alpha.get()).upper())
self._update_preview()
def p_expression_logical(self, p):
"""
expression : expression logical expression
"""
p[0] = Expression(left=p[1], operator=p[2], right=p[3])
def coarsen_line(line, level=2, exponential=False, draw=True):
"""
Coarsens the specified line (see spinmob.coarsen_data() for more information).
Parameters
----------
line
Matplotlib line instance.
level=2
How strongly to coarsen.
exponential=False
If True, use the exponential method (great for log-x plots).
draw=True
Redraw when complete.
"""
# get the actual data values
xdata = line.get_xdata()
ydata = line.get_ydata()
xdata,ydata = _fun.coarsen_data(xdata, ydata, level=level, exponential=exponential)
# don't do anything if we don't have any data left
if len(ydata) == 0: print("There's nothing left in "+str(line)+"!")
# otherwise set the data with the new arrays
else: line.set_data(xdata, ydata)
# we refresh in real time for giggles
if draw: _pylab.draw()
def get_cipher(self):
"""
Return a new Cipher object for each time we want to encrypt/decrypt. This is because
pgcrypto expects a zeroed block for IV (initial value), but the IV on the cipher
object is cumulatively updated each time encrypt/decrypt is called.
"""
return self.cipher_class.new(self.cipher_key, self.cipher_class.MODE_CBC, b'\0' * self.cipher_class.block_size)
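For illustration, the equivalent construction with PyCryptodome's AES (an assumption; the class above accepts any cipher_class exposing MODE_CBC and block_size), using the zeroed IV that pgcrypto expects:

from Crypto.Cipher import AES

key = b'0' * 32  # placeholder 256-bit key
cipher = AES.new(key, AES.MODE_CBC, b'\0' * AES.block_size)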
def read_relative_file(filename, relative_to=None):
"""Returns contents of the given file, which path is supposed relative
to this package."""
if relative_to is None:
relative_to = os.path.dirname(__file__)
with open(os.path.join(os.path.dirname(relative_to), filename)) as f:
return f.read()
def findNestedClassLike(self, lst):
'''
Recursive helper function for finding nested classes and structs. If this node
is a class or struct, it is appended to ``lst``. Each node also calls
``findNestedClassLike`` on each of its children with the same list.
:Parameters:
``lst`` (list)
The list each class or struct node is to be appended to.
'''
if self.kind == "class" or self.kind == "struct":
lst.append(self)
for c in self.children:
c.findNestedClassLike(lst)
def list_actions(name, location='\\'):
r'''
List all actions that pertain to a task in the specified location.
:param str name: The name of the task for which to list actions.
:param str location: A string value representing the location of the task
from which to list actions. Default is '\\' which is the root for the
task scheduler (C:\Windows\System32\tasks).
:return: Returns a list of actions.
:rtype: list
CLI Example:
.. code-block:: bash
salt 'minion-id' task.list_actions <task_name>
'''
# Create the task service object
with salt.utils.winapi.Com():
task_service = win32com.client.Dispatch("Schedule.Service")
task_service.Connect()
# Get the folder to list folders from
task_folder = task_service.GetFolder(location)
task_definition = task_folder.GetTask(name).Definition
actions = task_definition.Actions
ret = []
for action in actions:
ret.append(action.Id)
return ret
def frames(self, flush=True):
""" Returns the latest color image from the stream
Raises:
Exception if opencv sensor gives ret_val of 0
"""
self.flush()
ret_val, frame = self._sensor.read()
if not ret_val:
raise Exception("Unable to retrieve frame from OpenCVCameraSensor for id {0}".format(self._device_id))
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self._upside_down:
frame = np.flipud(frame).astype(np.uint8)
frame = np.fliplr(frame).astype(np.uint8)
return ColorImage(frame)
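The underlying OpenCV sequence can be sketched directly against a raw capture device; device index 0 is an assumption about which camera to open.

import cv2

cap = cv2.VideoCapture(0)
ret_val, frame = cap.read()
if not ret_val:
    raise Exception('Unable to retrieve frame from device 0')
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
cap.release()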
def get_dependent_items(self, item) -> typing.List:
"""Return the list of data items containing data that directly depends on data in this item."""
with self.__dependency_tree_lock:
return copy.copy(self.__dependency_tree_source_to_target_map.get(weakref.ref(item), list()))
def pad(num, n=2, sign=False):
'''returns an n-digit string representation of num'''
s = unicode(abs(num))
if len(s) < n:
s = '0' * (n - len(s)) + s
if not sign:
return s
if num >= 0:
return '+' + s
else:
return '-' + s
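Example behaviour (note that pad relies on the Python 2 builtin unicode):

print(pad(7))                  # '07'
print(pad(7, n=3, sign=True))  # '+007'
print(pad(-7, sign=True))      # '-07'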
def _pct_escape_handler(err):
'''
Encoding error handler that does percent-escaping of Unicode, to be used
with codecs.register_error
TODO: replace use of this with urllib.parse.quote as appropriate
'''
chunk = err.object[err.start:err.end]
replacements = _pct_encoded_replacements(chunk)
return ("".join(replacements), err.end) | Encoding error handler that does percent-escaping of Unicode, to be used
with codecs.register_error
TODO: replace use of this with urllib.parse.quote as appropriate |
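A self-contained sketch of registering such a handler with codecs.register_error; the percent-encoding here is simplified and the handler name is arbitrary.

import codecs

def pct_escape_demo(err):
    # Percent-encode the UTF-8 bytes of the characters that failed to encode.
    chunk = err.object[err.start:err.end]
    return ''.join('%%%02X' % b for b in chunk.encode('utf-8')), err.end

codecs.register_error('pct_escape_demo', pct_escape_demo)
print('café'.encode('ascii', errors='pct_escape_demo'))  # b'caf%C3%A9'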
def clean(self, timeout=60):
"""Deletes the contents of the index.
This method blocks until the index is empty, because it needs to restore
values at the end of the operation.
:param timeout: The time-out period for the operation, in seconds (the
default is 60).
:type timeout: ``integer``
:return: The :class:`Index`.
"""
self.refresh()
tds = self['maxTotalDataSizeMB']
ftp = self['frozenTimePeriodInSecs']
was_disabled_initially = self.disabled
try:
if (not was_disabled_initially and \
self.service.splunk_version < (5,)):
# Need to disable the index first on Splunk 4.x,
# but it doesn't work to disable it on 5.0.
self.disable()
self.update(maxTotalDataSizeMB=1, frozenTimePeriodInSecs=1)
self.roll_hot_buckets()
# Wait until event count goes to 0.
start = datetime.now()
diff = timedelta(seconds=timeout)
while self.content.totalEventCount != '0' and datetime.now() < start+diff:
sleep(1)
self.refresh()
if self.content.totalEventCount != '0':
raise OperationError("Cleaning index %s took longer than %s seconds; timing out." % (self.name, timeout))
finally:
# Restore original values
self.update(maxTotalDataSizeMB=tds, frozenTimePeriodInSecs=ftp)
if (not was_disabled_initially and \
self.service.splunk_version < (5,)):
# Re-enable the index if it was originally enabled and we messed with it.
self.enable()
return self
def set_href_prefix(self, prefix):
"""
Set the prefix of any hrefs associated with this thing.
prefix -- the prefix
"""
self.href_prefix = prefix
for property_ in self.properties.values():
property_.set_href_prefix(prefix)
for action_name in self.actions.keys():
for action in self.actions[action_name]:
action.set_href_prefix(prefix)
def get_status_code_and_schema_rst(self, responses):
'''
Prepare information about responses with examples; only responses with
status codes from `101` to `299` are processed.
:param responses: -- dictionary that contains responses, with status code as key
:type responses: dict
:return:
'''
for status_code, response_schema in responses.items():
status_code = int(status_code)
schema = response_schema.get('schema', None)
status = HTTP_STATUS_CODES.get(status_code, None)
if status is None or not (100 < status_code < 300):
continue
self.write('**Example Response**', 1)
self.write('')
self.write('.. code-block:: http', 1)
self.write('')
self.write('HTTP/1.1 {} {}'.format(status_code, status), 2)
self.write('Vary: {}'.format(response_schema['description']), 2)
self.write('Content-Type: application/json', 2)
self.write('')
if schema:
self.schema_handler(schema)
else:
self.write('{}', self.indent_depth)
def plot_groups_unplaced(self, fout_dir=".", **kws_usr):
"""Plot each GO group."""
# kws: go2color max_gos upper_trigger max_upper
plotobj = PltGroupedGos(self)
return plotobj.plot_groups_unplaced(fout_dir, **kws_usr)
def sampling_volume_value(self):
"""Returns the device samping volume value in m."""
svi = self.pdx.SamplingVolume
tli = self.pdx.TransmitLength
return self._sampling_volume_value(svi, tli)
def set_color_scheme(self, foreground_color, background_color):
"""Set color scheme of the console (foreground and background)."""
self.ansi_handler.set_color_scheme(foreground_color, background_color)
background_color = QColor(background_color)
foreground_color = QColor(foreground_color)
self.set_palette(background=background_color,
foreground=foreground_color)
self.set_pythonshell_font()
def get_privacy_options(user):
"""Get a user's privacy options to pass as an initial value to a PrivacyOptionsForm."""
privacy_options = {}
for ptype in user.permissions:
for field in user.permissions[ptype]:
if ptype == "self":
privacy_options["{}-{}".format(field, ptype)] = user.permissions[ptype][field]
else:
privacy_options[field] = user.permissions[ptype][field]
return privacy_options
def cumulative_detections(dates=None, template_names=None, detections=None,
plot_grouped=False, group_name=None, rate=False,
plot_legend=True, ax=None, **kwargs):
"""
Plot cumulative detections or detection rate in time.
Simple plotting function to take a list of either datetime objects or
:class:`eqcorrscan.core.match_filter.Detection` objects and plot
a cumulative detections list. Can take dates as a list of lists and will
plot each list separately, e.g. if you have dates from more than one
template it will overlay them in different colours.
:type dates: list
:param dates: Must be a list of lists of datetime.datetime objects
:type template_names: list
:param template_names: List of the template names in order of the dates
:type detections: list
:param detections: List of :class:`eqcorrscan.core.match_filter.Detection`
:type plot_grouped: bool
:param plot_grouped:
Plot detections for each template individually, or group them all
together - set to False (plot template detections individually) by
default.
:type rate: bool
:param rate:
Whether or not to plot the rate of detection per day. Only works for
plot_grouped=True
:type plot_legend: bool
:param plot_legend:
Specify whether to plot legend of template names. Defaults to True.
:returns: :class:`matplotlib.figure.Figure`
.. note::
Can either take lists of
:class:`eqcorrscan.core.match_filter.Detection` objects directly, or
two lists of dates and template names - either/or, not both.
.. rubric:: Example
>>> import datetime as dt
>>> import numpy as np
>>> from eqcorrscan.utils.plotting import cumulative_detections
>>> dates = []
>>> for i in range(3):
... dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n)
... for n in np.random.randn(100)])
>>> cumulative_detections(dates, ['a', 'b', 'c'],
... show=True) # doctest: +SKIP
.. plot::
import datetime as dt
import numpy as np
from eqcorrscan.utils.plotting import cumulative_detections
dates = []
for i in range(3):
dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n)
for n in np.random.randn(100)])
cumulative_detections(dates, ['a', 'b', 'c'], show=True)
.. rubric:: Example 2: Rate plotting
>>> import datetime as dt
>>> import numpy as np
>>> from eqcorrscan.utils.plotting import cumulative_detections
>>> dates = []
>>> for i in range(3):
... dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n)
... for n in np.random.randn(100)])
>>> cumulative_detections(dates, ['a', 'b', 'c'], plot_grouped=True,
... rate=True, show=True) # doctest: +SKIP
.. plot::
import datetime as dt
import numpy as np
from eqcorrscan.utils.plotting import cumulative_detections
dates = []
for i in range(3):
dates.append([dt.datetime(2012, 3, 26) + dt.timedelta(n)
for n in np.random.randn(100)])
cumulative_detections(dates, ['a', 'b', 'c'], plot_grouped=True,
rate=True, show=True)
"""
import matplotlib.pyplot as plt
from eqcorrscan.core.match_filter import Detection
# Set up a default series of parameters for lines
colors = cycle(['red', 'green', 'blue', 'cyan', 'magenta', 'yellow',
'black', 'firebrick', 'purple', 'darkgoldenrod', 'gray'])
linestyles = cycle(['-', '-.', '--', ':'])
# Check that dates is a list of lists
if not detections:
if type(dates[0]) != list:
dates = [dates]
else:
dates = []
template_names = []
for detection in detections:
if not type(detection) == Detection:
raise IOError(
'detection not of type: eqcorrscan.core.match_filter'
'.Detection')
dates.append(detection.detect_time.datetime)
template_names.append(detection.template_name)
_dates = []
_template_names = []
for template_name in list(set(template_names)):
_template_names.append(template_name)
_dates.append([date for i, date in enumerate(dates)
if template_names[i] == template_name])
dates = _dates
template_names = _template_names
if plot_grouped:
_dates = []
for template_dates in dates:
_dates += template_dates
dates = [_dates]
if group_name:
template_names = group_name
else:
template_names = ['All templates']
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
else:
fig = ax.figure  # Axes.figure is an attribute, not a callable
# Make sure not to pad at edges
ax.margins(0, 0)
min_date = min([min(_d) for _d in dates])
max_date = max([max(_d) for _d in dates])
for k, template_dates in enumerate(dates):
template_dates.sort()
plot_dates = deepcopy(template_dates)
plot_dates.insert(0, min_date)
plot_dates.insert(-1, template_dates[-1])
color = next(colors)
if color == 'red':
linestyle = next(linestyles)
counts = np.arange(-1, len(template_dates) + 1)
if rate:
if not plot_grouped:
msg = 'Plotting rate only implemented for plot_grouped=True'
raise NotImplementedError(msg)
if 31 < (max_date - min_date).days < 365:
bins = (max_date - min_date).days
ax.set_ylabel('Detections per day')
elif (max_date - min_date).days <= 31:
bins = (max_date - min_date).days * 4
ax.set_ylabel('Detections per 6 hour bin')
else:
bins = (max_date - min_date).days // 7
ax.set_ylabel('Detections per week')
if len(plot_dates) <= 10:
bins = 1
ax.hist(mdates.date2num(plot_dates), bins=bins,
label='Rate of detections', color='darkgrey',
alpha=0.5)
else:
ax.plot(plot_dates, counts, linestyle,
color=color, label=template_names[k],
linewidth=2.0, drawstyle='steps')
ax.set_ylabel('Cumulative detections')
ax.set_xlabel('Date')
# Set formatters for x-labels
mins = mdates.MinuteLocator()
max_date = dates[0][0]
min_date = max_date
for date_list in dates:
if max(date_list) > max_date:
max_date = max(date_list)
if min(date_list) < min_date:
min_date = min(date_list)
timedif = max_date - min_date
if 10800 <= timedif.total_seconds() <= 25200:
hours = mdates.MinuteLocator(byminute=[0, 30])
mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 10))
elif 7200 <= timedif.total_seconds() < 10800:
hours = mdates.MinuteLocator(byminute=[0, 15, 30, 45])
mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 5))
elif timedif.total_seconds() <= 1200:
hours = mdates.MinuteLocator(byminute=np.arange(0, 60, 2))
mins = mdates.MinuteLocator(byminute=np.arange(0, 60, 0.5))
elif 25200 < timedif.total_seconds() <= 86400:
hours = mdates.HourLocator(byhour=np.arange(0, 24, 3))
mins = mdates.HourLocator(byhour=np.arange(0, 24, 1))
elif 86400 < timedif.total_seconds() <= 172800:
hours = mdates.HourLocator(byhour=np.arange(0, 24, 6))
mins = mdates.HourLocator(byhour=np.arange(0, 24, 1))
elif timedif.total_seconds() > 172800:
hours = mdates.AutoDateLocator()
mins = mdates.HourLocator(byhour=np.arange(0, 24, 3))
else:
hours = mdates.MinuteLocator(byminute=np.arange(0, 60, 5))
# Minor locator overruns maxticks for ~year-long datasets
if timedif.total_seconds() < 172800:
ax.xaxis.set_minor_locator(mins)
hrFMT = mdates.DateFormatter('%Y/%m/%d %H:%M:%S')
else:
hrFMT = mdates.DateFormatter('%Y/%m/%d')
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hrFMT)
fig.autofmt_xdate()
locs, labels = plt.xticks()
plt.setp(labels, rotation=15)
if not rate:
ax.set_ylim([0, max([len(_d) for _d in dates])])
if plot_legend:
if ax.legend() is not None:
leg = ax.legend(loc=2, prop={'size': 8}, ncol=2)
leg.get_frame().set_alpha(0.5)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig
def _set_factory_context(factory_class, bundle_context):
# type: (type, Optional[BundleContext]) -> Optional[FactoryContext]
"""
Transforms the context data dictionary into its FactoryContext object form.
:param factory_class: A manipulated class
:param bundle_context: The class bundle context
:return: The factory context, None on error
"""
try:
# Try to get the factory context (built using decorators)
context = getattr(factory_class, constants.IPOPO_FACTORY_CONTEXT)
except AttributeError:
# The class has not been manipulated, or too badly
return None
if not context.completed:
# Partial context (class not manipulated)
return None
# Associate the factory to the bundle context
context.set_bundle_context(bundle_context)
return context
def remove_task_db(self, fid, force=False):
'''Remove the task from the database.'''
self.remove_slice_db(fid)
sql = 'DELETE FROM upload WHERE fid=?'
self.cursor.execute(sql, [fid, ])
self.check_commit(force=force)
def parse_datetime(value):
"""Attempts to parse `value` into an instance of ``datetime.datetime``. If
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string or datetime.datetime value.
"""
if not value:
return None
elif isinstance(value, datetime.datetime):
return value
return dateutil.parser.parse(value)
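Illustrative calls, assuming python-dateutil is installed:

import datetime

print(parse_datetime('2021-03-01T12:00:00Z'))         # 2021-03-01 12:00:00+00:00
print(parse_datetime(datetime.datetime(2021, 3, 1)))  # passed through unchanged
print(parse_datetime(None))                           # None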
def total_border_pixels_from_mask_and_edge_pixels(mask, edge_pixels, masked_grid_index_to_pixel):
"""Compute the total number of borders-pixels in a masks."""
border_pixel_total = 0
for i in range(edge_pixels.shape[0]):
if check_if_border_pixel(mask, edge_pixels[i], masked_grid_index_to_pixel):
border_pixel_total += 1
return border_pixel_total
def get_view(self):
""" Get the root view to display. Make sure it is
properly initialized.
"""
view = self.view
if not view.is_initialized:
view.initialize()
if not view.proxy_is_active:
view.activate_proxy()
return view.proxy.widget
def reset(self):
"""Reset the input buffer and associated state."""
self.indent_spaces = 0
self._buffer[:] = []
self.source = ''
self.code = None
self._is_complete = False
self._full_dedent = False
def geometry(AA):
'''Generates the geometry of the requested amino acid.
The amino acid needs to be specified by its single-letter
code. If an invalid code is specified, the function
returns the geometry of Glycine.'''
if(AA=='G'):
return GlyGeo()
elif(AA=='A'):
return AlaGeo()
elif(AA=='S'):
return SerGeo()
elif(AA=='C'):
return CysGeo()
elif(AA=='V'):
return ValGeo()
elif(AA=='I'):
return IleGeo()
elif(AA=='L'):
return LeuGeo()
elif(AA=='T'):
return ThrGeo()
elif(AA=='R'):
return ArgGeo()
elif(AA=='K'):
return LysGeo()
elif(AA=='D'):
return AspGeo()
elif(AA=='E'):
return GluGeo()
elif(AA=='N'):
return AsnGeo()
elif(AA=='Q'):
return GlnGeo()
elif(AA=='M'):
return MetGeo()
elif(AA=='H'):
return HisGeo()
elif(AA=='P'):
return ProGeo()
elif(AA=='F'):
return PheGeo()
elif(AA=='Y'):
return TyrGeo()
elif(AA=='W'):
return TrpGeo()
else:
return GlyGeo()
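The same dispatch can be written as a lookup table; this sketch assumes the *Geo classes referenced above and keeps Glycine as the fallback.

_GEO_BY_CODE = {
    'G': GlyGeo, 'A': AlaGeo, 'S': SerGeo, 'C': CysGeo, 'V': ValGeo,
    'I': IleGeo, 'L': LeuGeo, 'T': ThrGeo, 'R': ArgGeo, 'K': LysGeo,
    'D': AspGeo, 'E': GluGeo, 'N': AsnGeo, 'Q': GlnGeo, 'M': MetGeo,
    'H': HisGeo, 'P': ProGeo, 'F': PheGeo, 'Y': TyrGeo, 'W': TrpGeo,
}

def geometry_table_driven(AA):
    # Unknown single-letter codes fall back to Glycine, as in geometry() above.
    return _GEO_BY_CODE.get(AA, GlyGeo)()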
def post_events(self, events):
"""
Posts a single event to the Keen IO API. The write key must be set first.
:param events: an Event to upload
"""
url = "{0}/{1}/projects/{2}/events".format(self.base_url, self.api_version,
self.project_id)
headers = utilities.headers(self.write_key)
payload = json.dumps(events)
response = self.fulfill(HTTPMethods.POST, url, data=payload, headers=headers, timeout=self.post_timeout)
self._error_handling(response)
return self._get_response_json(response)
def parse_file(src):
"""
find file in config and output to dest dir
"""
#clear the stack between parses
if config.dest_dir == None:
dest = src.dir
else:
dest = config.dest_dir
output = get_output(src)
output_file = dest + '/' + src.basename + '.min.js'
f = open(output_file,'w')
f.write(jsmin.jsmin(output))
f.close()
print "Wrote combined and minified file to: %s" % (output_file) | find file in config and output to dest dir |
def get_logger(name, verbosity, stream):
"""
Returns simple console logger.
"""
logger = logging.getLogger(name)
logger.setLevel(
{0: DEFAULT_LOGGING_LEVEL, 1: logging.INFO, 2: logging.DEBUG}.get(min(2, verbosity), DEFAULT_LOGGING_LEVEL)
)
logger.handlers = []
handler = logging.StreamHandler(stream)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(LOG_FORMAT))
logger.addHandler(handler)
return logger
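A brief usage sketch; verbosity 0, 1 and 2 map to the default level, INFO and DEBUG respectively.

import sys

log = get_logger('myapp', verbosity=2, stream=sys.stderr)
log.debug('debug output goes to stderr')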
def select_action(self, pos1, pos2, ctrl, shift):
"""Return a `sc_pb.Action` with the selection filled."""
assert pos1.surf.surf_type == pos2.surf.surf_type
assert pos1.surf.world_to_obs == pos2.surf.world_to_obs
action = sc_pb.Action()
action_spatial = pos1.action_spatial(action)
if pos1.world_pos == pos2.world_pos: # select a point
select = action_spatial.unit_selection_point
pos1.obs_pos.assign_to(select.selection_screen_coord)
mod = sc_spatial.ActionSpatialUnitSelectionPoint
if ctrl:
select.type = mod.AddAllType if shift else mod.AllType
else:
select.type = mod.Toggle if shift else mod.Select
else:
select = action_spatial.unit_selection_rect
rect = select.selection_screen_coord.add()
pos1.obs_pos.assign_to(rect.p0)
pos2.obs_pos.assign_to(rect.p1)
select.selection_add = shift
# Clear the queued action if something will be selected. An alternative
# implementation may check whether the selection changed next frame.
units = self._units_in_area(point.Rect(pos1.world_pos, pos2.world_pos))
if units:
self.clear_queued_action()
return action
def extract_rzip (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract an RZIP archive."""
cmdlist = [cmd, '-d', '-k']
if verbosity > 1:
cmdlist.append('-v')
outfile = util.get_single_outfile(outdir, archive)
cmdlist.extend(["-o", outfile, archive])
return cmdlist
def combine_assignments(self, assignments):
"""Rewrite the current graph to combine "Assign" operations.
Combine similar Assign operations into grouped Assign operations.
This is useful when using the rewrite_stack_variables() optimization,
since variables can only be stacked if they are present in the same set
of Assign operations.
This function takes a list of Assign operations and returns a possibly
shorter list of Assign operations. The input Assignment operations
are removed from the graph and become invalid.
Args:
assignments: a list of Assign objects
Returns:
a list of Assign objects
"""
group_by_fn = collections.defaultdict(list)
for a in assignments:
if not isinstance(a, Assign):
raise ValueError("ops should be instances of mtf.Assign")
group_by_fn[a.assign_fn].append(a)
assignments_set = set(assignments)
self._operations = [
op for op in self._operations if op not in assignments_set]
ret = []
for fn, ops in six.iteritems(group_by_fn):
variables = []
values = []
for a in ops:
variables.extend(a.variables)
values.extend(a.inputs)
ret.append(Assign(variables, values, fn))
return ret
def which(name):
""" Returns the full path to executable in path matching provided name.
`name`
String value.
Returns string or ``None``.
"""
# we were given a filename, return it if it's executable
if os.path.dirname(name) != '':
if not os.path.isdir(name) and os.access(name, os.X_OK):
return name
else:
return None
# fetch PATH env var and split
path_val = os.environ.get('PATH', None) or os.defpath
# return the first match in the paths
for path in path_val.split(os.pathsep):
filename = os.path.join(path, name)
if os.access(filename, os.X_OK):
return filename
return None
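On Python 3.3+ the standard library offers a comparable lookup (it additionally honours PATHEXT on Windows):

import shutil

print(shutil.which('python'))  # e.g. /usr/bin/python, or None if not found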
def collectInterest(self):
""" Collects user's daily interest, returns result
Returns
bool - True if successful, False otherwise
"""
if self.collectedInterest:
return False
pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
form = pg.form(action="process_bank.phtml")
form['type'] = "interest"
pg = form.submit()
# Success redirects to bank page
if "It's great to see you again" in pg.content:
self.__loadDetails(pg)
return True
else:
logging.getLogger("neolib.user").info("Failed to collect daily interest for unknown reason.", {'pg': pg})
return False
def y(self, y):
"""Project reversed y"""
if y is None:
return None
return (self.height * (y - self.box.ymin) / self.box.height)
def _copy_circuit_metadata(source_dag, coupling_map):
"""Return a copy of source_dag with metadata but empty.
Generate only a single qreg in the output DAG, matching the size of the
coupling_map."""
target_dag = DAGCircuit()
target_dag.name = source_dag.name
for creg in source_dag.cregs.values():
target_dag.add_creg(creg)
device_qreg = QuantumRegister(len(coupling_map.physical_qubits), 'q')
target_dag.add_qreg(device_qreg)
return target_dag
def extract_output(self, output_variables_list):
""" extract output variables
"""
variables_mapping = self.session_context.session_variables_mapping
output = {}
for variable in output_variables_list:
if variable not in variables_mapping:
logger.log_warning(
"variable '{}' can not be found in variables mapping, failed to output!"\
.format(variable)
)
continue
output[variable] = variables_mapping[variable]
utils.print_info(output)
return output
def _rdistributive(self, expr, op_example):
"""
Recursively flatten the `expr` expression for the `op_example`
AND or OR operation instance example.
"""
if expr.isliteral:
return expr
expr_class = expr.__class__
args = (self._rdistributive(arg, op_example) for arg in expr.args)
args = tuple(arg.simplify() for arg in args)
if len(args) == 1:
return args[0]
expr = expr_class(*args)
dualoperation = op_example.dual
if isinstance(expr, dualoperation):
expr = expr.distributive()
return expr
def _do_validate_sources_present(self, target):
"""Checks whether sources is empty, and either raises a TaskError or just returns False.
The specifics of this behavior are defined by whether the user sets --allow-empty to True/False:
--allow-empty=False will result in a TaskError being raised in the event of an empty source
set. If --allow-empty=True, this method will just return false and log a warning.
Shared for all SimpleCodegenTask subclasses to help keep errors consistent and descriptive.
:param target: Target to validate.
:return: True if sources is not empty, False otherwise.
"""
if not self.validate_sources_present:
return True
sources = target.sources_relative_to_buildroot()
if not sources:
message = ('Target {} has no sources.'.format(target.address.spec))
if not self.get_options().allow_empty:
raise TaskError(message)
else:
logging.warn(message)
return False
return True | Checks whether sources is empty, and either raises a TaskError or just returns False.
The specifics of this behavior are defined by whether the user sets --allow-empty to True/False:
--allow-empty=False will result in a TaskError being raised in the event of an empty source
set. If --allow-empty=True, this method will just return false and log a warning.
Shared for all SimpleCodegenTask subclasses to help keep errors consistent and descriptive.
:param target: Target to validate.
:return: True if sources is not empty, False otherwise. |
def atmos_worker(srcs, window, ij, args):
"""A simple atmospheric correction user function."""
src = srcs[0]
rgb = src.read(window=window)
rgb = to_math_type(rgb)
atmos = simple_atmo(rgb, args["atmo"], args["contrast"], args["bias"])
# should be scaled 0 to 1, scale to outtype
return scale_dtype(atmos, args["out_dtype"]) | A simple atmospheric correction user function. |
def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector)) | Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through |
def adupdates_simple(x, g, L, stepsize, inner_stepsizes, niter,
random=False):
"""Non-optimized version of ``adupdates``.
This function is intended for debugging. It makes a lot of copies and
performs no error checking.
"""
# Initializations
length = len(g)
ranges = [Li.range for Li in L]
duals = [space.zero() for space in ranges]
# Iteratively find a solution
for _ in range(niter):
# Update x = x - 1/stepsize * sum([ops[i].adjoint(duals[i])
# for i in range(length)])
for i in range(length):
x -= (1.0 / stepsize) * L[i].adjoint(duals[i])
rng = np.random.permutation(range(length)) if random else range(length)
for j in rng:
dual_tmp = ranges[j].element()
dual_tmp = (g[j].convex_conj.proximal
(stepsize * inner_stepsizes[j]
if np.isscalar(inner_stepsizes[j])
else stepsize * np.asarray(inner_stepsizes[j]))
(duals[j] + stepsize * inner_stepsizes[j] * L[j](x)
if np.isscalar(inner_stepsizes[j])
else duals[j] + stepsize *
np.asarray(inner_stepsizes[j]) * L[j](x)))
x -= 1.0 / stepsize * L[j].adjoint(dual_tmp - duals[j])
duals[j].assign(dual_tmp) | Non-optimized version of ``adupdates``.
This function is intended for debugging. It makes a lot of copies and
performs no error checking. |
def get_signalcheck(self, sar, **params):
"""get_signalcheck - perform a signal check.
Parameters
----------
sar : dict
signal-api-request specified as a dictionary of parameters.
All of these parameters are optional. For details
check https://api.postcode.nl/documentation/signal-api-example.
Returns
-------
a response dictionary
"""
params = sar
endpoint = 'rest/signal/check'
# The 'sar'-request dictionary should be sent as valid JSON data, so
# we need to convert it to JSON
# when we construct the request in API.request
retValue = self._API__request(endpoint, 'POST',
params=params, convJSON=True)
return retValue | get_signalcheck - perform a signal check.
Parameters
----------
sar : dict
signal-api-request specified as a dictionary of parameters.
All of these parameters are optional. For details
check https://api.postcode.nl/documentation/signal-api-example.
Returns
-------
a response dictionary |
def remove_bond(self, particle_pair):
"""Deletes a bond between a pair of Particles
Parameters
----------
particle_pair : indexable object, length=2, dtype=mb.Compound
The pair of Particles to remove the bond between
"""
from mbuild.port import Port
if self.root.bond_graph is None or not self.root.bond_graph.has_edge(
*particle_pair):
warn("Bond between {} and {} doesn't exist!".format(*particle_pair))
return
self.root.bond_graph.remove_edge(*particle_pair)
bond_vector = particle_pair[0].pos - particle_pair[1].pos
if np.allclose(bond_vector, np.zeros(3)):
warn("Particles {} and {} overlap! Ports will not be added."
"".format(*particle_pair))
return
distance = np.linalg.norm(bond_vector)
particle_pair[0].parent.add(Port(anchor=particle_pair[0],
orientation=-bond_vector,
separation=distance / 2), 'port[$]')
particle_pair[1].parent.add(Port(anchor=particle_pair[1],
orientation=bond_vector,
separation=distance / 2), 'port[$]') | Deletes a bond between a pair of Particles
Parameters
----------
particle_pair : indexable object, length=2, dtype=mb.Compound
The pair of Particles to remove the bond between |
def new_chain(table='filter', chain=None, table_type=None, hook=None, priority=None, family='ipv4'):
'''
.. versionadded:: 2014.7.0
Create new chain to the specified table.
CLI Example:
.. code-block:: bash
salt '*' nftables.new_chain filter input
salt '*' nftables.new_chain filter input \\
table_type=filter hook=input priority=0
salt '*' nftables.new_chain filter foo
IPv6:
salt '*' nftables.new_chain filter input family=ipv6
salt '*' nftables.new_chain filter input \\
table_type=filter hook=input priority=0 family=ipv6
salt '*' nftables.new_chain filter foo family=ipv6
'''
ret = {'comment': '',
'result': False}
if not chain:
ret['comment'] = 'Chain needs to be specified'
return ret
res = check_table(table, family=family)
if not res['result']:
return res
res = check_chain(table, chain, family=family)
if res['result']:
ret['comment'] = 'Chain {0} in table {1} in family {2} already exists'.\
format(chain, table, family)
return ret
nft_family = _NFTABLES_FAMILIES[family]
cmd = '{0} add chain {1} {2} {3}'.\
format(_nftables_cmd(), nft_family, table, chain)
if table_type or hook or priority:
if table_type and hook and six.text_type(priority):
cmd = r'{0} \{{ type {1} hook {2} priority {3}\; \}}'.\
format(cmd, table_type, hook, priority)
else:
# Specify one, require all
ret['comment'] = 'Table_type, hook, and priority required.'
return ret
out = __salt__['cmd.run'](cmd, python_shell=False)
if not out:
ret['comment'] = 'Chain {0} in table {1} in family {2} created'.\
format(chain, table, family)
ret['result'] = True
else:
ret['comment'] = 'Chain {0} in table {1} in family {2} could not be created'.\
format(chain, table, family)
return ret | .. versionadded:: 2014.7.0
Create new chain to the specified table.
CLI Example:
.. code-block:: bash
salt '*' nftables.new_chain filter input
salt '*' nftables.new_chain filter input \\
table_type=filter hook=input priority=0
salt '*' nftables.new_chain filter foo
IPv6:
salt '*' nftables.new_chain filter input family=ipv6
salt '*' nftables.new_chain filter input \\
table_type=filter hook=input priority=0 family=ipv6
salt '*' nftables.new_chain filter foo family=ipv6 |
def normalize(self):
"""
Normalizes the given data such that the area under the histogram/curve
comes to 1. Also re-applies smoothing once done.
"""
median_diff = np.median(np.diff(self.x))
bin_edges = [self.x[0] - median_diff/2.0]
bin_edges.extend(median_diff/2.0 + self.x)
self.y_raw = self.y_raw/(self.y_raw.sum()*np.diff(bin_edges))
self.smooth() | Normalizes the given data such that the area under the histogram/curve
comes to 1. Also re-applies smoothing once done. |
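A minimal NumPy sketch of the same normalization step with made-up x/y arrays (the smoothing that the original method re-applies afterwards is omitted here):
import numpy as np
x = np.array([0.0, 1.0, 2.0, 3.0])        # assumed sample positions
y_raw = np.array([2.0, 4.0, 6.0, 8.0])    # assumed raw histogram heights
median_diff = np.median(np.diff(x))       # bin width estimate
bin_edges = [x[0] - median_diff / 2.0]
bin_edges.extend(median_diff / 2.0 + x)   # one edge past each x value
y_norm = y_raw / (y_raw.sum() * np.diff(bin_edges))
print(np.sum(y_norm * np.diff(bin_edges)))  # 1.0 -- area under the curve is now 1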
def import_model(cls, ins_name):
"""Import model class in models package
"""
try:
package_space = getattr(cls, 'package_space')
except AttributeError:
raise ValueError('package_space not exist')
else:
return import_object(ins_name, package_space) | Import model class in models package |
def parse(self, what):
"""
:param what:
can be 'rlz-1/ref-asset1', 'rlz-2/sid-1', ...
"""
if '/' not in what:
key, spec = what, ''
else:
key, spec = what.split('/')
if spec and not spec.startswith(('ref-', 'sid-')):
raise ValueError('Wrong specification in %s' % what)
elif spec == '': # export losses for all assets
aids = []
arefs = []
for aid, rec in enumerate(self.assetcol.array):
aids.append(aid)
arefs.append(self.asset_refs[aid])
elif spec.startswith('sid-'): # passed the site ID
sid = int(spec[4:])
aids = []
arefs = []
for aid, rec in enumerate(self.assetcol.array):
if rec['site_id'] == sid:
aids.append(aid)
arefs.append(self.asset_refs[aid])
elif spec.startswith('ref-'): # passed the asset name
arefs = [spec[4:]]
aids = [self.str2asset[arefs[0]]['ordinal']]
else:
raise ValueError('Wrong specification in %s' % what)
return aids, arefs, spec, key | :param what:
can be 'rlz-1/ref-asset1', 'rlz-2/sid-1', ... |
def add_to_inventory(self):
"""Adds lb IPs to stack inventory"""
if self.lb_attrs:
self.lb_attrs = self.consul.lb_details(
self.lb_attrs[A.loadbalancer.ID]
)
host = self.lb_attrs['virtualIps'][0]['address']
self.stack.add_lb_secgroup(self.name, [host], self.backend_port)
self.stack.add_host(
host,
[self.name],
self.lb_attrs
) | Adds lb IPs to stack inventory |
def has_builder(self):
"""Return whether this Node has a builder or not.
In Boolean tests, this turns out to be a *lot* more efficient
than simply examining the builder attribute directly ("if
node.builder: ..."). When the builder attribute is examined
directly, it ends up calling __getattr__ for both the __len__
and __nonzero__ attributes on instances of our Builder Proxy
class(es), generating a bazillion extra calls and slowing
things down immensely.
"""
try:
b = self.builder
except AttributeError:
# There was no explicit builder for this Node, so initialize
# the self.builder attribute to None now.
b = self.builder = None
return b is not None | Return whether this Node has a builder or not.
In Boolean tests, this turns out to be a *lot* more efficient
than simply examining the builder attribute directly ("if
node.builder: ..."). When the builder attribute is examined
directly, it ends up calling __getattr__ for both the __len__
and __nonzero__ attributes on instances of our Builder Proxy
class(es), generating a bazillion extra calls and slowing
things down immensely. |
def sdot( U, V ):
'''
Computes the tensor product reducing the last dimension of U with the first dimension of V.
For matrices, it is equal to the regular matrix product.
'''
nu = U.ndim
#nv = V.ndim
return np.tensordot( U, V, axes=(nu-1,0) ) | Computes the tensor product reducing the last dimension of U with the first dimension of V.
For matrices, it is equal to the regular matrix product. |
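A quick standalone check of the tensordot behaviour described above (not part of the original snippet):
import numpy as np
def sdot(U, V):
    # Contract the last axis of U with the first axis of V
    return np.tensordot(U, V, axes=(U.ndim - 1, 0))
A = np.arange(6).reshape(2, 3)
B = np.arange(12).reshape(3, 4)
print(np.allclose(sdot(A, B), A @ B))  # True -- matches the matrix product
T = np.ones((2, 2, 3))
print(sdot(T, B).shape)                # (2, 2, 4) -- higher-rank case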
def add_fileobj(self, fileobj, path, compress, flags=None):
"""Add the contents of a file object to the MAR file.
Args:
fileobj (file-like object): open file object
path (str): name of this file in the MAR file
compress (str): One of 'xz', 'bz2', or None. Defaults to None.
flags (int): permission of this file in the MAR file. Defaults to the permissions of `path`
"""
f = file_iter(fileobj)
flags = flags or os.stat(path).st_mode & 0o777  # os.stat() returns a stat_result; the permission bits live on .st_mode
return self.add_stream(f, path, compress, flags) | Add the contents of a file object to the MAR file.
Args:
fileobj (file-like object): open file object
path (str): name of this file in the MAR file
compress (str): One of 'xz', 'bz2', or None. Defaults to None.
flags (int): permission of this file in the MAR file. Defaults to the permissions of `path` |
def package_data(pkg, root_list):
"""Generic function to find package_data for `pkg` under `root`."""
data = []
for root in root_list:
for dirname, _, files in os.walk(os.path.join(pkg, root)):
for fname in files:
data.append(os.path.relpath(os.path.join(dirname, fname), pkg))
return {pkg: data} | Generic function to find package_data for `pkg` under each `root` in `root_list`. |
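A hedged usage sketch for a hypothetical package layout (the package name and the templates/static directories are assumptions, and package_data refers to the function defined above):
from setuptools import setup
setup(
    name="mypkg",
    packages=["mypkg"],
    # walks mypkg/templates and mypkg/static and returns something like
    # {"mypkg": ["templates/a.html", "static/style.css", ...]}
    package_data=package_data("mypkg", ["templates", "static"]),
)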
def init_fakemod_dict(fm,adict=None):
"""Initialize a FakeModule instance __dict__.
Kept as a standalone function and not a method so the FakeModule API can
remain basically empty.
This should be considered for private IPython use, used in managing
namespaces for %run.
Parameters
----------
fm : FakeModule instance
adict : dict, optional
"""
dct = {}
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method, so we add it if missing:
dct.setdefault('__nonzero__',lambda : True)
dct.setdefault('__file__',__file__)
if adict is not None:
dct.update(adict)
# Hard assignment of the object's __dict__. This is nasty but deliberate.
fm.__dict__.clear()
fm.__dict__.update(dct) | Initialize a FakeModule instance __dict__.
Kept as a standalone function and not a method so the FakeModule API can
remain basically empty.
This should be considered for private IPython use, used in managing
namespaces for %run.
Parameters
----------
fm : FakeModule instance
adict : dict, optional |
def ensure_index(self, index, mappings=None, settings=None, clear=False):
"""
Ensure that an index with the given mappings exists
"""
mappings = mappings or []
if isinstance(mappings, dict):
mappings = [mappings]
exists = self.indices.exists_index(index)
if exists and not mappings and not clear:
return
if exists and clear:
self.indices.delete_index(index)
exists = False
if exists:
if not mappings:
self.indices.delete_index(index)
self.indices.refresh()
self.indices.create_index(index, settings)
return
if clear:
for maps in mappings:
for key in list(maps.keys()):
self.indices.delete_mapping(index, doc_type=key)
self.indices.refresh()
if isinstance(mappings, SettingsBuilder):
for name, data in list(mappings.mappings.items()):
self.indices.put_mapping(doc_type=name, mapping=data, indices=index)
else:
from pyes.mappings import DocumentObjectField, ObjectField
for maps in mappings:
if isinstance(maps, tuple):
name, mapping = maps
self.indices.put_mapping(doc_type=name, mapping=mapping, indices=index)
elif isinstance(maps, dict):
for name, data in list(maps.items()):
self.indices.put_mapping(doc_type=name, mapping=data, indices=index)
elif isinstance(maps, (DocumentObjectField, ObjectField)):
self.put_mapping(doc_type=maps.name, mapping=maps.as_dict(), indices=index)
return
if settings:
if isinstance(settings, dict):
settings = SettingsBuilder(settings, mappings)
else:
if isinstance(mappings, SettingsBuilder):
settings = mappings
else:
settings = SettingsBuilder(mappings=mappings)
if not exists:
self.indices.create_index(index, settings)
self.indices.refresh(index, timesleep=1) | Ensure that an index with the given mappings exists |
def add_metrics(self, metrics: Iterable[float]) -> None:
"""
Helper to add multiple metrics at once.
"""
for metric in metrics:
self.add_metric(metric) | Helper to add multiple metrics at once. |
def is_deb_package_installed(pkg):
""" checks if a particular deb package is installed """
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
result = sudo('dpkg-query -l "%s" | grep -q ^.i' % pkg)
return not bool(result.return_code) | checks if a particular deb package is installed |
def ellipsis(text, length, symbol="..."):
"""Present a block of text of given length.
If the length of available text exceeds the requested length, truncate and
intelligently append an ellipsis.
"""
if len(text) > length:
pos = text.rfind(" ", 0, length)
if pos < 0:
return text[:length].rstrip(".") + symbol
else:
return text[:pos].rstrip(".") + symbol
else:
return text | Present a block of text of given length.
If the length of available text exceeds the requested length, truncate and
intelligently append an ellipsis. |
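Assuming the ellipsis function above is importable, a quick illustration of the truncation behaviour:
text = "The quick brown fox jumps over the lazy dog"
print(ellipsis(text, 20))             # 'The quick brown fox...'  (cut at the last space before the limit)
print(ellipsis("nospacesinhere", 5))  # 'nospa...'                (no space found, hard truncation)
print(ellipsis("short", 20))          # 'short'                   (returned unchanged)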
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
# if we have a load, save it
if 'load' in load:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret) | Receive a syndic minion return and format it to look like returns from
individual minions. |
def getSequence(title, db='nucleotide'):
"""
Get information about a sequence from Genbank.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param db: The C{str} name of the Entrez database to consult.
NOTE: this uses the network! Also, there is a 3 requests/second limit
imposed by NCBI on these requests so be careful or your IP will be banned.
"""
titleId = title.split(' ', 1)[0]
try:
gi = titleId.split('|')[1]
except IndexError:
# Assume we have a gi number directly, and make sure it's a string.
gi = str(titleId)
try:
client = Entrez.efetch(db=db, rettype='gb', retmode='text', id=gi)
except URLError:
return None
else:
record = SeqIO.read(client, 'gb')
client.close()
return record | Get information about a sequence from Genbank.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param db: The C{str} name of the Entrez database to consult.
NOTE: this uses the network! Also, there is a 3 requests/second limit
imposed by NCBI on these requests so be careful or your IP will be banned. |
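The GI extraction step can be illustrated without touching the network; the title string below is the example from the docstring:
title = "gi|63148399|gb|DQ011818.1| Description..."
title_id = title.split(" ", 1)[0]  # 'gi|63148399|gb|DQ011818.1|'
try:
    gi = title_id.split("|")[1]    # '63148399'
except IndexError:
    gi = str(title_id)             # fall back to treating the id itself as the GI
print(gi)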
def uptime(human_readable=False):
'''
.. versionadded:: 2015.8.0
Return the system uptime for the machine
Args:
human_readable (bool):
Return uptime in human readable format if ``True``, otherwise
return seconds. Default is ``False``
.. note::
Human readable format is ``days, hours:min:sec``. Days will only
be displayed if more than 0
Returns:
str:
The uptime in seconds or human readable format depending on the
value of ``human_readable``
CLI Example:
.. code-block:: bash
salt '*' status.uptime
salt '*' status.uptime human_readable=True
'''
# Get startup time
startup_time = datetime.datetime.fromtimestamp(psutil.boot_time())
# Subtract startup time from current time to get the uptime of the system
uptime = datetime.datetime.now() - startup_time
return six.text_type(uptime) if human_readable else uptime.total_seconds() | .. versionadded:: 2015.8.0
Return the system uptime for the machine
Args:
human_readable (bool):
Return uptime in human readable format if ``True``, otherwise
return seconds. Default is ``False``
.. note::
Human readable format is ``days, hours:min:sec``. Days will only
be displayed if more than 0
Returns:
str:
The uptime in seconds or human readable format depending on the
value of ``human_readable``
CLI Example:
.. code-block:: bash
salt '*' status.uptime
salt '*' status.uptime human_readable=True |
def p_try_statement_2(self, p):
"""try_statement : TRY block finally"""
p[0] = ast.Try(statements=p[2], fin=p[3]) | try_statement : TRY block finally |
def get_queryset(self, *args, **kwargs):
"""Django queryset.extra() is used here to add decryption sql to query."""
select_sql = {}
encrypted_fields = []
for f in self.model._meta.get_fields_with_model():
field = f[0]
if isinstance(field, PGPMixin):
select_sql[field.name] = self.get_decrypt_sql(field).format(
field.model._meta.db_table,
field.name,
settings.PGPFIELDS_PRIVATE_KEY,
)
encrypted_fields.append(field.name)
return super(PGPEncryptedManager, self).get_queryset(
*args, **kwargs).defer(*encrypted_fields).extra(select=select_sql) | Django queryset.extra() is used here to add decryption SQL to the query. |
def is_country(self, text):
"""Check if a piece of text is in the list of countries"""
ct_list = self._just_cts.keys()
if text in ct_list:
return True
else:
return False | Check if a piece of text is in the list of countries |
def invalidate_cache(user, size=None):
"""
Function to be called when saving or changing a user's avatars.
"""
sizes = set(settings.AVATAR_AUTO_GENERATE_SIZES)
if size is not None:
sizes.add(size)
for prefix in cached_funcs:
for size in sizes:
cache.delete(get_cache_key(user, size, prefix)) | Function to be called when saving or changing a user's avatars. |
def chimera_layout(G, scale=1., center=None, dim=2):
"""Positions the nodes of graph G in a Chimera cross topology.
NumPy (http://scipy.org) is required for this function.
Parameters
----------
G : NetworkX graph
Should be a Chimera graph or a subgraph of a
Chimera graph. If every node in G has a `chimera_index`
attribute, those are used to place the nodes. Otherwise makes
a best-effort attempt to find positions.
scale : float (default 1.)
Scale factor. When scale = 1, all positions fit within [0, 1]
on the x-axis and [-1, 0] on the y-axis.
center : None or array (default None)
Coordinates of the top left corner.
dim : int (default 2)
Number of dimensions. When dim > 2, all extra dimensions are
set to 0.
Returns
-------
pos : dict
A dictionary of positions keyed by node.
Examples
--------
>>> G = dnx.chimera_graph(1)
>>> pos = dnx.chimera_layout(G)
"""
if not isinstance(G, nx.Graph):
empty_graph = nx.Graph()
empty_graph.add_edges_from(G)
G = empty_graph
# now we get chimera coordinates for the translation
# first, check if we made it
if G.graph.get("family") == "chimera":
m = G.graph['rows']
n = G.graph['columns']
t = G.graph['tile']
# get a node placement function
xy_coords = chimera_node_placer_2d(m, n, t, scale, center, dim)
if G.graph.get('labels') == 'coordinate':
pos = {v: xy_coords(*v) for v in G.nodes()}
elif G.graph.get('data'):
pos = {v: xy_coords(*dat['chimera_index']) for v, dat in G.nodes(data=True)}
else:
coord = chimera_coordinates(m, n, t)
pos = {v: xy_coords(*coord.tuple(v)) for v in G.nodes()}
else:
# best case scenario, each node in G has a chimera_index attribute. Otherwise
# we will try to determine it using the find_chimera_indices function.
if all('chimera_index' in dat for __, dat in G.nodes(data=True)):
chimera_indices = {v: dat['chimera_index'] for v, dat in G.nodes(data=True)}
else:
chimera_indices = find_chimera_indices(G)
# we could read these off of the name attribute for G, but we would want the values in
# the nodes to override the name in case of conflict.
m = max(idx[0] for idx in itervalues(chimera_indices)) + 1
n = max(idx[1] for idx in itervalues(chimera_indices)) + 1
t = max(idx[3] for idx in itervalues(chimera_indices)) + 1
xy_coords = chimera_node_placer_2d(m, n, t, scale, center, dim)
# compute our coordinates
pos = {v: xy_coords(i, j, u, k) for v, (i, j, u, k) in iteritems(chimera_indices)}
return pos | Positions the nodes of graph G in a Chimera cross topology.
NumPy (http://scipy.org) is required for this function.
Parameters
----------
G : NetworkX graph
Should be a Chimera graph or a subgraph of a
Chimera graph. If every node in G has a `chimera_index`
attribute, those are used to place the nodes. Otherwise makes
a best-effort attempt to find positions.
scale : float (default 1.)
Scale factor. When scale = 1, all positions fit within [0, 1]
on the x-axis and [-1, 0] on the y-axis.
center : None or array (default None)
Coordinates of the top left corner.
dim : int (default 2)
Number of dimensions. When dim > 2, all extra dimensions are
set to 0.
Returns
-------
pos : dict
A dictionary of positions keyed by node.
Examples
--------
>>> G = dnx.chimera_graph(1)
>>> pos = dnx.chimera_layout(G) |
def process_macros(self, content, source=None):
""" Processed all macros.
"""
macro_options = {'relative': self.relative, 'linenos': self.linenos}
classes = []
for macro_class in self.macros:
try:
macro = macro_class(logger=self.logger, embed=self.embed,
options=macro_options)
content, add_classes = macro.process(content, source)
if add_classes:
classes += add_classes
except Exception as e:
self.log(u"%s processing failed in %s: %s"
% (macro, source, e))
return content, classes | Process all macros. |
def share_item(self, token, item_id, dest_folder_id):
"""
Share an item to the destination folder.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The id of the item to be shared.
:type item_id: int | long
:param dest_folder_id: The id of destination folder where the item is
shared to.
:type dest_folder_id: int | long
:returns: Dictionary containing the details of the shared item.
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['id'] = item_id
parameters['dstfolderid'] = dest_folder_id
response = self.request('midas.item.share', parameters)
return response | Share an item to the destination folder.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The id of the item to be shared.
:type item_id: int | long
:param dest_folder_id: The id of destination folder where the item is
shared to.
:type dest_folder_id: int | long
:returns: Dictionary containing the details of the shared item.
:rtype: dict |
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
) | Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist. |
def build(self, builder):
"""
Build XML by appending to builder
.. note:: Questions can contain translations
"""
builder.start("Question", {})
for translation in self.translations:
translation.build(builder)
builder.end("Question") | Build XML by appending to builder
.. note:: Questions can contain translations |
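A minimal standard-library sketch of the start/end builder pattern used above (the Translation child element and its text are hypothetical):
import xml.etree.ElementTree as ET
builder = ET.TreeBuilder()
builder.start("Question", {})
builder.start("Translation", {"Lang": "en"})  # hypothetical child element
builder.data("What is your age?")
builder.end("Translation")
builder.end("Question")
element = builder.close()
print(ET.tostring(element).decode())
# <Question><Translation Lang="en">What is your age?</Translation></Question>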
def parse_image_response(self, response):
"""
Parse multiple objects from the RETS feed. A lot of string methods are used to handle the response before
encoding it back into bytes for the object.
:param response: The response from the feed
:return: list of SingleObjectParser
"""
if 'xml' in response.headers.get('Content-Type'):
# Got an XML response, likely an error code.
xml = xmltodict.parse(response.text)
self.analyze_reply_code(xml_response_dict=xml)
multi_parts = self._get_multiparts(response)
parsed = []
# go through each part of the multipart message
for part in multi_parts:
clean_part = part.strip('\r\n\r\n')
if '\r\n\r\n' in clean_part:
header, body = clean_part.split('\r\n\r\n', 1)
else:
header = clean_part
body = None
part_header_dict = {k.strip(): v.strip() for k, v in (h.split(':', 1) for h in header.split('\r\n'))}
# Some multipart requests respond with a text/XML part stating an error
if 'xml' in part_header_dict.get('Content-Type'):
# Got an XML response, likely an error code.
# Some rets servers give characters after the closing brace.
body = body[:body.index('/>') + 2] if '/>' in body else body
xml = xmltodict.parse(body)
try:
self.analyze_reply_code(xml_response_dict=xml)
except RETSException as e:
if e.reply_code == '20403':
# The requested object_id was not found.
continue
raise e
if body:
obj = self._response_object_from_header(
obj_head_dict=part_header_dict,
content=body.encode('latin-1') if six.PY3 else body)
else:
obj = self._response_object_from_header(obj_head_dict=part_header_dict)
parsed.append(obj)
return parsed | Parse multiple objects from the RETS feed. A lot of string methods are used to handle the response before
encoding it back into bytes for the object.
:param response: The response from the feed
:return: list of SingleObjectParser |