def press_key(self, key, mode=0):
'''
modes:
0 -> simple press
1 -> long press
2 -> release after long press
'''
if isinstance(key, str):
assert key in KEYS, 'No such key: {}'.format(key)
key = KEYS[key]
_LOGGER.info('Press key %s', self.__get_key_name(key))
        return self.rq('01', OrderedDict([('key', key), ('mode', mode)]))
def start_vm(access_token, subscription_id, resource_group, vm_name):
'''Start a virtual machine.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vm_name (str): Name of the virtual machine.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachines/',
vm_name,
'/start',
'?api-version=', COMP_API])
    return do_post(endpoint, '', access_token)
def add_execution_data(self, context_id, data):
"""Within a context, append data to the execution result.
Args:
context_id (str): the context id returned by create_context
data (bytes): data to append
Returns:
(bool): True if the operation is successful, False if
the context_id doesn't reference a known context.
"""
if context_id not in self._contexts:
LOGGER.warning("Context_id not in contexts, %s", context_id)
return False
context = self._contexts.get(context_id)
context.add_execution_data(data)
        return True
def get_max_instances_of_storage_bus(self, chipset, bus):
"""Returns the maximum number of storage bus instances which
can be configured for each VM. This corresponds to the number of
storage controllers one can have. Value may depend on chipset type
used.
in chipset of type :class:`ChipsetType`
The chipset type to get the value for.
in bus of type :class:`StorageBus`
The storage bus type to get the value for.
return max_instances of type int
The maximum number of instances for the given storage bus.
"""
if not isinstance(chipset, ChipsetType):
raise TypeError("chipset can only be an instance of type ChipsetType")
if not isinstance(bus, StorageBus):
raise TypeError("bus can only be an instance of type StorageBus")
max_instances = self._call("getMaxInstancesOfStorageBus",
in_p=[chipset, bus])
        return max_instances
def p_expression_sra(self, p):
'expression : expression RSHIFTA expression'
p[0] = Sra(p[1], p[3], lineno=p.lineno(1))
        p.set_lineno(0, p.lineno(1))
def get_cutout(self, token, channel,
x_start, x_stop,
y_start, y_stop,
z_start, z_stop,
t_start=0, t_stop=1,
resolution=1,
block_size=DEFAULT_BLOCK_SIZE,
neariso=False):
"""
Get volumetric cutout data from the neurodata server.
Arguments:
token (str): Token to identify data to download
channel (str): Channel
resolution (int): Resolution level
Q_start (int): The lower bound of dimension 'Q'
Q_stop (int): The upper bound of dimension 'Q'
block_size (int[3]): Block size of this dataset. If not provided,
ndio uses the metadata of this tokenchannel to set. If you find
that your downloads are timing out or otherwise failing, it may
be wise to start off by making this smaller.
neariso (bool : False): Passes the 'neariso' param to the cutout.
If you don't know what this means, ignore it!
Returns:
numpy.ndarray: Downloaded data.
"""
if block_size is None:
# look up block size from metadata
block_size = self.get_block_size(token, resolution)
origin = self.get_image_offset(token, resolution)
# If z_stop - z_start is < 16, backend still pulls minimum 16 slices
if (z_stop - z_start) < 16:
z_slices = 16
else:
z_slices = z_stop - z_start
# Calculate size of the data to be downloaded.
size = (x_stop - x_start) * (y_stop - y_start) * z_slices * 4
# Switch which download function to use based on which libraries are
# available in this version of python.
if six.PY2:
dl_func = self._get_cutout_blosc_no_chunking
elif six.PY3:
dl_func = self._get_cutout_no_chunking
else:
raise ValueError("Invalid Python version.")
if size < self._chunk_threshold:
vol = dl_func(token, channel, resolution,
x_start, x_stop,
y_start, y_stop,
z_start, z_stop,
t_start, t_stop,
neariso=neariso)
vol = numpy.rollaxis(vol, 1)
vol = numpy.rollaxis(vol, 2)
return vol
else:
from ndio.utils.parallel import block_compute
blocks = block_compute(x_start, x_stop,
y_start, y_stop,
z_start, z_stop,
origin, block_size)
vol = numpy.zeros(((z_stop - z_start),
(y_stop - y_start),
(x_stop - x_start)))
for b in blocks:
data = dl_func(token, channel, resolution,
b[0][0], b[0][1],
b[1][0], b[1][1],
b[2][0], b[2][1],
0, 1,
neariso=neariso)
if b == blocks[0]: # first block
vol = numpy.zeros(((z_stop - z_start),
(y_stop - y_start),
(x_stop - x_start)), dtype=data.dtype)
vol[b[2][0] - z_start: b[2][1] - z_start,
b[1][0] - y_start: b[1][1] - y_start,
b[0][0] - x_start: b[0][1] - x_start] = data
vol = numpy.rollaxis(vol, 1)
vol = numpy.rollaxis(vol, 2)
            return vol
def _warning_handler(self, code: int):
"""处理300~399段状态码,抛出对应警告.
Parameters:
(code): - 响应的状态码
Return:
(bool): - 已知的警告类型则返回True,否则返回False
"""
if code == 300:
warnings.warn(
"ExpireWarning",
RuntimeWarning,
stacklevel=3
)
elif code == 301:
warnings.warn(
"ExpireStreamWarning",
RuntimeWarning,
stacklevel=3
)
else:
if self.debug:
print("unknow code {}".format(code))
return False
        return True
def WriteProtoFile(self, printer):
"""Write the messages file to out as proto."""
self.Validate()
extended_descriptor.WriteMessagesFile(
self.__file_descriptor, self.__package, self.__client_info.version,
            printer)
def get_algorithm(alg: str) -> Callable:
"""
:param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
:type alg: str
:return: The requested algorithm.
:rtype: Callable
:raises: ValueError
"""
if alg not in algorithms:
raise ValueError('Invalid algorithm: {:s}'.format(alg))
    return algorithms[alg]
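# A minimal, self-contained sketch of the lookup pattern above. The real
# `algorithms` registry is presumably defined elsewhere in the library; the dict
# below is a hypothetical stand-in using HMAC-based JWA algorithms from the
# standard library.
import hashlib
import hmac
from typing import Callable


def _get_algorithm_sketch(alg: str, registry: dict) -> Callable:
    if alg not in registry:
        raise ValueError('Invalid algorithm: {:s}'.format(alg))
    return registry[alg]


_hs_registry = {
    'HS256': lambda key, msg: hmac.new(key, msg, hashlib.sha256).digest(),
    'HS384': lambda key, msg: hmac.new(key, msg, hashlib.sha384).digest(),
    'HS512': lambda key, msg: hmac.new(key, msg, hashlib.sha512).digest(),
}

sign = _get_algorithm_sketch('HS256', _hs_registry)
print(sign(b'secret', b'header.payload').hex())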
def get_nearest_nodes(G, X, Y, method=None):
"""
Return the graph nodes nearest to a list of points. Pass in points
as separate vectors of X and Y coordinates. The 'kdtree' method
is by far the fastest with large data sets, but only finds approximate
nearest nodes if working in unprojected coordinates like lat-lng (it
precisely finds the nearest node if working in projected coordinates).
The 'balltree' method is second fastest with large data sets, but it
is precise if working in unprojected coordinates like lat-lng.
Parameters
----------
G : networkx multidigraph
X : list-like
The vector of longitudes or x's for which we will find the nearest
node in the graph
Y : list-like
The vector of latitudes or y's for which we will find the nearest
node in the graph
method : str {None, 'kdtree', 'balltree'}
Which method to use for finding nearest node to each point.
If None, we manually find each node one at a time using
osmnx.utils.get_nearest_node and haversine. If 'kdtree' we use
scipy.spatial.cKDTree for very fast euclidean search. If
'balltree', we use sklearn.neighbors.BallTree for fast
haversine search.
Returns
-------
nn : array
list of nearest node IDs
"""
start_time = time.time()
if method is None:
# calculate nearest node one at a time for each point
nn = [get_nearest_node(G, (y, x), method='haversine') for x, y in zip(X, Y)]
elif method == 'kdtree':
# check if we were able to import scipy.spatial.cKDTree successfully
if not cKDTree:
raise ImportError('The scipy package must be installed to use this optional feature.')
# build a k-d tree for euclidean nearest node search
nodes = pd.DataFrame({'x':nx.get_node_attributes(G, 'x'),
'y':nx.get_node_attributes(G, 'y')})
tree = cKDTree(data=nodes[['x', 'y']], compact_nodes=True, balanced_tree=True)
# query the tree for nearest node to each point
points = np.array([X, Y]).T
dist, idx = tree.query(points, k=1)
nn = nodes.iloc[idx].index
elif method == 'balltree':
# check if we were able to import sklearn.neighbors.BallTree successfully
if not BallTree:
raise ImportError('The scikit-learn package must be installed to use this optional feature.')
# haversine requires data in form of [lat, lng] and inputs/outputs in units of radians
nodes = pd.DataFrame({'x':nx.get_node_attributes(G, 'x'),
'y':nx.get_node_attributes(G, 'y')})
nodes_rad = np.deg2rad(nodes[['y', 'x']].astype(np.float))
points = np.array([Y.astype(np.float), X.astype(np.float)]).T
points_rad = np.deg2rad(points)
# build a ball tree for haversine nearest node search
tree = BallTree(nodes_rad, metric='haversine')
# query the tree for nearest node to each point
idx = tree.query(points_rad, k=1, return_distance=False)
nn = nodes.iloc[idx[:,0]].index
else:
raise ValueError('You must pass a valid method name, or None.')
log('Found nearest nodes to {:,} points in {:,.2f} seconds'.format(len(X), time.time()-start_time))
    return np.array(nn)
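# The 'kdtree' branch above reduces to a single scipy call. A minimal sketch with
# synthetic projected coordinates standing in for graph node positions (so the
# euclidean search is exact, as the docstring notes).
import numpy as np
from scipy.spatial import cKDTree

node_xy = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0], [10.0, 10.0]])
query_xy = np.array([[1.0, 2.0], [9.0, 9.0]])

tree = cKDTree(node_xy)
dist, idx = tree.query(query_xy, k=1)
print(idx)   # [0 3] -> row indices of the nearest nodes
print(dist)  # euclidean distances to those nodes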
def remove_timedim(self, var):
"""Remove time dimension from dataset"""
if self.pps and var.dims[0] == 'time':
data = var[0, :, :]
data.attrs = var.attrs
var = data
        return var
def _zp_decode(self, msg):
"""ZP: Zone partitions."""
zone_partitions = [ord(x)-0x31 for x in msg[4:4+Max.ZONES.value]]
        return {'zone_partitions': zone_partitions}
def create_win32tz_map(windows_zones_xml):
"""Creates a map between Windows and Olson timezone names.
Args:
windows_zones_xml: The CLDR XML mapping.
Yields:
(win32_name, olson_name, comment)
"""
coming_comment = None
win32_name = None
territory = None
parser = genshi.input.XMLParser(StringIO(windows_zones_xml))
map_zones = {}
zone_comments = {}
for kind, data, _ in parser:
if kind == genshi.core.START and str(data[0]) == "mapZone":
attrs = data[1]
win32_name, territory, olson_name = (
attrs.get("other"), attrs.get("territory"), attrs.get("type").split(" ")[0])
map_zones[(win32_name, territory)] = olson_name
elif kind == genshi.core.END and str(data) == "mapZone" and win32_name:
if coming_comment:
zone_comments[(win32_name, territory)] = coming_comment
coming_comment = None
win32_name = None
elif kind == genshi.core.COMMENT:
coming_comment = data.strip()
elif kind in (genshi.core.START, genshi.core.END, genshi.core.COMMENT):
coming_comment = None
for win32_name, territory in sorted(map_zones):
yield (win32_name, territory, map_zones[(win32_name, territory)],
           zone_comments.get((win32_name, territory), None))
def set_enumerated_subtypes(self, subtype_fields, is_catch_all):
"""
Sets the list of "enumerated subtypes" for this struct. This differs
from regular subtyping in that each subtype is associated with a tag
that is used in the serialized format to indicate the subtype. Also,
this list of subtypes was explicitly defined in an "inner-union" in the
specification. The list of fields must include all defined subtypes of
this struct.
NOTE(kelkabany): For this to work with upcoming forward references, the
hierarchy of parent types for this struct must have had this method
called on them already.
:type subtype_fields: List[UnionField]
"""
assert self._enumerated_subtypes is None, \
'Enumerated subtypes already set.'
assert isinstance(is_catch_all, bool), type(is_catch_all)
self._is_catch_all = is_catch_all
self._enumerated_subtypes = []
if self.parent_type:
raise InvalidSpec(
"'%s' enumerates subtypes so it cannot extend another struct."
% self.name, self._ast_node.lineno, self._ast_node.path)
# Require that if this struct enumerates subtypes, its parent (and thus
# the entire hierarchy above this struct) does as well.
if self.parent_type and not self.parent_type.has_enumerated_subtypes():
raise InvalidSpec(
"'%s' cannot enumerate subtypes if parent '%s' does not." %
(self.name, self.parent_type.name),
self._ast_node.lineno, self._ast_node.path)
enumerated_subtype_names = set() # Set[str]
for subtype_field in subtype_fields:
path = subtype_field._ast_node.path
lineno = subtype_field._ast_node.lineno
# Require that a subtype only has a single type tag.
if subtype_field.data_type.name in enumerated_subtype_names:
raise InvalidSpec(
"Subtype '%s' can only be specified once." %
subtype_field.data_type.name, lineno, path)
# Require that a subtype has this struct as its parent.
if subtype_field.data_type.parent_type != self:
raise InvalidSpec(
"'%s' is not a subtype of '%s'." %
(subtype_field.data_type.name, self.name), lineno, path)
# Check for subtype tags that conflict with this struct's
# non-inherited fields.
if subtype_field.name in self._fields_by_name:
# Since the union definition comes first, use its line number
# as the source of the field's original declaration.
orig_field = self._fields_by_name[subtype_field.name]
raise InvalidSpec(
"Field '%s' already defined on line %d." %
(subtype_field.name, lineno),
orig_field._ast_node.lineno,
orig_field._ast_node.path)
# Walk up parent tree hierarchy to ensure no field conflicts.
# Checks for conflicts with subtype tags and regular fields.
cur_type = self.parent_type
while cur_type:
if subtype_field.name in cur_type._fields_by_name:
orig_field = cur_type._fields_by_name[subtype_field.name]
raise InvalidSpec(
"Field '%s' already defined in parent '%s' (%s:%d)."
% (subtype_field.name, cur_type.name,
orig_field._ast_node.path, orig_field._ast_node.lineno),
lineno, path)
cur_type = cur_type.parent_type
# Note the discrepancy between `fields` which contains only the
# struct fields, and `_fields_by_name` which contains the struct
# fields and enumerated subtype fields.
self._fields_by_name[subtype_field.name] = subtype_field
enumerated_subtype_names.add(subtype_field.data_type.name)
self._enumerated_subtypes.append(subtype_field)
assert len(self._enumerated_subtypes) > 0
# Check that all known subtypes are listed in the enumeration.
for subtype in self.subtypes:
if subtype.name not in enumerated_subtype_names:
raise InvalidSpec(
"'%s' does not enumerate all subtypes, missing '%s'" %
(self.name, subtype.name),
                    self._ast_node.lineno)
def mcc(y, z):
"""Matthews correlation coefficient
"""
tp, tn, fp, fn = contingency_table(y, z)
    return (tp * tn - fp * fn) / K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
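# A plain-NumPy sketch of the same Matthews correlation coefficient, assuming
# binary 0/1 arrays rather than Keras tensors; the epsilon guards the division.
import numpy as np


def mcc_numpy(y_true, y_pred, eps=1e-12):
    y_true = np.asarray(y_true, dtype=bool)
    y_pred = np.asarray(y_pred, dtype=bool)
    tp = np.sum(y_true & y_pred)
    tn = np.sum(~y_true & ~y_pred)
    fp = np.sum(~y_true & y_pred)
    fn = np.sum(y_true & ~y_pred)
    denom = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) + eps
    return (tp * tn - fp * fn) / denom


print(mcc_numpy([1, 0, 1, 1, 0], [1, 0, 0, 1, 0]))  # ~0.667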
def intersection(l1, l2):
'''Returns intersection of two lists. Assumes the lists are sorted by start positions'''
if len(l1) == 0 or len(l2) == 0:
return []
out = []
l2_pos = 0
for l in l1:
while l2_pos < len(l2) and l2[l2_pos].end < l.start:
l2_pos += 1
if l2_pos == len(l2):
break
while l2_pos < len(l2) and l.intersects(l2[l2_pos]):
out.append(l.intersection(l2[l2_pos]))
l2_pos += 1
l2_pos = max(0, l2_pos - 1)
    return out
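# A self-contained variant of the sorted-interval intersection above, using a
# tiny hypothetical Interval type in place of whatever class the original list
# elements belong to.
from collections import namedtuple


class Interval(namedtuple('Interval', 'start end')):
    def intersects(self, other):
        return self.start <= other.end and other.start <= self.end

    def intersection(self, other):
        return Interval(max(self.start, other.start), min(self.end, other.end))


def intersection_sketch(l1, l2):
    out, j = [], 0
    for a in l1:
        while j < len(l2) and l2[j].end < a.start:
            j += 1
        k = j
        while k < len(l2) and a.intersects(l2[k]):
            out.append(a.intersection(l2[k]))
            k += 1
    return out


print(intersection_sketch([Interval(1, 5), Interval(8, 10)], [Interval(3, 9)]))
# [Interval(start=3, end=5), Interval(start=8, end=9)]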
def init_request(self):
""" Init the native request using the okhttp3.Request.Builder """
#: Build the request
builder = Request.Builder()
builder.url(self.url)
#: Set any headers
for k, v in self.headers.items():
builder.addHeader(k, v)
#: Get the body or generate from the data given
body = self.body
if body:
#: Create the request body
media_type = MediaType(
__id__=MediaType.parse(self.content_type))
request_body = RequestBody(
__id__=RequestBody.create(media_type, body))
#: Set the request method
builder.method(self.method, request_body)
elif self.method in ['get', 'delete', 'head']:
#: Set the method
getattr(builder, self.method)()
else:
raise ValueError("Cannot do a '{}' request "
"without a body".format(self.method))
#: Save the okhttp request
        self.request = Request(__id__=builder.build())
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except IOError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with _open_with_encoding(filename, "r", encoding=encoding) as f:
            return _from_system_newlines(f.read()), encoding
def named(self, name):
'''Returns .get_by('name', name)'''
name = self.serialize(name)
        return self.get_by('name', name)
def getDarkCurrentAverages(exposuretimes, imgs):
'''
return exposure times, image averages for each exposure time
'''
x, imgs_p = sortForSameExpTime(exposuretimes, imgs)
s0, s1 = imgs[0].shape
imgs = np.empty(shape=(len(x), s0, s1),
dtype=imgs[0].dtype)
for i, ip in zip(imgs, imgs_p):
if len(ip) == 1:
i[:] = ip[0]
else:
i[:] = averageSameExpTimes(ip)
    return x, imgs
def projScatter(lon, lat, **kwargs):
"""
Create a scatter plot on HEALPix projected axes.
Inputs: lon (deg), lat (deg)
"""
    hp.projscatter(lon, lat, lonlat=True, **kwargs)
def _string_from_ip_int(self, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if not ip_int and ip_int != 0:
ip_int = int(self._ip)
if ip_int > self._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = []
for x in range(0, 32, 4):
hextets.append('%x' % int(hex_str[x:x+4], 16))
hextets = self._compress_hextets(hextets)
        return ':'.join(hextets)
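# For comparison, the standard library's ipaddress module performs the same
# int -> compressed-hextet conversion (including the :: shortening step).
import ipaddress

ip_int = 0x20010DB8000000000000000000000001
print(str(ipaddress.IPv6Address(ip_int)))  # 2001:db8::1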
def publish(self, rawtx):
"""Publish signed <rawtx> to bitcoin network."""
tx = deserialize.signedtx(rawtx)
if not self.dryrun:
self.service.send_tx(tx)
        return serialize.txid(tx.hash())
def pgettext(msgctxt, message):
"""'Particular gettext' function.
    It works with 'msgctxt' .po modifiers and allows duplicate keys with
    different translations.
    Python 2 doesn't have support for this GNU gettext function, so we
    reimplement it. It works by joining msgctxt and msgid with a 0x04 byte separator."""
key = msgctxt + '\x04' + message
translation = get_translation().gettext(key)
    return message if translation == key else translation
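# The context-separator trick above in isolation, against a hypothetical
# in-memory catalog instead of a real gettext translation object.
CTX_SEP = '\x04'

_catalog = {
    'menu' + CTX_SEP + 'Open': 'Open file',
    'door' + CTX_SEP + 'Open': 'Unlock',
}


def pgettext_sketch(msgctxt, message):
    key = msgctxt + CTX_SEP + message
    translation = _catalog.get(key, key)
    return message if translation == key else translation


print(pgettext_sketch('menu', 'Open'))   # Open file
print(pgettext_sketch('door', 'Open'))   # Unlock
print(pgettext_sketch('other', 'Open'))  # Open (falls back to the msgid)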
def add_droplets(self, droplet):
"""
Add the Tag to a Droplet.
Attributes accepted at creation time:
droplet: array of string or array of int, or array of Droplets.
"""
droplets = droplet
if not isinstance(droplets, list):
droplets = [droplet]
# Extracting data from the Droplet object
resources = self.__extract_resources_from_droplets(droplets)
if len(resources) > 0:
return self.__add_resources(resources)
        return False
def set_webconfiguration_settings(name, settings, location=''):
r'''
Set the value of the setting for an IIS container.
Args:
name (str): The PSPath of the IIS webconfiguration settings.
settings (list): A list of dictionaries containing setting name, filter and value.
location (str): The location of the settings (optional)
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.set_webconfiguration_settings name='IIS:\' settings="[{'name': 'enabled', 'filter': 'system.webServer/security/authentication/anonymousAuthentication', 'value': False}]"
'''
ps_cmd = []
if not settings:
log.warning('No settings provided')
return False
settings = _prepare_settings(name, settings)
# Treat all values as strings for the purpose of comparing them to existing values.
for idx, setting in enumerate(settings):
if setting['name'].split('.')[-1] != 'Collection':
settings[idx]['value'] = six.text_type(setting['value'])
current_settings = get_webconfiguration_settings(
name=name, settings=settings, location=location)
if settings == current_settings:
log.debug('Settings already contain the provided values.')
return True
for setting in settings:
# If the value is numeric, don't treat it as a string in PowerShell.
if setting['name'].split('.')[-1] != 'Collection':
try:
complex(setting['value'])
value = setting['value']
except ValueError:
value = "'{0}'".format(setting['value'])
else:
configelement_list = []
for value_item in setting['value']:
configelement_construct = []
for key, value in value_item.items():
configelement_construct.append("{0}='{1}'".format(key, value))
configelement_list.append('@{' + ';'.join(configelement_construct) + '}')
value = ','.join(configelement_list)
ps_cmd.extend(['Set-WebConfigurationProperty',
'-PSPath', "'{0}'".format(name),
'-Filter', "'{0}'".format(setting['filter']),
'-Name', "'{0}'".format(setting['name']),
'-Location', "'{0}'".format(location),
'-Value', '{0};'.format(value)])
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to set settings for {0}'.format(name)
raise CommandExecutionError(msg)
    # Get the fields post-change so that we can verify that all values
# were modified successfully. Track the ones that weren't.
new_settings = get_webconfiguration_settings(
name=name, settings=settings, location=location)
failed_settings = []
for idx, setting in enumerate(settings):
is_collection = setting['name'].split('.')[-1] == 'Collection'
if ((not is_collection and six.text_type(setting['value']) != six.text_type(new_settings[idx]['value']))
or (is_collection and list(map(dict, setting['value'])) != list(map(dict, new_settings[idx]['value'])))):
failed_settings.append(setting)
if failed_settings:
log.error('Failed to change settings: %s', failed_settings)
return False
log.debug('Settings configured successfully: %s', settings)
    return True
def show(self):
"""Print innards of model, without regards to type."""
if self._future:
self._job.poll_once()
return
if self._model_json is None:
print("No model trained yet")
return
if self.model_id is None:
print("This H2OEstimator has been removed.")
return
model = self._model_json["output"]
print("Model Details")
print("=============")
print(self.__class__.__name__, ": ", self._model_json["algo_full_name"])
print("Model Key: ", self._id)
self.summary()
print()
# training metrics
tm = model["training_metrics"]
if tm: tm.show()
vm = model["validation_metrics"]
if vm: vm.show()
xm = model["cross_validation_metrics"]
if xm: xm.show()
xms = model["cross_validation_metrics_summary"]
if xms: xms.show()
if "scoring_history" in model and model["scoring_history"]:
model["scoring_history"].show()
if "variable_importances" in model and model["variable_importances"]:
model["variable_importances"].show() | Print innards of model, without regards to type. |
def _validate_class(self, cl):
"""return error if class `cl` is not found in the ontology"""
if cl not in self.schema_def.attributes_by_class:
search_string = self._build_search_string(cl)
err = self.err(
"{0} - invalid class", self._field_name_from_uri(cl),
search_string=search_string)
return ValidationWarning(ValidationResult.ERROR, err['err'],
                err['line'], err['num'])
def git_path_valid(git_path=None):
"""
Check whether the git executable is found.
"""
if git_path is None and GIT_PATH is None:
return False
if git_path is None: git_path = GIT_PATH
try:
call([git_path, '--version'])
return True
except OSError:
        return False
def _handle_consent_response(self, context):
"""
Endpoint for handling consent service response
:type context: satosa.context.Context
:rtype: satosa.response.Response
:param context: response context
:return: response
"""
consent_state = context.state[STATE_KEY]
saved_resp = consent_state["internal_resp"]
internal_response = InternalData.from_dict(saved_resp)
hash_id = self._get_consent_id(internal_response.requester, internal_response.subject_id,
internal_response.attributes)
try:
consent_attributes = self._verify_consent(hash_id)
except ConnectionError as e:
satosa_logging(logger, logging.ERROR,
"Consent service is not reachable, no consent given.", context.state)
# Send an internal_response without any attributes
consent_attributes = None
if consent_attributes is None:
satosa_logging(logger, logging.INFO, "Consent was NOT given", context.state)
# If consent was not given, then don't send any attributes
consent_attributes = []
else:
satosa_logging(logger, logging.INFO, "Consent was given", context.state)
internal_response.attributes = self._filter_attributes(internal_response.attributes, consent_attributes)
        return self._end_consent(context, internal_response)
def elements(self):
'''Iterator over elements repeating each as many times as its count.
>>> c = Counter('ABCABC')
>>> sorted(c.elements())
['A', 'A', 'B', 'B', 'C', 'C']
If an element's count has been set to zero or is a negative number,
elements() will ignore it.
'''
for elem, count in iteritems(self):
for _ in range(count):
                yield elem
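# The behaviour described in the docstring matches the standard library's
# collections.Counter.elements(), which also skips zero and negative counts.
from collections import Counter

c = Counter('ABCABC')
c['D'] = 0
print(sorted(c.elements()))  # ['A', 'A', 'B', 'B', 'C', 'C']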
def div(self, key, value=2):
"""Divides the specified key value by the specified value.
:param str|unicode key:
:param int value:
:rtype: bool
"""
        return uwsgi.cache_div(key, value, self.timeout, self.name)
def walklevel(path, depth = -1, **kwargs):
"""It works just like os.walk, but you can pass it a level parameter
that indicates how deep the recursion will go.
If depth is -1 (or less than 0), the full depth is walked.
"""
# if depth is negative, just walk
    if depth < 0:
        for root, dirs, files in os.walk(path, **kwargs):
            yield root, dirs, files
        return
    # path.count works because if a file has a "/" it will show up in the list
    # as a ":"
path = path.rstrip(os.path.sep)
num_sep = path.count(os.path.sep)
for root, dirs, files in os.walk(path, **kwargs):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + depth <= num_sep_this:
            del dirs[:]
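# An alternative, self-contained sketch of depth-limited walking that prunes by
# clearing dirs in place; depth=0 yields only the starting directory.
import os


def walklevel_sketch(path, depth=-1):
    path = path.rstrip(os.path.sep)
    base_sep = path.count(os.path.sep)
    for root, dirs, files in os.walk(path):
        yield root, dirs, files
        if depth >= 0 and root.count(os.path.sep) >= base_sep + depth:
            dirs[:] = []  # stop descending below this level


for root, dirs, files in walklevel_sketch('.', depth=0):
    print(root)  # only '.'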
def on_configurationdone_request(self, py_db, request):
'''
:param ConfigurationDoneRequest request:
'''
self.api.run(py_db)
configuration_done_response = pydevd_base_schema.build_response(request)
        return NetCommand(CMD_RETURN, 0, configuration_done_response, is_json=True)
def encode_for_locale(s):
"""
Encode text items for system locale. If encoding fails, fall back to ASCII.
"""
try:
return s.encode(LOCALE_ENCODING, 'ignore')
except (AttributeError, UnicodeDecodeError):
        return s.decode('ascii', 'ignore').encode(LOCALE_ENCODING)
def new(self, *args, **kwargs):
'''Create a new instance of :attr:`model` and commit it to the backend
server. This a shortcut method for the more verbose::
instance = manager.session().add(MyModel(**kwargs))
'''
        return self.session().add(self.model(*args, **kwargs))
def wrap_exceptions(callable):
"""Call callable into a try/except clause and translate ENOENT,
EACCES and EPERM in NoSuchProcess or AccessDenied exceptions.
"""
def wrapper(self, *args, **kwargs):
try:
return callable(self, *args, **kwargs)
except EnvironmentError:
# ENOENT (no such file or directory) gets raised on open().
# ESRCH (no such process) can get raised on read() if
# process is gone in meantime.
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
    return wrapper
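# The errno-translation idea above as a standalone decorator sketch, with a
# hypothetical NoSuchProcessSketch exception standing in for the psutil-style
# classes; the decorated reader is Linux-only and shown purely for illustration.
import errno
import functools


class NoSuchProcessSketch(Exception):
    pass


def translate_enoent(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except OSError as err:
            if err.errno in (errno.ENOENT, errno.ESRCH):
                raise NoSuchProcessSketch(err)
            raise
    return wrapper


@translate_enoent
def read_proc_status(pid):
    with open('/proc/%d/status' % pid) as f:
        return f.read()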
def publish(self, load):
'''
Publish "load" to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
# Use the Salt IPC server
if self.opts.get('ipc_mode', '') == 'tcp':
pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
else:
pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
# TODO: switch to the actual asynchronous interface
#pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient,
(pull_uri,)
)
pub_sock.connect()
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
if isinstance(load['tgt'], six.string_types):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
                # Send list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
else:
int_payload['topic_lst'] = load['tgt']
# Send it over IPC!
        pub_sock.send(int_payload)
def return_action(self, text, loc, ret):
"""Code executed after recognising a return statement"""
exshared.setpos(loc, text)
if DEBUG > 0:
print("RETURN:",ret)
if DEBUG == 2: self.symtab.display()
if DEBUG > 2: return
if not self.symtab.same_types(self.shared.function_index, ret.exp[0]):
raise SemanticException("Incompatible type in return")
#set register for function's return value to expression value
reg = self.codegen.take_function_register()
self.codegen.move(ret.exp[0], reg)
#after return statement, register for function's return value is available again
self.codegen.free_register(reg)
#jump to function's exit
        self.codegen.unconditional_jump(self.codegen.label(self.shared.function_name+"_exit", True))
def greenlet_timeouts(self):
""" This greenlet kills jobs in other greenlets if they timeout.
"""
while True:
now = datetime.datetime.utcnow()
for greenlet in list(self.gevent_pool):
job = get_current_job(id(greenlet))
if job and job.timeout and job.datestarted:
expires = job.datestarted + datetime.timedelta(seconds=job.timeout)
if now > expires:
job.kill(block=False, reason="timeout")
            time.sleep(1)
def clean_existing(self, value):
"""Clean the data and return an existing document with its fields
updated based on the cleaned values.
"""
existing_pk = value[self.pk_field]
try:
obj = self.fetch_existing(existing_pk)
except ReferenceNotFoundError:
raise ValidationError('Object does not exist.')
orig_data = self.get_orig_data_from_existing(obj)
# Clean the data (passing the new data dict and the original data to
# the schema).
value = self.schema_class(value, orig_data).full_clean()
# Set cleaned data on the object (except for the pk_field).
for field_name, field_value in value.items():
if field_name != self.pk_field:
setattr(obj, field_name, field_value)
        return obj
def update_extent_from_rectangle(self):
"""Update extent value in GUI based from the QgsMapTool rectangle.
.. note:: Delegates to update_extent()
"""
self.show()
self.canvas.unsetMapTool(self.rectangle_map_tool)
self.canvas.setMapTool(self.pan_tool)
rectangle = self.rectangle_map_tool.rectangle()
if rectangle:
self.bounding_box_group.setTitle(
self.tr('Bounding box from rectangle'))
extent = rectangle_geo_array(rectangle, self.iface.mapCanvas())
            self.update_extent(extent)
def ensure_dir(path):
"""Ensure directory exists.
Args:
path(str): dir path
"""
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
        os.makedirs(dirpath)
def _should_run(het_file):
"""Check for enough input data to proceed with analysis.
"""
has_hets = False
with open(het_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 1:
has_hets = True
break
    return has_hets
def type_alias(self):
"""Return the type alias this target was constructed via.
        For a target read from a BUILD file, this will be the target alias, like 'java_library'.
For a target constructed in memory, this will be the simple class name, like 'JavaLibrary'.
The end result is that the type alias should be the most natural way to refer to this target's
type to the author of the target instance.
:rtype: string
"""
type_alias = self._kwargs.get(self._TYPE_ALIAS_FIELD, None)
        return type_alias if type_alias is not None else type(self).__name__
def _pred(aclass):
"""
:param aclass
:return: boolean
"""
isaclass = inspect.isclass(aclass)
    return isaclass and aclass.__module__ == _pred.__module__
def sample(self, cursor):
"""Extract records randomly from the database.
Continue until the target proportion of the items have been
extracted, or until `min_items` if this is larger.
If `max_items` is non-negative, do not extract more than these.
This function is a generator, yielding items incrementally.
:param cursor: Cursor to sample
:type cursor: pymongo.cursor.Cursor
:return: yields each item
:rtype: dict
:raise: ValueError, if max_items is valid and less than `min_items`
or if target collection is empty
"""
count = cursor.count()
# special case: empty collection
if count == 0:
self._empty = True
raise ValueError("Empty collection")
# special case: entire collection
if self.p >= 1 and self.max_items <= 0:
for item in cursor:
yield item
return
# calculate target number of items to select
if self.max_items <= 0:
n_target = max(self.min_items, self.p * count)
else:
if self.p <= 0:
n_target = max(self.min_items, self.max_items)
else:
n_target = max(self.min_items, min(self.max_items, self.p * count))
if n_target == 0:
raise ValueError("No items requested")
# select first `n_target` items that pop up with
# probability self.p
# This is actually biased to items at the beginning
# of the file if n_target is smaller than (p * count),
n = 0
while n < n_target:
try:
item = next(cursor)
except StopIteration:
# need to keep looping through data until
# we get all our items!
cursor.rewind()
item = next(cursor)
if self._keep():
yield item
                n += 1
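# A rough, self-contained version of the sampling loop above: each item is kept
# with probability p (standing in for self._keep()), wrapping around the data
# until the target count is reached, much like cursor.rewind() does.
import random


def sample_sketch(items, p, min_items=0, max_items=-1, seed=0):
    rng = random.Random(seed)
    count = len(items)
    if max_items <= 0:
        n_target = max(min_items, p * count)
    else:
        n_target = max(min_items, min(max_items, p * count))
    n, i = 0, 0
    while n < n_target:
        item = items[i % count]
        i += 1
        if rng.random() < p:
            yield item
            n += 1


print(list(sample_sketch(list(range(10)), p=0.3, min_items=2)))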
def down(force):
"""
destroys an existing cluster
"""
try:
cloud_config = CloudConfig()
cloud_controller = CloudController(cloud_config)
cloud_controller.down(force)
except CloudComposeException as ex:
        print(ex)
def package_releases(self, package, url_fmt=lambda u: u):
"""List all versions of a package
Along with the version, the caller also receives the file list with all
the available formats.
"""
return [{
'name': package,
'version': version,
'urls': [self.get_urlhash(f, url_fmt) for f in files]
        } for version, files in self.storage.get(package, {}).items()]
def focus_up(pymux):
" Move focus up. "
_move_focus(pymux,
lambda wp: wp.xpos,
                lambda wp: wp.ypos - 2)
def touch(self):
"""
Respond to ``nsqd`` that you need more time to process the message.
"""
assert not self._has_responded
        self.trigger(event.TOUCH, message=self)
def close(self, reason=None):
"""Stop consuming messages and shutdown all helper threads.
This method is idempotent. Additional calls will have no effect.
Args:
reason (Any): The reason to close this. If None, this is considered
an "intentional" shutdown. This is passed to the callbacks
specified via :meth:`add_close_callback`.
"""
with self._closing:
if self._closed:
return
# Stop consuming messages.
if self.is_active:
_LOGGER.debug("Stopping consumer.")
self._consumer.stop()
self._consumer = None
# Shutdown all helper threads
_LOGGER.debug("Stopping scheduler.")
self._scheduler.shutdown()
self._scheduler = None
_LOGGER.debug("Stopping leaser.")
self._leaser.stop()
self._leaser = None
_LOGGER.debug("Stopping dispatcher.")
self._dispatcher.stop()
self._dispatcher = None
_LOGGER.debug("Stopping heartbeater.")
self._heartbeater.stop()
self._heartbeater = None
self._rpc = None
self._closed = True
_LOGGER.debug("Finished stopping manager.")
for callback in self._close_callbacks:
            callback(self, reason)
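# The idempotent-close pattern used above, reduced to a minimal sketch: a lock
# plus a _closed flag makes repeated close() calls harmless, and the close
# callbacks fire exactly once with the given reason.
import threading


class ClosableSketch:
    def __init__(self):
        self._closing = threading.Lock()
        self._closed = False
        self._close_callbacks = []

    def add_close_callback(self, callback):
        self._close_callbacks.append(callback)

    def close(self, reason=None):
        with self._closing:
            if self._closed:
                return
            self._closed = True
        for callback in self._close_callbacks:
            callback(self, reason)


c = ClosableSketch()
c.add_close_callback(lambda obj, reason: print('closed:', reason))
c.close('shutdown requested')
c.close('ignored')  # no effect; already closed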
def from_string(values, separator, remove_duplicates = False):
"""
Splits specified string into elements using a separator and assigns
the elements to a newly created AnyValueArray.
:param values: a string value to be split and assigned to AnyValueArray
:param separator: a separator to split the string
:param remove_duplicates: (optional) true to remove duplicated elements
:return: a newly created AnyValueArray.
"""
result = AnyValueArray()
if values == None or len(values) == 0:
return result
items = str(values).split(separator)
for item in items:
if (item != None and len(item) > 0) or remove_duplicates == False:
result.append(item)
        return result
def _scan_block(self, cfg_job):
"""
Scan a basic block starting at a specific address
:param CFGJob cfg_job: The CFGJob instance.
:return: a list of successors
:rtype: list
"""
addr = cfg_job.addr
current_func_addr = cfg_job.func_addr
if self._addr_hooked_or_syscall(addr):
entries = self._scan_procedure(cfg_job, current_func_addr)
else:
entries = self._scan_soot_block(cfg_job, current_func_addr)
        return entries
def constraint(self):
"""Constraint string"""
constraint_arr = []
if self._not_null:
constraint_arr.append("PRIMARY KEY" if self._pk else "NOT NULL")
if self._unique:
constraint_arr.append("UNIQUE")
return " ".join(constraint_arr) | Constraint string |
def get(self, list_id, segment_id):
"""
returns the specified list segment.
"""
        return self._mc_client._get(url=self._build_path(list_id, 'segments', segment_id))
def _safe_sendBreak_v2_7(self): # pylint: disable=invalid-name
"""! pyserial 2.7 API implementation of sendBreak/setBreak
@details
Below API is deprecated for pyserial 3.x versions!
http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.sendBreak
http://pyserial.readthedocs.org/en/latest/pyserial_api.html#serial.Serial.setBreak
"""
result = True
try:
self.sendBreak()
except: # pylint: disable=bare-except
# In Linux a termios.error is raised in sendBreak and in setBreak.
# The following setBreak() is needed to release the reset signal on the target mcu.
try:
self.setBreak(False)
except: # pylint: disable=bare-except
result = False
        return result
def init_default(m:nn.Module, func:LayerFunc=nn.init.kaiming_normal_)->None:
"Initialize `m` weights with `func` and set `bias` to 0."
if func:
if hasattr(m, 'weight'): func(m.weight)
if hasattr(m, 'bias') and hasattr(m.bias, 'data'): m.bias.data.fill_(0.)
    return m
def ip4_address(self):
"""Returns the IPv4 address of the network interface.
If multiple interfaces are provided,
the address of the first found is returned.
"""
if self._ip4_address is None and self.network is not None:
self._ip4_address = self._get_ip_address(
libvirt.VIR_IP_ADDR_TYPE_IPV4)
        return self._ip4_address
def _exec_loop(self, a, bd_all, mask):
"""Solves the kriging system by looping over all specified points.
Less memory-intensive, but involves a Python-level loop."""
npt = bd_all.shape[0]
n = self.X_ADJUSTED.shape[0]
kvalues = np.zeros(npt)
sigmasq = np.zeros(npt)
a_inv = scipy.linalg.inv(a)
for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined,
bd = bd_all[j] # otherwise it takes the non-masked elements.
if np.any(np.absolute(bd) <= self.eps):
zero_value = True
zero_index = np.where(np.absolute(bd) <= self.eps)
else:
zero_value = False
zero_index = None
b = np.zeros((n+1, 1))
b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
if zero_value:
b[zero_index[0], 0] = 0.0
b[n, 0] = 1.0
x = np.dot(a_inv, b)
kvalues[j] = np.sum(x[:n, 0] * self.VALUES)
sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])
        return kvalues, sigmasq
def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:
"""Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i))."""
result = np.array([[0] * 4] * 4, dtype=np.complex128)
order = [0, 2, 1, 3]
for i in range(4):
for j in range(4):
result[order[i], order[j]] = mat4x4[i, j]
    return result
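# A quick NumPy check of the index permutation above: permuting rows and columns
# by [0, 2, 1, 3] turns kron(a, b) into kron(b, a) for 2x2 blocks.
import numpy as np


def flip_kron_order_sketch(mat4x4):
    order = [0, 2, 1, 3]
    result = np.zeros((4, 4), dtype=np.complex128)
    for i in range(4):
        for j in range(4):
            result[order[i], order[j]] = mat4x4[i, j]
    return result


a = np.array([[1, 2], [3, 4]])
b = np.array([[0, 5], [6, 7]])
assert np.allclose(flip_kron_order_sketch(np.kron(a, b)), np.kron(b, a))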
def _push_new_tag_to_git(self):
"""
tags a new release and pushes to origin/master
"""
print("Pushing new version to git")
        ## stage the release file and init file
subprocess.call(["git", "add", self.release_file])
subprocess.call(["git", "add", self.init_file])
subprocess.call([
"git", "commit", "-m", "Updating {}/__init__.py to version {}"\
.format(self.package, self.tag)])
## push changes to origin <tracked branch>
subprocess.call(["git", "push", "origin", self.branch])
## create a new tag for the version number on deploy
if self.deploy:
subprocess.call([
"git", "tag", "-a", self.tag,
"-m", "Updating version to {}".format(self.tag),
])
subprocess.call(["git", "push", "origin"]) | tags a new release and pushes to origin/master |
def get(self, byte_sig: str, online_timeout: int = 2) -> List[str]:
"""Get a function text signature for a byte signature 1) try local
cache 2) try online lookup (if enabled; if not flagged as unavailable)
:param byte_sig: function signature hash as hexstr
:param online_timeout: online lookup timeout
:return: list of matching function text signatures
"""
byte_sig = self._normalize_byte_sig(byte_sig)
# check if we have any Solidity signatures to look up
text_sigs = self.solidity_sigs.get(byte_sig)
if text_sigs is not None:
return text_sigs
# try lookup in the local DB
with SQLiteDB(self.path) as cur:
cur.execute("SELECT text_sig FROM signatures WHERE byte_sig=?", (byte_sig,))
text_sigs = cur.fetchall()
if text_sigs:
return [t[0] for t in text_sigs]
# abort if we're not allowed to check 4byte or we already missed
# the signature, or we're on a timeout
if (
not self.enable_online_lookup
or byte_sig in self.online_lookup_miss
or time.time() < self.online_lookup_timeout
):
return []
try:
text_sigs = self.lookup_online(byte_sig=byte_sig, timeout=online_timeout)
if not text_sigs:
self.online_lookup_miss.add(byte_sig)
return []
else:
for resolved in text_sigs:
self.add(byte_sig, resolved)
return text_sigs
except FourByteDirectoryOnlineLookupError as fbdole:
# wait at least 2 mins to try again
self.online_lookup_timeout = int(time.time()) + 2 * 60
log.warning("Online lookup failed, not retrying for 2min: %s", fbdole)
            return []
def inject_code(self, payload, lpParameter = 0):
"""
Injects relocatable code into the process memory and executes it.
@warning: Don't forget to free the memory when you're done with it!
Otherwise you'll be leaking memory in the target process.
@see: L{inject_dll}
@type payload: str
@param payload: Relocatable code to run in a new thread.
@type lpParameter: int
@param lpParameter: (Optional) Parameter to be pushed in the stack.
@rtype: tuple( L{Thread}, int )
@return: The injected Thread object
and the memory address where the code was written.
@raise WindowsError: An exception is raised on error.
"""
# Uncomment for debugging...
## payload = '\xCC' + payload
# Allocate the memory for the shellcode.
lpStartAddress = self.malloc(len(payload))
# Catch exceptions so we can free the memory on error.
try:
# Write the shellcode to our memory location.
self.write(lpStartAddress, payload)
# Start a new thread for the shellcode to run.
aThread = self.start_thread(lpStartAddress, lpParameter,
bSuspended = False)
# Remember the shellcode address.
# It will be freed ONLY by the Thread.kill() method
# and the EventHandler class, otherwise you'll have to
# free it in your code, or have your shellcode clean up
# after itself (recommended).
aThread.pInjectedMemory = lpStartAddress
# Free the memory on error.
except Exception:
self.free(lpStartAddress)
raise
# Return the Thread object and the shellcode address.
        return aThread, lpStartAddress
def calc_bhhh_hessian_approximation_mixed_logit(params,
design_3d,
alt_IDs,
rows_to_obs,
rows_to_alts,
rows_to_mixers,
choice_vector,
utility_transform,
ridge=None,
weights=None):
"""
Parameters
----------
params : 1D ndarray.
All elements should by ints, floats, or longs. Should have 1 element
for each utility coefficient being estimated (i.e. num_features +
num_coefs_being_mixed).
design_3d : 3D ndarray.
All elements should be ints, floats, or longs. Should have one row per
observation per available alternative. The second axis should have as
many elements as there are draws from the mixing distributions of the
coefficients. The last axis should have one element per index
coefficient being estimated.
alt_IDs : 1D ndarray.
        All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_obs : 2D scipy sparse array.
All elements should be zeros and ones. Should have one row per
observation per available alternative and one column per observation.
This matrix maps the rows of the design matrix to the unique
observations (on the columns).
rows_to_alts : 2D scipy sparse array.
All elements should be zeros and ones. Should have one row per
observation per available alternative and one column per possible
alternative. This matrix maps the rows of the design matrix to the
possible alternatives for this dataset.
rows_to_mixers : 2D scipy sparse array.
All elements should be zeros and ones. Will map the rows of the design
matrix to the particular units that the mixing is being performed over.
Note that in the case of panel data, this matrix will be different from
`rows_to_obs`.
choice_vector : 1D ndarray.
All elements should be either ones or zeros. There should be one row
per observation per available alternative for the given observation.
Elements denote the alternative which is chosen by the given
observation with a 1 and a zero otherwise.
utility_transform : callable.
Should accept a 1D array of systematic utility values, a 1D array of
alternative IDs, and miscellaneous args and kwargs. Should return a 2D
array whose elements contain the appropriately transformed systematic
utility values, based on the current model being evaluated and the
given draw of the random coefficients. There should be one column for
each draw of the random coefficients. There should have one row per
individual per choice situation per available alternative.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If a float is
passed, then that float determines the ridge penalty for the
optimization. Default = None.
weights : 1D ndarray or None, optional.
Allows for the calculation of weighted log-likelihoods. The weights can
represent various things. In stratified samples, the weights may be
the proportion of the observations in a given strata for a sample in
relation to the proportion of observations in that strata in the
population. In latent class models, the weights may be the probability
of being a particular class. Default == None.
Returns
-------
bhhh_matrix : 2D ndarray of shape `(design.shape[1], design.shape[1])`.
The returned array is the BHHH approximation of the Fisher Information
Matrix. I.e it is the negative of the sum of the outer product of
each individual's gradient with itself.
"""
# Calculate the weights for the sample
if weights is None:
weights = np.ones(design_3d.shape[0])
weights_per_obs =\
np.max(rows_to_mixers.toarray() * weights[:, None], axis=0)
# Calculate the regular probability array. Note the implicit assumption
# that params == index coefficients.
prob_array = general_calc_probabilities(params,
design_3d,
alt_IDs,
rows_to_obs,
rows_to_alts,
utility_transform,
return_long_probs=True)
    # Calculate the simulated probability of correctly predicting each person's
# sequence of choices. Note that this function implicitly assumes that the
# mixing unit is the individual
prob_results = calc_choice_sequence_probs(prob_array,
choice_vector,
rows_to_mixers,
return_type="all")
# Calculate the sequence probabilities given random draws
# and calculate the overall simulated probabilities
sequence_prob_array = prob_results[1]
simulated_probs = prob_results[0]
# Convert the various probabilities to long format
long_sequence_prob_array = rows_to_mixers.dot(sequence_prob_array)
long_simulated_probs = rows_to_mixers.dot(simulated_probs)
# Scale sequence probabilities given random draws by simulated probabilities
scaled_sequence_probs = (long_sequence_prob_array /
long_simulated_probs[:, None])
# Calculate the scaled error. Will have shape == (num_rows, num_draws)
scaled_error = ((choice_vector[:, None] - prob_array) *
scaled_sequence_probs)
# Calculate the gradient. Note that the lines below assume that we are
# taking the gradient of an MNL model. Should refactor to make use of the
# built in gradient function for logit-type models. Should also refactor
# the gradient function for logit-type models to be able to handle 2D
# systematic utility arrays. `gradient` will have shape
# (design_3d.shape[0], design_3d.shape[2])
gradient = (scaled_error[:, :, None] * design_3d).mean(axis=1)
gradient_per_obs = rows_to_mixers.T.dot(gradient)
bhhh_matrix =\
gradient_per_obs.T.dot(weights_per_obs[:, None] * gradient_per_obs)
if ridge is not None:
bhhh_matrix -= 2 * ridge
# Note the "-1" is because we are approximating the Fisher information
# matrix which has a negative one in the front of it?
return -1 * bhhh_matrix | Parameters
----------
params : 1D ndarray.
All elements should be ints, floats, or longs. Should have 1 element
for each utility coefficient being estimated (i.e. num_features +
num_coefs_being_mixed).
design_3d : 3D ndarray.
All elements should be ints, floats, or longs. Should have one row per
observation per available alternative. The second axis should have as
many elements as there are draws from the mixing distributions of the
coefficients. The last axis should have one element per index
coefficient being estimated.
alt_IDs : 1D ndarray.
All elements should be ints. There should be one row per observation per
available alternative for the given observation. Elements denote the
alternative corresponding to the given row of the design matrix.
rows_to_obs : 2D scipy sparse array.
All elements should be zeros and ones. Should have one row per
observation per available alternative and one column per observation.
This matrix maps the rows of the design matrix to the unique
observations (on the columns).
rows_to_alts : 2D scipy sparse array.
All elements should be zeros and ones. Should have one row per
observation per available alternative and one column per possible
alternative. This matrix maps the rows of the design matrix to the
possible alternatives for this dataset.
rows_to_mixers : 2D scipy sparse array.
All elements should be zeros and ones. Will map the rows of the design
matrix to the particular units that the mixing is being performed over.
Note that in the case of panel data, this matrix will be different from
`rows_to_obs`.
choice_vector : 1D ndarray.
All elements should be either ones or zeros. There should be one row
per observation per available alternative for the given observation.
Elements denote the alternative which is chosen by the given
observation with a 1 and a zero otherwise.
utility_transform : callable.
Should accept a 1D array of systematic utility values, a 1D array of
alternative IDs, and miscellaneous args and kwargs. Should return a 2D
array whose elements contain the appropriately transformed systematic
utility values, based on the current model being evaluated and the
given draw of the random coefficients. There should be one column for
each draw of the random coefficients. There should be one row per
individual per choice situation per available alternative.
ridge : int, float, long, or None, optional.
Determines whether or not ridge regression is performed. If a float is
passed, then that float determines the ridge penalty for the
optimization. Default = None.
weights : 1D ndarray or None, optional.
Allows for the calculation of weighted log-likelihoods. The weights can
represent various things. In stratified samples, the weights may be
the proportion of the observations in a given strata for a sample in
relation to the proportion of observations in that strata in the
population. In latent class models, the weights may be the probability
of being a particular class. Default == None.
Returns
-------
bhhh_matrix : 2D ndarray of shape `(design_3d.shape[2], design_3d.shape[2])`.
The returned array is the BHHH approximation of the Fisher Information
Matrix. I.e., it is the negative of the sum of the outer product of
each individual's gradient with itself. |
def check_spelling(spelling_lang, txt):
"""
Check the spelling in the text, and compute a score. The score is the
number of words correctly (or almost correctly) spelled, minus the number
of misspelled words. Words that are "almost" correct remain neutral (-> are not
included in the score)
Returns:
A tuple : (fixed text, score)
"""
if os.name == "nt":
assert(not "check_spelling() not available on Windows")
return
with _ENCHANT_LOCK:
# Maximum distance from the first suggestion from python-enchant
words_dict = enchant.request_dict(spelling_lang)
try:
tknzr = enchant.tokenize.get_tokenizer(spelling_lang)
except enchant.tokenize.TokenizerNotFoundError:
# Fall back to default tokenization if no match for 'lang'
tknzr = enchant.tokenize.get_tokenizer()
score = 0
offset = 0
for (word, word_pos) in tknzr(txt):
if len(word) < _MIN_WORD_LEN:
continue
if words_dict.check(word):
# immediately correct words are a really good hint for
# orientation
score += 100
continue
suggestions = words_dict.suggest(word)
if (len(suggestions) <= 0):
# this word is useless. It may even indicate a bad orientation
score -= 10
continue
main_suggestion = suggestions[0]
lv_dist = Levenshtein.distance(word, main_suggestion)
if (lv_dist > _MAX_LEVENSHTEIN_DISTANCE):
# hm, this word looks like it's in a bad shape
continue
logger.debug("Spell checking: Replacing: %s -> %s"
% (word, main_suggestion))
# let's replace the word by its suggestion
pre_txt = txt[:word_pos + offset]
post_txt = txt[word_pos + len(word) + offset:]
txt = pre_txt + main_suggestion + post_txt
offset += (len(main_suggestion) - len(word))
# fixed words may be a good hint for orientation
score += 5
return (txt, score) | Check the spelling in the text, and compute a score. The score is the
number of words correctly (or almost correctly) spelled, minus the number
of misspelled words. Words that are "almost" correct remain neutral (-> are not
included in the score)
Returns:
A tuple : (fixed text, score) |
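A minimal usage sketch for check_spelling() above, assuming pyenchant (with an installed en_US dictionary) and python-Levenshtein are available; the exact corrected text depends on the dictionary's suggestions.
# Hedged example: output depends on the enchant dictionaries installed.
fixed_text, score = check_spelling("en_US", "Thiss is a simple sentense")
print(score)       # positive when most words are correct or fixable
print(fixed_text)  # e.g. "This is a simple sentence" (suggestion-dependent)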
def get_date_datetime_param(self, request, param):
"""Check the request for the provided query parameter and returns a rounded value.
:param request: WSGI request object to retrieve query parameter data.
:param param: the name of the query parameter.
"""
if param in request.GET:
param_value = request.GET.get(param, None)
# Match and interpret param if formatted as a date.
date_match = dateparse.date_re.match(param_value)
if date_match:
return timezone.datetime.combine(
dateparse.parse_date(date_match.group(0)), timezone.datetime.min.time()
)
datetime_match = dateparse.datetime_re.match(param_value)
if datetime_match:
return timezone.datetime.combine(
dateparse.parse_datetime(datetime_match.group(0)).date(),
timezone.datetime.min.time()
)
return None | Check the request for the provided query parameter and return a rounded value.
:param request: WSGI request object to retrieve query parameter data.
:param param: the name of the query parameter. |
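A hypothetical call sketch, assuming Django's test RequestFactory and an instance (here called `view`) of the class that defines the method above:
from django.test import RequestFactory

# "start" matches dateparse.datetime_re, so it is truncated to midnight.
request = RequestFactory().get("/report/", {"start": "2021-03-14T09:26:53"})
start = view.get_date_datetime_param(request, "start")
# start == datetime.datetime(2021, 3, 14, 0, 0)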
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
default_template_engine = None
if default_template_engine is None:
template_loaders = []
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if 'vars' in frame:
frame_vars = []
for k, v in frame['vars']:
v = pformat(v)
# The escape filter assumes unicode; make sure that works
if isinstance(v, six.binary_type):
v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input
# Trim large blobs of data
if v and len(v) > 4096:
v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v))
frame_vars.append((k, v))
frame['vars'] = frame_vars
frames[i] = frame
unicode_hint = ''
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, 'start', None)
end = getattr(self.exc_value, 'end', None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
# Build a short hint around the offending character range.
unicode_hint = unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))]
c = {
'is_email': False,
'unicode_hint': unicode_hint,
'frames': frames,
'sys_executable': sys.executable,
'sys_version_info': '%d.%d.%d' % sys.version_info[0:3],
'sys_path': sys.path,
}
# Check whether exception info is available
if self.exc_type:
c['exception_type'] = self.exc_type.__name__
if self.exc_value:
c['exception_value'] = self.exc_value
if frames:
c['lastframe'] = frames[-1]
return c | Return a dictionary containing traceback information. |
def add_source_get_correlated(gta, name, src_dict, correl_thresh=0.25, non_null_src=False):
"""Add a source and get the set of correlated sources
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
name : str
Name of the source we are adding
src_dict : dict
Dictionary of the source parameters
correl_thresh : float
Threshold for considering a source to be correlated
non_null_src : bool
If True, don't zero the source
Returns
-------
cdict : dict
Dictionary with names and correlation factors of correlated sources
test_src_name : str
Name of the test source
"""
if gta.roi.has_source(name):
gta.zero_source(name)
gta.update_source(name)
test_src_name = "%s_test" % name
else:
test_src_name = name
gta.add_source(test_src_name, src_dict)
gta.free_norm(test_src_name)
gta.free_shape(test_src_name, free=False)
fit_result = gta.fit(covar=True)
mask = fit_result['is_norm']
src_names = np.array(fit_result['src_names'])[mask]
idx = (src_names == test_src_name).argmax()
correl_vals = fit_result['correlation'][idx][mask]
cdict = {}
for src_name, correl_val in zip(src_names, correl_vals):
if src_name == name:
continue
if np.fabs(correl_val) > correl_thresh:
cdict[src_name] = correl_val
if not non_null_src:
gta.zero_source(test_src_name)
gta.fit(covar=True)
return cdict, test_src_name | Add a source and get the set of correlated sources
Parameters
----------
gta : `fermipy.gtaanalysis.GTAnalysis`
The analysis object
name : str
Name of the source we are adding
src_dict : dict
Dictionary of the source parameters
correl_thresh : float
Threshold for considering a source to be correlated
non_null_src : bool
If True, don't zero the source
Returns
-------
cdict : dict
Dictionary with names and correlation factors of correlated sources
test_src_name : str
Name of the test source |
def get_info(self, wiki=None, show=True, proxy=None, timeout=0):
"""
GET site info (general, statistics, siteviews, mostviewed) via
https://www.mediawiki.org/wiki/API:Siteinfo, and
https://www.mediawiki.org/wiki/Extension:PageViewInfo
Optional arguments:
- [wiki]: <str> alternate wiki site (default=en.wikipedia.org)
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- info: <dict> API:Siteinfo
- mostviewed: <list> mostviewed articles {ns=0, title, count}
- site: <str> sitename, e.g. 'enwiki'
- siteviews: <int> sitewide pageview totals over last WEEK
- visitors: <int> sitewide unique visitor total over last WEEK
- various counts: activeusers, admins, articles, edits, images
jobs, pages, queued-massmessages, siteviews, users, visitors
"""
if wiki:
self.params.update({'wiki': wiki})
self._get('siteinfo', show=False, proxy=proxy, timeout=timeout)
self._get('sitevisitors', show, proxy, timeout)
return self | GET site info (general, statistics, siteviews, mostviewed) via
https://www.mediawiki.org/wiki/API:Siteinfo, and
https://www.mediawiki.org/wiki/Extension:PageViewInfo
Optional arguments:
- [wiki]: <str> alternate wiki site (default=en.wikipedia.org)
- [show]: <bool> echo page data if true
- [proxy]: <str> use this HTTP proxy
- [timeout]: <int> timeout in seconds (0=wait forever)
Data captured:
- info: <dict> API:Siteinfo
- mostviewed: <list> mostviewed articles {ns=0, title, count}
- site: <str> sitename, e.g. 'enwiki'
- siteviews: <int> sitewide pageview totals over last WEEK
- visitors: <int> sitewide unique visitor total over last WEEK
- various counts: activeusers, admins, articles, edits, images
jobs, pages, queued-massmessages, siteviews, users, visitors |
def elliptical_arc_to(x1, y1, rx, ry, phi, large_arc_flag, sweep_flag, x2, y2):
""" An elliptical arc approximated with Bezier curves or a line segment.
Algorithm taken from the SVG 1.1 Implementation Notes:
http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
"""
# Basic normalization.
rx = abs(rx)
ry = abs(ry)
phi = phi % 360
# Check for certain special cases.
if x1==x2 and y1==y2:
# Omit the arc.
# x1 and y1 can obviously remain the same for the next segment.
return []
if rx == 0 or ry == 0:
# Line segment.
return [(x2,y2)]
rphi = radians(phi)
cphi = cos(rphi)
sphi = sin(rphi)
# Step 1: Rotate to the local coordinates.
dx = 0.5*(x1 - x2)
dy = 0.5*(y1 - y2)
x1p = cphi * dx + sphi * dy
y1p = -sphi * dx + cphi * dy
# Ensure that rx and ry are large enough to have a unique solution.
lam = (x1p/rx)**2 + (y1p/ry)**2
if lam > 1.0:
scale = sqrt(lam)
rx *= scale
ry *= scale
# Step 2: Solve for the center in the local coordinates.
num = max((rx*ry)**2 - (rx*y1p)**2 - (ry*x1p)**2, 0.0)
den = ((rx*y1p)**2 + (ry*x1p)**2)
a = sqrt(num / den)
cxp = a * rx*y1p/ry
cyp = -a * ry*x1p/rx
if large_arc_flag == sweep_flag:
cxp = -cxp
cyp = -cyp
# Step 3: Transform back.
mx = 0.5*(x1+x2)
my = 0.5*(y1+y2)
# Step 4: Compute the start angle and the angular extent of the arc.
# Note that theta1 is local to the phi-rotated coordinate space.
dx = (x1p-cxp) / rx
dy = (y1p-cyp) / ry
dx2 = (-x1p-cxp) / rx
dy2 = (-y1p-cyp) / ry
theta1 = angle(1,0,dx,dy)
dtheta = angle(dx,dy,dx2,dy2)
if not sweep_flag and dtheta > 0:
dtheta -= 360
elif sweep_flag and dtheta < 0:
dtheta += 360
# Step 5: Break it apart into Bezier arcs.
p = []
control_points = bezier_arc(cxp-rx,cyp-ry,cxp+rx,cyp+ry, theta1, dtheta)
for x1p,y1p, x2p,y2p, x3p,y3p, x4p,y4p in control_points:
# Transform them back to absolute space.
p.append((
transform_from_local(x2p,y2p,cphi,sphi,mx,my) +
transform_from_local(x3p,y3p,cphi,sphi,mx,my) +
transform_from_local(x4p,y4p,cphi,sphi,mx,my)
))
return p | An elliptical arc approximated with Bezier curves or a line segment.
Algorithm taken from the SVG 1.1 Implementation Notes:
http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes |
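A small usage sketch, assuming the module-level helpers it relies on (angle, bezier_arc, transform_from_local, and the math imports) are defined alongside it:
# Arc from (0, 0) to (100, 50) with rx=100, ry=50, no rotation, sweep=1.
segments = elliptical_arc_to(0, 0, 100, 50, 0, 0, 1, 100, 50)
for seg in segments:
    # Each segment is a 6-tuple of Bezier control/end-point coordinates;
    # degenerate inputs return [] (same point) or [(x2, y2)] (zero radius).
    print(seg)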
def main():
"""
NAME
k15_s.py
DESCRIPTION
converts .k15 format data to .s format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive entry of options
-f FILE, specifies input file, default: standard input
-F FILE, specifies output file, default: standard output
-crd [g, t] specifies [g]eographic rotation,
or geographic AND tectonic rotation
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
OUTPUT
least squares matrix elements and sigma:
x11,x22,x33,x12,x23,x13,sigma
"""
firstline,itilt,igeo,linecnt,key=1,0,0,0,""
out=""
data,k15=[],[]
dir='./'
ofile=""
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
dir=sys.argv[ind+1]+'/'
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
if '-i' in sys.argv:
file=input("Input file name [.k15 format]: ")
f=open(file,'r')
data=f.readlines()
f.close()
file=input("Output file name [.s format]: ")
out=open(file,'w')
print (" [g]eographic, [t]ilt corrected, ")
tg=input(" [return for specimen coordinates]: ")
if tg=='g':
igeo=1
elif tg=='t':
igeo,itilt=1,1
elif '-f' in sys.argv:
ind=sys.argv.index('-f')
file=dir+sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
f.close()
else:
data= sys.stdin.readlines()
if len(data)==0:
print(main.__doc__)
sys.exit()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
ofile=dir+sys.argv[ind+1]
out=open(ofile,'w')
if '-crd' in sys.argv:
ind=sys.argv.index('-crd')
tg=sys.argv[ind+1]
if tg=='g':igeo=1
if tg=='t': igeo,itilt=1,1
for line in data:
rec=line.split()
if firstline==1:
firstline=0
nam=rec[0]
if igeo==1: az,pl=float(rec[1]),float(rec[2])
if itilt==1: bed_az,bed_dip=90.+float(rec[3]),float(rec[4])
else:
linecnt+=1
for i in range(5):
k15.append(float(rec[i]))
if linecnt==3:
sbar,sigma,bulk=pmag.dok15_s(k15)
if igeo==1: sbar=pmag.dosgeo(sbar,az,pl)
if itilt==1: sbar=pmag.dostilt(sbar,bed_az,bed_dip)
outstring=""
for s in sbar:outstring+='%10.8f '%(s)
outstring+='%10.8f'%(sigma)
if out=="":
print(outstring)
else:
out.write(outstring+'\n')
linecnt,firstline,k15=0,1,[]
if ofile!="":print ('Output saved in ',ofile) | NAME
k15_s.py
DESCRIPTION
converts .k15 format data to .s format.
assumes Jelinek Kappabridge measurement scheme
SYNTAX
k15_s.py [-h][-i][command line options][<filename]
OPTIONS
-h prints help message and quits
-i allows interactive entry of options
-f FILE, specifies input file, default: standard input
-F FILE, specifies output file, default: standard output
-crd [g, t] specifies [g]eographic rotation,
or geographic AND tectonic rotation
INPUT
name [az,pl,strike,dip], followed by
3 rows of 5 measurements for each specimen
OUTPUT
least squares matrix elements and sigma:
x11,x22,x33,x12,x23,x13,sigma |
def contains_some_of(self, elements):
"""
Ensures :attr:`subject` contains at least one of *elements*, which must be an iterable.
"""
if all(e not in self._subject for e in elements):
raise self._error_factory(_format("Expected {} to have some of {}", self._subject, elements))
return ChainInspector(self._subject) | Ensures :attr:`subject` contains at least one of *elements*, which must be an iterable. |
def random_state(state=None):
"""
Helper function for processing random_state arguments.
Parameters
----------
state : int, np.random.RandomState, None.
If receives an int, passes to np.random.RandomState() as seed.
If receives an np.random.RandomState object, just returns object.
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
Default None.
Returns
-------
np.random.RandomState
"""
if is_integer(state):
return np.random.RandomState(state)
elif isinstance(state, np.random.RandomState):
return state
elif state is None:
return np.random
else:
raise ValueError("random_state must be an integer, a numpy "
"RandomState, or None") | Helper function for processing random_state arguments.
Parameters
----------
state : int, np.random.RandomState, None.
If receives an int, passes to np.random.RandomState() as seed.
If receives an np.random.RandomState object, just returns object.
If receives `None`, returns np.random.
If receives anything else, raises an informative ValueError.
Default None.
Returns
-------
np.random.RandomState |
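A quick sketch of the three accepted inputs, assuming the `is_integer` helper used above is importable in the same module:
import numpy as np

rng_seeded = random_state(42)                        # new seeded RandomState
rng_passed = random_state(np.random.RandomState(7))  # returned unchanged
rng_global = random_state(None)                      # the np.random module
print(rng_seeded.randint(0, 10), rng_passed.randint(0, 10))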
def write_sampler_metadata(self, sampler):
"""Writes the sampler's metadata."""
self.attrs['sampler'] = sampler.name
self[self.sampler_group].attrs['nwalkers'] = sampler.nwalkers
# write the model's metadata
sampler.model.write_metadata(self) | Writes the sampler's metadata. |
def get_dist(dist):
"""Return a distribution object from scipy.stats.
"""
from scipy import stats
dc = getattr(stats, dist, None)
if dc is None:
e = "Statistical distribution `{}` is not in scipy.stats.".format(dist)
raise ValueError(e)
return dc | Return a distribution object from scipy.stats. |
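Usage sketch, assuming SciPy is installed and get_dist() above is in scope:
norm = get_dist("norm")                        # scipy.stats.norm
print(norm.pdf(0.0))                           # ~0.3989
print(get_dist("gamma").rvs(a=2.0, size=3))    # three random draws
# get_dist("not_a_distribution") raises ValueError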
def _get_prefixes(self, metric_type):
"""Get prefixes where applicable
Add the metric-type prefix (e.g. counters, timers) if the
:attr:`prepend_metric_type` flag is True.
:param str metric_type: The metric type
:rtype: list
"""
prefixes = []
if self._prepend_metric_type:
prefixes.append(self.METRIC_TYPES[metric_type])
return prefixes | Get prefixes where applicable
Add the metric-type prefix (e.g. counters, timers) if the
:attr:`prepend_metric_type` flag is True.
:param str metric_type: The metric type
:rtype: list |
def cleanTempDirs(job):
"""Remove temporarly created directories."""
if job is CWLJob and job._succeeded: # Only CWLJobs have this attribute.
for tempDir in job.openTempDirs:
if os.path.exists(tempDir):
shutil.rmtree(tempDir)
job.openTempDirs = [] | Remove temporarily created directories.
def drop_if_exists(self, table):
"""
Drop a table from the schema.
:param table: The table
:type table: str
"""
blueprint = self._create_blueprint(table)
blueprint.drop_if_exists()
self._build(blueprint) | Drop a table from the schema.
:param table: The table
:type table: str |
def get_events(fd, timeout=None):
"""get_events(fd[, timeout])
Return a list of InotifyEvent instances representing events read from
inotify. If timeout is None, this will block forever until at least one
event can be read. Otherwise, timeout should be an integer or float
specifying a timeout in seconds. If get_events times out waiting for
events, an empty list will be returned. If timeout is zero, get_events
will not block.
This version of get_events() will only block the current greenlet.
"""
(rlist, _, _) = select([fd], [], [], timeout)
if not rlist:
return []
events = []
while True:
buf = os.read(fd, _BUF_LEN)
i = 0
while i < len(buf):
(wd, mask, cookie, len_) = struct.unpack_from(_EVENT_FMT, buf, i)
name = None
if len_ > 0:
start = i + _EVENT_SIZE
end = start + len_
# remove \0 terminator and padding
name = buf[start:end].rstrip(b'\0').decode(ENCODING)
events.append(InotifyEvent(wd, mask, cookie, name))
i += _EVENT_SIZE + len_
(rlist, _, _) = select([fd], [], [], 0)
if not rlist:
break
return events | get_events(fd[, timeout])
Return a list of InotifyEvent instances representing events read from
inotify. If timeout is None, this will block forever until at least one
event can be read. Otherwise, timeout should be an integer or float
specifying a timeout in seconds. If get_events times out waiting for
events, an empty list will be returned. If timeout is zero, get_events
will not block.
This version of get_events() will only block the current greenlet. |
def encode_dict(dynamizer, value):
""" Encode a dict for the DynamoDB format """
encoded_dict = {}
for k, v in six.iteritems(value):
encoded_type, encoded_value = dynamizer.raw_encode(v)
encoded_dict[k] = {
encoded_type: encoded_value,
}
return 'M', encoded_dict | Encode a dict for the DynamoDB format |
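A hedged illustration of the expected output shape; `dynamizer` is assumed to be a dynamo3/boto-style object whose raw_encode() returns a (type_tag, encoded_value) pair:
# Hypothetical dynamizer: raw_encode("alice") -> ('S', 'alice'),
# raw_encode(42) -> ('N', '42').
type_tag, encoded = encode_dict(dynamizer, {"name": "alice", "age": 42})
# type_tag == 'M'
# encoded  == {'name': {'S': 'alice'}, 'age': {'N': '42'}}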
def _count_spaces_startswith(line):
'''
Count the number of spaces before the first character
'''
if line.split('#')[0].strip() == "":
return None
spaces = 0
for i in line:
if i.isspace():
spaces += 1
else:
return spaces | Count the number of spaces before the first character |
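Behaviour sketch on a few representative lines:
print(_count_spaces_startswith("    volumes:"))         # 4
print(_count_spaces_startswith("key: value"))           # 0
print(_count_spaces_startswith("   # only a comment"))  # None (comment/blank)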
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__ | Get an mro for a type or classic class |
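On Python 3 every class is new-style, so only the first branch matters in the sketch below; the classic-class fallback is a Python 2 concern.
class Base(object):
    pass

class Child(Base):
    pass

print(_get_mro(Child))  # (<class 'Child'>, <class 'Base'>, <class 'object'>)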
def plot(self, joints, ax, target=None, show=False):
"""Plots the Chain using Matplotlib
Parameters
----------
joints: list
The list of the positions of each joint
ax: matplotlib.axes.Axes
A matplotlib axes
target: numpy.array
An optional target
show: bool
Display the axes. Defaults to False
"""
from . import plot_utils
if ax is None:
# If ax is not given, create one
ax = plot_utils.init_3d_figure()
plot_utils.plot_chain(self, joints, ax)
plot_utils.plot_basis(ax, self._length)
# Plot the goal position
if target is not None:
plot_utils.plot_target(target, ax)
if show:
plot_utils.show_figure() | Plots the Chain using Matplotlib
Parameters
----------
joints: list
The list of the positions of each joint
ax: matplotlib.axes.Axes
A matplotlib axes
target: numpy.array
An optional target
show: bool
Display the axes. Defaults to False
def img_from_vgg(x):
'''Decondition an image from the VGG16 model.'''
x = x.transpose((1, 2, 0))
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:,:,::-1] # to RGB
return x | Decondition an image from the VGG16 model. |
def _parse_byte_data(self, byte_data):
"""Extract the values from byte string."""
chunks = unpack('<iiiii', byte_data[:self.size])
det_id, run, time_slice, time_stamp, ticks = chunks
self.det_id = det_id
self.run = run
self.time_slice = time_slice
self.time_stamp = time_stamp
self.ticks = ticks | Extract the values from byte string. |
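A sketch of the expected byte layout: five little-endian 32-bit ints, so `self.size` is presumably 20 bytes; `header` below stands in for an instance of the surrounding class.
import struct

payload = struct.pack('<iiiii', 14, 6201, 42, 1567000000, 160)
header._parse_byte_data(payload)
# header.det_id == 14, header.run == 6201, header.time_slice == 42,
# header.time_stamp == 1567000000, header.ticks == 160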
def is_contained_in(pe_pe, root):
'''
Determine if a PE_PE is contained within an EP_PKG or a C_C.
'''
if not pe_pe:
return False
if type(pe_pe).__name__ != 'PE_PE':
pe_pe = one(pe_pe).PE_PE[8001]()
ep_pkg = one(pe_pe).EP_PKG[8000]()
c_c = one(pe_pe).C_C[8003]()
if root in [ep_pkg, c_c]:
return True
elif is_contained_in(ep_pkg, root):
return True
elif is_contained_in(c_c, root):
return True
else:
return False | Determine if a PE_PE is contained within an EP_PKG or a C_C.
def _get_signed_predecessors(im, node, polarity):
"""Get upstream nodes in the influence map.
Return the upstream nodes along with the overall polarity of the path
to that node by accounting for the polarity of the path to the given node
and the polarity of the edge between the given node and its immediate
predecessors.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
node : str
The node (rule name) in the influence map to get predecessors (upstream
nodes) for.
polarity : int
Polarity of the overall path to the given node.
Returns
-------
generator of tuples, (node, polarity)
Each tuple returned contains two elements, a node (string) and the
polarity of the overall path (int) to that node.
"""
for pred in im.predecessors(node):
pred_edge = (pred, node)
yield (pred, _get_edge_sign(im, pred_edge) * polarity) | Get upstream nodes in the influence map.
Return the upstream nodes along with the overall polarity of the path
to that node by accounting for the polarity of the path to the given node
and the polarity of the edge between the given node and its immediate
predecessors.
Parameters
----------
im : networkx.MultiDiGraph
Graph containing the influence map.
node : str
The node (rule name) in the influence map to get predecessors (upstream
nodes) for.
polarity : int
Polarity of the overall path to the given node.
Returns
-------
generator of tuples, (node, polarity)
Each tuple returned contains two elements, a node (string) and the
polarity of the overall path (int) to that node. |
def zoneToRegion(zone):
"""Get a region (e.g. us-west-2) from a zone (e.g. us-west-1c)."""
from toil.lib.context import Context
return Context.availability_zone_re.match(zone).group(1) | Get a region (e.g. us-west-2) from a zone (e.g. us-west-2a).
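The helper just strips the trailing zone letter; a standalone sketch of the same idea, with a hypothetical pattern in place of Context.availability_zone_re:
import re

# Hypothetical stand-in for Context.availability_zone_re.
availability_zone_re = re.compile(r'^([a-z]{2}-[a-z]+-[1-9][0-9]*)([a-z])$')
print(availability_zone_re.match('us-west-2a').group(1))  # 'us-west-2'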
def add_user_to_group(user_name, group_name, region=None, key=None, keyid=None,
profile=None):
'''
Add user to group.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.add_user_to_group myuser mygroup
'''
user = get_user(user_name, region, key, keyid, profile)
if not user:
log.error('Username : %s does not exist.', user_name)
return False
if user_exists_in_group(user_name, group_name, region=region, key=key,
keyid=keyid, profile=profile):
return True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.add_user_to_group(group_name, user_name)
if not info:
return False
return info
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to add IAM user %s to group %s.', user_name, group_name)
return False | Add user to group.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.add_user_to_group myuser mygroup |
def maxsize(self, size):
"""Resize the cache, evicting the oldest items if necessary."""
if size < 0:
raise ValueError('maxsize must be non-negative')
with self._lock:
self._enforce_size_limit(size)
self._maxsize = size | Resize the cache, evicting the oldest items if necessary. |
def time_report(self, include_overhead=False, header=None,
include_server=True, digits=4):
"""
Returns a str table of the times for this api call
:param include_overhead: bool if True include information from
overhead, such as the time for this class code
:param header: bool if True includes the column header
:param include_server: bool if True includes times reported by the
server in the header
:param digits: int of the number of significant digits
:return: str table of the times for the api call
"""
try:
self._timestamps.setdefault('report', time.time())
header = header or ['Message', 'Start', 'End', 'Sum', 'Count']
ret = []
if include_overhead:
ret.append(self.time_report_item(
'create', 'Overhead from api call "Create"'))
ret.append(self.time_report_item(
'setup', 'Overhead from api call "Setup"'))
if 'repeat' in self._timestamps:
ret.append(self.time_report_item(
'repeat', 'First Request of the api call'))
ret.append(self.time_report_item('send', 'Send the api call'))
if self._server_timing and include_server:
send_start = ret[-1]['Start']
delta = max(0, ret[-1]['Sum'] - (
self._server_timing['End'] - self._server_timing['Start']))
if include_overhead:
ret.append(
{'Message': "Internet overhead", "Start": send_start,
"End": ret[0]['Start'] + delta, 'Sum': delta,
'Count': 1})
if 'Overhead' in self._server_timing:
ret.append({'Message': "Time profile overhead",
"Start": send_start + delta,
'Sum': self._server_timing['Overhead'],
'End': send_start + delta +
self._server_timing['End'] -
self._server_timing['Start'],
'Count': sum(
[len(msg.get('Times', [])) for msg in
self._server_timing[
'Messages']]) + 1})
for msg in self._server_timing['Messages']:
ret.append(msg.copy())
ret[-1]['Start'] = ret[-1].setdefault(
'Start', 0) + delta + send_start
ret[-1]['End'] = ret[-1].setdefault(
'End', ret[-1]['Sum']) + delta + send_start
else:
ret += self._server_timing['Messages']
if 'stream' in self._timestamps:
ret.append(
self.time_report_item('stream', 'Streaming the api call'))
if include_overhead:
ret.append(self.time_report_item(
'receive', 'Overhead from api call "Post Processing"'))
return 'Total Time: %s \t\tStart Time: %s\n%s' % (
round(self.timedelta.total_seconds(), digits),
reformat_date(self._timestamps.get('send', '')),
str(ReprListDict(ret, col_names=header,
digits=digits).list_of_list()))
except Exception as ex:
return "Exception creating time report with %s" % ex.message | Returns a str table of the times for this api call
:param include_overhead: bool if True include information from
overhead, such as the time for this class code
:param header: bool if True includes the column header
:param include_server: bool if True includes times reported by the
server in the header
:param digits: int of the number of significant digits
:return: str table of the times for the api call |
def get_byte(self, i):
"""Get byte."""
value = []
for x in range(2):
c = next(i)
if c.lower() in _HEX:
value.append(c)
else: # pragma: no cover
raise SyntaxError('Invalid byte character at %d!' % (i.index - 1))
return ''.join(value) | Get byte. |
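A minimal sketch, with `parser` standing in for an instance of the class defining get_byte(); note the error branch expects the iterator to expose an `index` attribute, which a plain str iterator lacks.
it = iter("af")              # two hex characters
print(parser.get_byte(it))   # 'af'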
def pi_zoom_origin(self, viewer, event, msg=True):
"""Like pi_zoom(), but pans the image as well to keep the
coordinate under the cursor in that same position relative
to the window.
"""
origin = (event.data_x, event.data_y)
return self._pinch_zoom_rotate(viewer, event.state, event.rot_deg,
event.scale, msg=msg, origin=origin) | Like pi_zoom(), but pans the image as well to keep the
coordinate under the cursor in that same position relative
to the window. |
def get_netloc(self):
"""Determine scheme, host and port for this connection taking
proxy data into account.
@return: tuple (scheme, host, port)
@rtype: tuple(string, string, int)
"""
if self.proxy:
scheme = self.proxytype
host = self.proxyhost
port = self.proxyport
else:
scheme = self.scheme
host = self.host
port = self.port
return (scheme, host, port) | Determine scheme, host and port for this connection taking
proxy data into account.
@return: tuple (scheme, host, port)
@rtype: tuple(string, string, int) |
def deserialize(self, value, **kwargs):
"""Deserialization of value.
:return: Deserialized value.
:raises: :class:`halogen.exception.ValidationError` exception if value is not valid.
"""
for validator in self.validators:
validator.validate(value, **kwargs)
return value | Deserialization of value.
:return: Deserialized value.
:raises: :class:`halogen.exception.ValidationError` exception if value is not valid. |
def _reciprocal_condition_number(lu_mat, one_norm):
r"""Compute reciprocal condition number of a matrix.
Args:
lu_mat (numpy.ndarray): A 2D array of a matrix :math:`A` that has been
LU-factored, with the non-diagonal part of :math:`L` stored in the
strictly lower triangle and :math:`U` stored in the upper triangle.
one_norm (float): The 1-norm of the original matrix :math:`A`.
Returns:
float: The reciprocal condition number of :math:`A`.
Raises:
OSError: If SciPy is not installed.
RuntimeError: If the reciprocal 1-norm condition number could not
be computed.
"""
if _scipy_lapack is None:
raise OSError("This function requires SciPy for calling into LAPACK.")
# pylint: disable=no-member
rcond, info = _scipy_lapack.dgecon(lu_mat, one_norm)
# pylint: enable=no-member
if info != 0:
raise RuntimeError(
"The reciprocal 1-norm condition number could not be computed."
)
return rcond | r"""Compute reciprocal condition number of a matrix.
Args:
lu_mat (numpy.ndarray): A 2D array of a matrix :math:`A` that has been
LU-factored, with the non-diagonal part of :math:`L` stored in the
strictly lower triangle and :math:`U` stored in the upper triangle.
one_norm (float): The 1-norm of the original matrix :math:`A`.
Returns:
float: The reciprocal condition number of :math:`A`.
Raises:
OSError: If SciPy is not installed.
RuntimeError: If the reciprocal 1-norm condition number could not
be computed. |
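Usage sketch, assuming SciPy is installed: the LU factors come from scipy.linalg.lu_factor and the 1-norm from NumPy.
import numpy as np
import scipy.linalg

A = np.array([[4.0, 2.0], [1.0, 3.0]])
lu_mat, _piv = scipy.linalg.lu_factor(A)
one_norm = np.linalg.norm(A, 1)
rcond = _reciprocal_condition_number(lu_mat, one_norm)
print(rcond)   # cond_1(A) is approximately 1.0 / rcond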
def set_user_attribute(self, user_name, key, value):
"""Sets a user attribute
:param user_name: name of user to modify
:param key: key of the attribute to set
:param value: value to set
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned
"""
res = self._make_ocs_request(
'PUT',
self.OCS_SERVICE_CLOUD,
'users/' + parse.quote(user_name),
data={'key': self._encode_string(key),
'value': self._encode_string(value)}
)
if res.status_code == 200:
tree = ET.fromstring(res.content)
self._check_ocs_status(tree, [100])
return True
raise HTTPResponseError(res) | Sets a user attribute
:param user_name: name of user to modify
:param key: key of the attribute to set
:param value: value to set
:returns: True if the operation succeeded, False otherwise
:raises: HTTPResponseError in case an HTTP error status was returned |
def get_content(self, obj):
"""
Obtain the QuerySet of content items.
:param obj: Page object.
:return: List of rendered content items.
"""
serializer = ContentSerializer(
instance=obj.contentitem_set.all(),
many=True,
context=self.context,
)
return serializer.data | Obtain the QuerySet of content items.
:param obj: Page object.
:return: List of rendered content items. |
def p_elseif_list(p):
'''elseif_list : empty
| elseif_list ELSEIF LPAREN expr RPAREN statement'''
if len(p) == 2:
p[0] = []
else:
p[0] = p[1] + [ast.ElseIf(p[4], p[6], lineno=p.lineno(2))] | elseif_list : empty
| elseif_list ELSEIF LPAREN expr RPAREN statement |