def _post(url, headers={}, data=None, files=None):
"""Tries to POST data to an endpoint"""
try:
response = requests.post(url, headers=headers, data=data, files=files, verify=VERIFY_SSL)
return _process_response(response)
except requests.exceptions.RequestException as e:
_log_and_raise_exception('Error connecting with foursquare API', e)
def strip_code(self, normalize=True, collapse=True,
keep_template_params=False):
"""Return a rendered string without unprintable code such as templates.
The way a node is stripped is handled by the
:meth:`~.Node.__strip__` method of :class:`.Node` objects, which
generally return a subset of their nodes or ``None``. For example,
templates and tags are removed completely, links are stripped to just
their display part, headings are stripped to just their title.
If *normalize* is ``True``, various things may be done to strip code
further, such as converting HTML entities like ``&Sigma;``, ``&#931;``,
and ``&#x3a3;`` to ``Σ``. If *collapse* is ``True``, we will try to
remove excess whitespace as well (three or more newlines are converted
to two, for example). If *keep_template_params* is ``True``, then
template parameters will be preserved in the output (normally, they are
removed completely).
"""
kwargs = {
"normalize": normalize,
"collapse": collapse,
"keep_template_params": keep_template_params
}
nodes = []
for node in self.nodes:
stripped = node.__strip__(**kwargs)
if stripped:
nodes.append(str(stripped))
if collapse:
stripped = "".join(nodes).strip("\n")
while "\n\n\n" in stripped:
stripped = stripped.replace("\n\n\n", "\n\n")
return stripped
else:
return "".join(nodes) | 0.00186 |
def search(self, lat_range=None, long_range=None, variance=None,
bssid=None, ssid=None,
last_update=None,
address=None, state=None, zipcode=None,
on_new_page=None, max_results=100):
"""
Search the Wigle wifi database for matching entries. The following
criteria are supported:
Args:
lat_range ((float, float)): latitude range
long_range ((float, float)): longitude range
variance (float): radius tolerance in degrees
bssid (str): BSSID/MAC of AP
ssid (str): SSID of network
last_update (datetime): when was the AP last seen
address (str): location, address
state (str): location, state
zipcode (str): location, zip code
on_new_page (func(int)): callback to notify when requesting new
page of results
Returns:
[dict]: list of dicts describing matching wifis
"""
params = {
'latrange1': lat_range[0] if lat_range else "",
'latrange2': lat_range[1] if lat_range else "",
'longrange1': long_range[0] if long_range else "",
'longrange2': long_range[1] if long_range else "",
'variance': str(variance) if variance else "0.01",
'netid': bssid or "",
'ssid': ssid or "",
'lastupdt': last_update.strftime("%Y%m%d%H%M%S") if last_update else "",
'addresscode': address or "",
'statecode': state or "",
'zipcode': zipcode or "",
'Query': "Query",
}
wifis = []
while True:
if on_new_page:
on_new_page(params.get('first', 1))
resp = self._authenticated_request('jsonSearch', params=params)
data = resp.json()
if not data['success']:
raise_wigle_error(data)
for result in data['results'][:max_results-len(wifis)]:
normalise_entry(result)
wifis.append(result)
if data['resultCount'] < WIGLE_PAGESIZE or len(wifis) >= max_results:
break
params['first'] = data['last'] + 1
return wifis
def getcodeobj(consts, intcode, newcodeobj, oldcodeobj):
"""Get code object from decompiled code.
:param list consts: constants to add in the result.
:param list intcode: list of byte code to use.
:param newcodeobj: new code object with empty body.
:param oldcodeobj: old code object.
:return: new code object to produce."""
# get code string
if PY3:
codestr = bytes(intcode)
else:
codestr = reduce(lambda x, y: x + y, (chr(b) for b in intcode))
# get vargs
vargs = [
newcodeobj.co_argcount, newcodeobj.co_nlocals, newcodeobj.co_stacksize,
newcodeobj.co_flags, codestr, tuple(consts), newcodeobj.co_names,
newcodeobj.co_varnames, newcodeobj.co_filename, newcodeobj.co_name,
newcodeobj.co_firstlineno, newcodeobj.co_lnotab,
oldcodeobj.co_freevars, newcodeobj.co_cellvars
]
if PY3:
vargs.insert(1, newcodeobj.co_kwonlyargcount)
# instantiate a new code object
result = type(newcodeobj)(*vargs)
return result
def write(self, data, mode='w'):
"""
Write data to the file.
`data` is the data to write
`mode` is the mode argument to pass to `open()`
"""
with open(self.path, mode) as f:
f.write(data)
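A short usage sketch; the constructor name is hypothetical since the surrounding class is not shown:

f = File('/tmp/notes.txt')    # hypothetical wrapper around a path on disk
f.write('hello\n')            # default mode 'w' truncates first
f.write('world\n', mode='a')  # pass mode='a' to append instead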
def search(self, **kwargs):
"""
Method to search VLANs based on extended search.
:param search: Dict containing QuerySets to find VLANs.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing VLANs
"""
return super(ApiVlan, self).get(self.prepare_url('api/v3/vlan/',
kwargs))
def _set_ldp_hello_timeout_basic(self, v, load=False):
"""
Setter method for ldp_hello_timeout_basic, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/ldp_hello_timeout_basic (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_hello_timeout_basic is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_hello_timeout_basic() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="ldp-hello-timeout-basic", rest_name="hello-timeout-link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'In seconds (2-65535, default 15)', u'cli-full-no': None, u'alt-name': u'hello-timeout-link'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ldp_hello_timeout_basic must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'2..65535']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(15), is_leaf=True, yang_name="ldp-hello-timeout-basic", rest_name="hello-timeout-link", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'In seconds (2-65535, default 15)', u'cli-full-no': None, u'alt-name': u'hello-timeout-link'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""",
})
self.__ldp_hello_timeout_basic = t
if hasattr(self, '_set'):
self._set()
def rl_get_point() -> int: # pragma: no cover
"""
Returns the offset of the current cursor position in rl_line_buffer
"""
if rl_type == RlType.GNU:
return ctypes.c_int.in_dll(readline_lib, "rl_point").value
elif rl_type == RlType.PYREADLINE:
return readline.rl.mode.l_buffer.point
else:
return 0
def query_bypass(self, query, raw_output=True):
''' Bypass query: field checks and validation are skipped, and the query object is executed directly by pymongo.
:param raw_output: Skip OmMongo ORM layer (default: True)
'''
if not isinstance(query, dict):
raise BadQueryException('Query must be dict.')
self.__query = query
if raw_output:
self._raw_output = True
return self.__get_query_result().cursor
else:
return self
def __register_class(self, parsed_config):
"""Register the class implementing this config, so we only add it once.
Args:
parsed_config: The JSON object with the API configuration being added.
Raises:
ApiConfigurationError: If the class has already been registered.
"""
methods = parsed_config.get('methods')
if not methods:
return
# Determine the name of the class that implements this configuration.
service_classes = set()
for method in methods.itervalues():
rosy_method = method.get('rosyMethod')
if rosy_method and '.' in rosy_method:
method_class = rosy_method.split('.', 1)[0]
service_classes.add(method_class)
for service_class in service_classes:
if service_class in self.__registered_classes:
raise api_exceptions.ApiConfigurationError(
'API class %s has already been registered.' % service_class)
self.__registered_classes.add(service_class)
def safe_unit_norm(a):
"""
Ensure that the vector or vectors have unit norm
"""
if 1 == len(a.shape):
n = np.linalg.norm(a)
if n:
return a / n
return a
norm = np.sum(np.abs(a) ** 2, axis=-1) ** (1. / 2)
# Dividing by a norm of zero will cause a warning to be issued. Set those
# values to another number. It doesn't matter what, since we'll be dividing
# a vector of zeros by the number, and 0 / N always equals 0.
norm[norm == 0] = -1e12
return a / norm[:, np.newaxis]
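A quick sanity check of the zero-vector handling, assuming safe_unit_norm is importable from this module:

import numpy as np

vecs = np.array([[3.0, 4.0], [0.0, 0.0]])
out = safe_unit_norm(vecs)
assert np.isclose(np.linalg.norm(out[0]), 1.0)  # non-zero rows become unit vectors
assert np.allclose(out[1], 0.0)                 # zero rows stay zero, no divide-by-zero warning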
def get_access_string(self, shape, table):
"""Returns a string, with which the selection can be accessed
Parameters
----------
shape: 3-tuple of Integer
\tShape of grid, for which the generated keys are valid
table: Integer
\tThird component of all returned keys. Must be in dimensions
"""
rows, columns, tables = shape
# Negative dimensions are not allowed
assert all(dim > 0 for dim in shape)
# Current table has to be in dimensions
assert 0 <= table < tables
string_list = []
# Block selections
templ = "[(r, c, {}) for r in xrange({}, {}) for c in xrange({}, {})]"
for (top, left), (bottom, right) in izip(self.block_tl, self.block_br):
string_list += [templ.format(table, top, bottom + 1,
left, right + 1)]
# Fully selected rows
template = "[({}, c, {}) for c in xrange({})]"
for row in self.rows:
string_list += [template.format(row, table, columns)]
# Fully selected columns
template = "[(r, {}, {}) for r in xrange({})]"
for column in self.cols:
string_list += [template.format(column, table, rows)]
# Single cells
for row, column in self.cells:
string_list += [repr([(row, column, table)])]
key_string = " + ".join(string_list)
if len(string_list) == 0:
return ""
elif len(self.cells) == 1 and len(string_list) == 1:
return "S[{}]".format(string_list[0][1:-1])
else:
template = "[S[key] for key in {} if S[key] is not None]"
return template.format(key_string)
def get_playlists(self, offset=0, limit=50):
""" Get user's playlists. """
response = self.client.get(
self.client.USER_PLAYLISTS % (self.name, offset, limit))
return self._parse_response(response, splaylist)
def parse_soap_enveloped_saml_thingy(text, expected_tags):
"""Parses a SOAP enveloped SAML thing and returns the thing as
a string.
:param text: The SOAP object as XML string
:param expected_tags: What the tag of the SAML thingy is expected to be.
:return: SAML thingy as a string
"""
envelope = defusedxml.ElementTree.fromstring(text)
# Make sure it's a SOAP message
assert envelope.tag == '{%s}Envelope' % soapenv.NAMESPACE
assert len(envelope) >= 1
body = None
for part in envelope:
if part.tag == '{%s}Body' % soapenv.NAMESPACE:
assert len(part) == 1
body = part
break
if body is None:
return ""
saml_part = body[0]
if saml_part.tag in expected_tags:
return ElementTree.tostring(saml_part, encoding="UTF-8")
else:
raise WrongMessageType("Was '%s' expected one of %s" % (saml_part.tag,
expected_tags))
def has_dag_access(**dag_kwargs):
"""
Decorator to check whether the user has read / write permission on the dag.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
has_access = self.appbuilder.sm.has_access
dag_id = request.args.get('dag_id')
# if it is false, we need to check whether user has write access on the dag
can_dag_edit = dag_kwargs.get('can_dag_edit', False)
# 1. check whether the user has can_dag_edit permissions on all_dags
# 2. if 1 false, check whether the user
# has can_dag_edit permissions on the dag
# 3. if 2 false, check whether it is can_dag_read view,
# and whether user has the permissions
if (
has_access('can_dag_edit', 'all_dags') or
has_access('can_dag_edit', dag_id) or (not can_dag_edit and
(has_access('can_dag_read',
'all_dags') or
has_access('can_dag_read',
dag_id)))):
return f(self, *args, **kwargs)
else:
flash("Access is Denied", "danger")
return redirect(url_for(self.appbuilder.sm.auth_view.
__class__.__name__ + ".login"))
return wrapper
return decorator
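A hedged sketch of how the decorator is applied; the view class and method are hypothetical:

class DagActionView:  # hypothetical Flask-AppBuilder view
    @has_dag_access(can_dag_edit=True)
    def trigger(self, *args, **kwargs):
        # reached only if the user can edit this dag (or all_dags)
        ...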
def get_instance(self, payload):
"""
Build an instance of IpAccessControlListInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.trunking.v1.trunk.ip_access_control_list.IpAccessControlListInstance
:rtype: twilio.rest.trunking.v1.trunk.ip_access_control_list.IpAccessControlListInstance
"""
return IpAccessControlListInstance(self._version, payload, trunk_sid=self._solution['trunk_sid'], )
def get_jwt_decrypt_keys(self, jwt, **kwargs):
"""
Get decryption keys from this keyjar based on information carried
in a JWE. These keys should be usable to decrypt an encrypted JWT.
:param jwt: A cryptojwt.jwt.JWT instance
:param kwargs: Other key word arguments
:return: list of usable keys
"""
try:
_key_type = jwe_alg2keytype(jwt.headers['alg'])
except KeyError:
_key_type = ''
try:
_kid = jwt.headers['kid']
except KeyError:
logger.info('Missing kid')
_kid = ''
keys = self.get(key_use='enc', owner='', key_type=_key_type)
try:
_aud = kwargs['aud']
except KeyError:
_aud = ''
if _aud:
try:
allow_missing_kid = kwargs['allow_missing_kid']
except KeyError:
allow_missing_kid = False
try:
nki = kwargs['no_kid_issuer']
except KeyError:
nki = {}
keys = self._add_key(keys, _aud, 'enc', _key_type, _kid, nki,
allow_missing_kid)
# Only want the appropriate keys.
keys = [k for k in keys if k.appropriate_for('decrypt')]
return keys
def load_from_file(self, path):
"""
Load cookies from the file.
Content of file should be a JSON-serialized list of dicts.
"""
with open(path) as inf:
data = inf.read()
if data:
items = json.loads(data)
else:
items = {}
for item in items:
extra = dict((x, y) for x, y in item.items()
if x not in ['name', 'value', 'domain'])
self.set(item['name'], item['value'], item['domain'], **extra)
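The file is expected to contain a JSON list of cookie dicts; anything beyond name, value, and domain is passed through as an extra attribute. A minimal example file:

[
    {"name": "sid", "value": "abc123", "domain": ".example.com", "path": "/", "secure": true}
]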
def media(soup):
"""
All media tags and some associated data about the related component doi
and the parent of that doi (not always present)
"""
media = []
media_tags = raw_parser.media(soup)
position = 1
for tag in media_tags:
media_item = {}
copy_attribute(tag.attrs, 'mime-subtype', media_item)
copy_attribute(tag.attrs, 'mimetype', media_item)
copy_attribute(tag.attrs, 'xlink:href', media_item, 'xlink_href')
copy_attribute(tag.attrs, 'content-type', media_item)
nodenames = ["sub-article", "media", "fig-group", "fig", "supplementary-material"]
details = tag_details(tag, nodenames)
copy_attribute(details, 'component_doi', media_item)
copy_attribute(details, 'type', media_item)
copy_attribute(details, 'sibling_ordinal', media_item)
# Try to get the component DOI of the parent tag
parent_tag = first_parent(tag, nodenames)
if parent_tag:
acting_parent_tag = component_acting_parent_tag(parent_tag, tag)
if acting_parent_tag:
details = tag_details(acting_parent_tag, nodenames)
copy_attribute(details, 'type', media_item, 'parent_type')
copy_attribute(details, 'ordinal', media_item, 'parent_ordinal')
copy_attribute(details, 'asset', media_item, 'parent_asset')
copy_attribute(details, 'sibling_ordinal', media_item, 'parent_sibling_ordinal')
copy_attribute(details, 'component_doi', media_item, 'parent_component_doi')
# Try to get the parent parent
p_parent_tag = first_parent(parent_tag, nodenames)
if p_parent_tag:
acting_p_parent_tag = component_acting_parent_tag(p_parent_tag, parent_tag)
if acting_p_parent_tag:
details = tag_details(acting_p_parent_tag, nodenames)
copy_attribute(details, 'type', media_item, 'p_parent_type')
copy_attribute(details, 'ordinal', media_item, 'p_parent_ordinal')
copy_attribute(details, 'asset', media_item, 'p_parent_asset')
copy_attribute(details, 'sibling_ordinal', media_item, 'p_parent_sibling_ordinal')
copy_attribute(details, 'component_doi', media_item, 'p_parent_component_doi')
# Try to get the parent parent parent
p_p_parent_tag = first_parent(p_parent_tag, nodenames)
if p_p_parent_tag:
acting_p_p_parent_tag = component_acting_parent_tag(p_p_parent_tag, p_parent_tag)
if acting_p_p_parent_tag:
details = tag_details(acting_p_p_parent_tag, nodenames)
copy_attribute(details, 'type', media_item, 'p_p_parent_type')
copy_attribute(details, 'ordinal', media_item, 'p_p_parent_ordinal')
copy_attribute(details, 'asset', media_item, 'p_p_parent_asset')
copy_attribute(details, 'sibling_ordinal', media_item, 'p_p_parent_sibling_ordinal')
copy_attribute(details, 'component_doi', media_item, 'p_p_parent_component_doi')
# Increment the position
media_item['position'] = position
# Ordinal should be the same as position in this case but set it anyway
media_item['ordinal'] = tag_ordinal(tag)
media.append(media_item)
position += 1
return media
def auth_in_stage2(self,stanza):
"""Handle the second stage (<iq type='set'/>) of legacy ("plain" or
"digest") authentication.
[server only]"""
self.lock.acquire()
try:
if "plain" not in self.auth_methods and "digest" not in self.auth_methods:
iq=stanza.make_error_response("not-allowed")
self.send(iq)
return
username=stanza.xpath_eval("a:query/a:username",{"a":"jabber:iq:auth"})
if username:
username=from_utf8(username[0].getContent())
resource=stanza.xpath_eval("a:query/a:resource",{"a":"jabber:iq:auth"})
if resource:
resource=from_utf8(resource[0].getContent())
if not username or not resource:
self.__logger.debug("No username or resource found in auth request")
iq=stanza.make_error_response("bad-request")
self.send(iq)
return
if stanza.xpath_eval("a:query/a:password",{"a":"jabber:iq:auth"}):
if "plain" not in self.auth_methods:
iq=stanza.make_error_response("not-allowed")
self.send(iq)
return
else:
return self._plain_auth_in_stage2(username,resource,stanza)
if stanza.xpath_eval("a:query/a:digest",{"a":"jabber:iq:auth"}):
if "plain" not in self.auth_methods:
iq=stanza.make_error_response("not-allowed")
self.send(iq)
return
else:
return self._digest_auth_in_stage2(username,resource,stanza)
finally:
self.lock.release()
def cleanup(self):
"""
Cleans up references to the plot after the plot has been
deleted. Traverses through all plots cleaning up Callbacks and
Stream subscribers.
"""
plots = self.traverse(lambda x: x, [BokehPlot])
for plot in plots:
if not isinstance(plot, (GenericCompositePlot, GenericElementPlot, GenericOverlayPlot)):
continue
streams = list(plot.streams)
plot.streams = []
plot._document = None
if plot.subplots:
plot.subplots.clear()
if isinstance(plot, GenericElementPlot):
for callback in plot.callbacks:
streams += callback.streams
callback.cleanup()
for stream in set(streams):
stream._subscribers = [
(p, subscriber) for p, subscriber in stream._subscribers
if get_method_owner(subscriber) not in plots
]
if self.comm and self.root is self.handles.get('plot'):
self.comm.close()
def create(cls, name, ipv4_network=None, ipv6_network=None,
comment=None):
"""
Create the network element
:param str name: Name of element
:param str ipv4_network: network cidr (optional if ipv6)
:param str ipv6_network: network cidr (optional if ipv4)
:param str comment: comment (optional)
:raises CreateElementFailed: element creation failed with reason
:return: instance with meta
:rtype: Network
.. note:: Either an ipv4_network or ipv6_network must be specified
"""
ipv4_network = ipv4_network if ipv4_network else None
ipv6_network = ipv6_network if ipv6_network else None
json = {'name': name,
'ipv4_network': ipv4_network,
'ipv6_network': ipv6_network,
'comment': comment}
return ElementCreator(cls, json)
def iterparse_elements(element_function, file_or_path, **kwargs):
"""
Applies element_function to each of the sub-elements in the XML file.
The passed-in function must take at least one element, and an optional
set of **kwargs relevant to each of the elements in the list:
def elem_func(each_elem, **kwargs)
Implements the recommended cElementTree iterparse pattern, which is
efficient for reading in a file, making changes and writing it again.
"""
if not hasattr(element_function, '__call__'):
return
file_path = getattr(file_or_path, 'name', file_or_path)
context = iter(iterparse(file_path, events=('start', 'end')))
root = None # Capture root for Memory management
# Start event loads child; by the End event it's ready for processing
for event, child in context:
if root is None:
root = child
if event == 'end': # Ensures the element has been fully read
element_function(child, **kwargs)
root.clear()
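A small usage sketch with a hypothetical handler and file path; extra kwargs are forwarded to the handler for every element:

def count_tag(elem, counts=None):  # hypothetical handler
    if counts is not None:
        counts[elem.tag] = counts.get(elem.tag, 0) + 1

tag_counts = {}
iterparse_elements(count_tag, 'data.xml', counts=tag_counts)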
def create_roles(apps, schema_editor):
"""Create the enterprise roles if they do not already exist."""
EnterpriseFeatureRole = apps.get_model('enterprise', 'EnterpriseFeatureRole')
EnterpriseFeatureRole.objects.update_or_create(name=ENTERPRISE_CATALOG_ADMIN_ROLE)
EnterpriseFeatureRole.objects.update_or_create(name=ENTERPRISE_DASHBOARD_ADMIN_ROLE)
EnterpriseFeatureRole.objects.update_or_create(name=ENTERPRISE_ENROLLMENT_API_ADMIN_ROLE)
def submit_evaluation(self, variant_obj, user_obj, institute_obj, case_obj, link, criteria):
"""Submit an evaluation to the database
Get all the relevant information, build an evaluation_obj
Args:
variant_obj(dict)
user_obj(dict)
institute_obj(dict)
case_obj(dict)
link(str): variant url
criteria(list(dict)):
[
{
'term': str,
'comment': str,
'links': list(str)
},
.
.
]
"""
variant_specific = variant_obj['_id']
variant_id = variant_obj['variant_id']
user_id = user_obj['_id']
user_name = user_obj.get('name', user_obj['_id'])
institute_id = institute_obj['_id']
case_id = case_obj['_id']
evaluation_terms = [evaluation_info['term'] for evaluation_info in criteria]
classification = get_acmg(evaluation_terms)
evaluation_obj = build_evaluation(
variant_specific=variant_specific,
variant_id=variant_id,
user_id=user_id,
user_name=user_name,
institute_id=institute_id,
case_id=case_id,
classification=classification,
criteria=criteria
)
self._load_evaluation(evaluation_obj)
# Update the acmg classification for the variant:
self.update_acmg(institute_obj, case_obj, user_obj, link, variant_obj, classification)
return classification
def _get_retention_policy_value(self):
"""
Sets the deletion policy on this resource. The default is 'Retain'.
:return: value for the DeletionPolicy attribute.
"""
if self.RetentionPolicy is None or self.RetentionPolicy.lower() == self.RETAIN.lower():
return self.RETAIN
elif self.RetentionPolicy.lower() == self.DELETE.lower():
return self.DELETE
elif self.RetentionPolicy.lower() not in self.retention_policy_options:
raise InvalidResourceException(self.logical_id,
"'{}' must be one of the following options: {}."
.format('RetentionPolicy', [self.RETAIN, self.DELETE]))
def isdir(self, path):
"""Return true if the path refers to an existing directory.
Parameters
----------
path : str
Path of directory on the remote side to check.
"""
result = True
try:
self.sftp_client.lstat(path)
except FileNotFoundError:
result = False
return result
def esrchc(value, array):
"""
Search for a given value within a character string array.
Return the index of the first equivalent array entry, or -1
if no equivalent element is found.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/esrchc_c.html
:param value: Key value to be found in array.
:type value: str
:param array: Character string array to search.
:type array: list of str.
:return:
The index of the first array entry equivalent to value,
or -1 if none is found.
:rtype: int
"""
value = stypes.stringToCharP(value)
ndim = ctypes.c_int(len(array))
lenvals = ctypes.c_int(len(max(array, key=len)) + 1)
array = stypes.listToCharArray(array, xLen=lenvals, yLen=ndim)
return libspice.esrchc_c(value, ndim, lenvals, array)
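Assuming this is the usual spiceypy wrapper, usage looks like:

import spiceypy as spice

spice.esrchc('two', ['one', 'two', 'three'])   # -> 1
spice.esrchc('four', ['one', 'two', 'three'])  # -> -1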
def date(name=None):
"""
Creates the grammar for a Date (D) field, accepting only numbers in a
certain pattern.
:param name: name for the field
:return: grammar for the date field
"""
if name is None:
name = 'Date Field'
# Basic field
# This regex allows values from 00000101 to 99991231
field = pp.Regex('[0-9][0-9][0-9][0-9](0[1-9]|1[0-2])'
'(0[1-9]|[1-2][0-9]|3[0-1])')
# Parse action
field.setParseAction(lambda d: datetime.datetime.strptime(d[0], '%Y%m%d')
.date())
# Name
field.setName(name)
# White spaces are not removed
field.leaveWhitespace()
return field
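A brief check of the grammar, assuming pyparsing (pp) and datetime are imported as in this module:

field = date()
result = field.parseString('20240131')
assert result[0] == datetime.date(2024, 1, 31)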
def setVisible( self, state ):
"""
Updates the visible state for this message box.
:param state | <bool>
"""
super(XMessageBox, self).setVisible(state)
if ( state ):
self.startTimer(100)
self.layout().setSizeConstraint(QLayout.SetNoConstraint)
self.resize( self.width() + 100, self.height() )
def get_vnetwork_vswitches_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches")
config = get_vnetwork_vswitches
output = ET.SubElement(get_vnetwork_vswitches, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def systemInformationType5ter():
"""SYSTEM INFORMATION TYPE 5ter Section 9.1.39"""
a = L2PseudoLength(l2pLength=0x12)
b = TpPd(pd=0x6)
c = MessageType(mesType=0x6) # 00000110
d = NeighbourCellsDescription2()
packet = a / b / c / d
return packet
def lowercase_to_camelcase(python_input, camelcase_input=None):
'''
a function to recursively convert data with lowercase key names into camelcase keys
:param python_input: list or dictionary with lowercase keys
:param camelcase_input: [optional] list or dictionary supplying default camelcase keys for the output
:return: dictionary with camelcase key names
'''
if camelcase_input:
if python_input.__class__ != camelcase_input.__class__:
raise ValueError('camelcase_input type %s does not match python_input type %s' % (camelcase_input.__class__, python_input.__class__))
if isinstance(python_input, dict):
return _to_camelcase_dict(python_input, camelcase_input)
elif isinstance(python_input, list):
return _ingest_list(python_input, _to_camelcase_dict, camelcase_input)
else:
return python_input
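Illustrative only, since the _to_camelcase_dict helper is not shown here; the intended behavior is along these lines:

lowercase_to_camelcase({'first_name': 'Ada', 'user_id': 7})
# -> {'firstName': 'Ada', 'userId': 7}  (assuming the helper maps snake_case keys to camelCase)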
def configure():
"""
Configure information about Databricks account and default behavior.
Configuration is stored in a `.apparatecfg` file. A config file must exist
before this package can be used, and can be supplied either directly as a
text file or generated using this configuration tool.
"""
config = _load_config(CFG_FILE)
_update_value(
config,
'host',
'Databricks host (e.g. https://my-organization.cloud.databricks.com)',
is_sensitive=False,
)
_update_value(
config,
'token',
'Databricks API token',
is_sensitive=True,
)
_update_value(
config,
'prod_folder',
'Databricks folder for production libraries',
is_sensitive=False,
)
with open(CFG_FILE, 'w+') as f:
config.write(f)
def list_all_versions(pkg,
bin_env=None,
include_alpha=False,
include_beta=False,
include_rc=False,
user=None,
cwd=None,
index_url=None,
extra_index_url=None):
'''
.. versionadded:: 2017.7.3
List all available versions of a pip package
pkg
The package to check
bin_env
Path to pip (or to a virtualenv). This can be used to specify the path
to the pip to use when more than one Python release is installed (e.g.
``/usr/bin/pip-2.7`` or ``/usr/bin/pip-2.6``. If a directory path is
specified, it is assumed to be a virtualenv.
include_alpha
Include alpha versions in the list
include_beta
Include beta versions in the list
include_rc
Include release candidates versions in the list
user
The user under which to run pip
cwd
Directory from which to run pip
index_url
Base URL of Python Package Index
.. versionadded:: 2019.2.0
extra_index_url
Additional URL of Python Package Index
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' pip.list_all_versions <package name>
'''
cmd = _get_pip_bin(bin_env)
cmd.extend(['install', '{0}==versions'.format(pkg)])
if index_url:
if not salt.utils.url.validate(index_url, VALID_PROTOS):
raise CommandExecutionError(
'\'{0}\' is not a valid URL'.format(index_url)
)
cmd.extend(['--index-url', index_url])
if extra_index_url:
if not salt.utils.url.validate(extra_index_url, VALID_PROTOS):
raise CommandExecutionError(
'\'{0}\' is not a valid URL'.format(extra_index_url)
)
cmd.extend(['--extra-index-url', extra_index_url])
cmd_kwargs = dict(cwd=cwd, runas=user, output_loglevel='quiet', redirect_stderr=True)
if bin_env and os.path.isdir(bin_env):
cmd_kwargs['env'] = {'VIRTUAL_ENV': bin_env}
result = __salt__['cmd.run_all'](cmd, **cmd_kwargs)
filtered = []
if not include_alpha:
filtered.append('a')
if not include_beta:
filtered.append('b')
if not include_rc:
filtered.append('rc')
if filtered:
excludes = re.compile(r'^((?!{0}).)*$'.format('|'.join(filtered)))
else:
excludes = re.compile(r'')
versions = []
for line in result['stdout'].splitlines():
match = re.search(r'\s*Could not find a version.* \(from versions: (.*)\)', line)
if match:
versions = [v for v in match.group(1).split(', ') if v and excludes.match(v)]
versions.sort(key=pkg_resources.parse_version)
break
if not versions:
return None
return versions
def loadgrants(source=None, setspec=None, all_grants=False):
"""Harvest grants from OpenAIRE.
:param source: Load the grants from a local sqlite db (offline).
The value of the parameter should be a path to the local file.
:type source: str
:param setspec: Harvest specific set through OAI-PMH
Creates a remote connection to OpenAIRE.
:type setspec: str
:param all_grants: Harvest all sets through OAI-PMH,
as specified in the configuration OPENAIRE_GRANTS_SPEC. Sets are
harvested sequentially in the order specified in the configuration.
Creates a remote connection to OpenAIRE.
:type all_grants: bool
"""
assert all_grants or setspec or source, \
"Either '--all', '--setspec' or '--source' is required parameter."
if all_grants:
harvest_all_openaire_projects.delay()
elif setspec:
click.echo("Remote grants loading sent to queue.")
harvest_openaire_projects.delay(setspec=setspec)
else: # if source
loader = LocalOAIRELoader(source=source)
loader._connect()
cnt = loader._count()
click.echo("Sending grants to queue.")
with click.progressbar(loader.iter_grants(), length=cnt) as grants_bar:
for grant_json in grants_bar:
register_grant.delay(grant_json)
def poly(self, polys):
"""Creates a POLYGON shape.
Polys is a collection of polygons, each made up of a list of xy values.
Note that for ordinary polygons the coordinates must run in a clockwise direction.
If some of the polygons are holes, these must run in a counterclockwise direction."""
shapeType = POLYGON
self._shapeparts(parts=polys, shapeType=shapeType)
def components(arg):
"""Converts a dict of components to the format expected by the Google Maps
server.
For example:
c = {"country": "US", "postal_code": "94043"}
convert.components(c)
# 'country:US|postal_code:94043'
:param arg: The component filter.
:type arg: dict
:rtype: basestring
"""
# Components may have multiple values per type, here we
# expand them into individual key/value items, eg:
# {"country": ["US", "AU"], "foo": 1} -> "country:AU", "country:US", "foo:1"
def expand(arg):
for k, v in arg.items():
for item in as_list(v):
yield "%s:%s" % (k, item)
if isinstance(arg, dict):
return "|".join(sorted(expand(arg)))
raise TypeError(
"Expected a dict for components, "
"but got %s" % type(arg).__name__) | 0.002364 |
def setupTable_post(self):
"""
Make the post table.
**This should not be called externally.** Subclasses
may override or supplement this method to handle the
table creation in a different way if desired.
"""
if "post" not in self.tables:
return
self.otf["post"] = post = newTable("post")
font = self.ufo
post.formatType = 3.0
# italic angle
italicAngle = getAttrWithFallback(font.info, "italicAngle")
post.italicAngle = italicAngle
# underline
underlinePosition = getAttrWithFallback(font.info, "postscriptUnderlinePosition")
post.underlinePosition = otRound(underlinePosition)
underlineThickness = getAttrWithFallback(font.info, "postscriptUnderlineThickness")
post.underlineThickness = otRound(underlineThickness)
post.isFixedPitch = getAttrWithFallback(font.info, "postscriptIsFixedPitch")
# misc
post.minMemType42 = 0
post.maxMemType42 = 0
post.minMemType1 = 0
post.maxMemType1 = 0
def max(x, axis=None, keepdims=False, with_index=False, only_index=False):
"""Reduce the input N-D array `x` along the given `axis` using the max
operation. The `axis` argument may be a single integer to reduce
over one axis, a tuple of integers to reduce over multiple axes,
or ``None`` to reduce over all axes. If `keepdims` is ``True``,
the output will keep all reduced dimensions with size 1. If
`with_index` is True, result is a tuple ``(sorted, indices)`` or
only ``indices`` if `only_index` is True. Setting `only_index` to
True implies that `with_index` is also True.
.. code-block:: python
import numpy as np
import nnabla as nn
import nnabla.functions as F
nn.set_auto_forward(True)
x = nn.Variable.from_numpy_array(np.random.rand(2, 3, 4))
maxval = F.max(x, axis=1)
assert np.allclose(maxval.d, np.max(x.d, axis=1))
maxval, indices = F.max(x, axis=1, with_index=True)
assert np.allclose(maxval.d, np.max(x.d, axis=1))
assert np.all(indices.d == np.argmax(x.d, axis=1))
indices = F.max(x, axis=1, only_index=True)
assert np.all(indices.d == np.argmax(x.d, axis=1))
Args:
x (Variable): An input variable.
axis (None, int or tuple of ints): Axis or axes along which max is
calculated. The default value `None` will reduce all dimensions.
keepdims(bool): Keep reduced axes as dimension with 1 element.
with_index(bool): Return tuple of max values and index.
only_index(bool): Return only the index of max values.
Returns:
~nnabla.Variable: N-D array.
"""
from .function_bases import max as max_base
if axis is None:
axis = range(x.ndim)
elif not hasattr(axis, '__iter__'):
axis = [axis]
n_outputs = 2 if with_index and not only_index else 1
return max_base(x, axis, keepdims, with_index, only_index, n_outputs)
def gen_radio_list(sig_dic):
'''
Generate the list-view HTML snippet for a RADIO field, one entry per item.
'''
view_zuoxiang = '''<span class="iga_pd_val">'''
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '''{{% if postinfo.extinfo['{0}'][0] == "{1}" %}} {2} {{% end %}}
'''.format(sig_dic['en'], key, dic_tmp[key])
view_zuoxiang += tmp_str
view_zuoxiang += '''</span>'''
return view_zuoxiang
def compliance(self, value):
"""Set the compliance profile URI."""
if (self.api_version < '2.0'):
self.profile = value
else:
try:
self.profile[0] = value
except AttributeError:
# handle case where profile not initialized as array
self.profile = [value]
def get_mac_address_table(self):
"""
Returns a list of dictionaries. Each dictionary represents an entry in the MAC Address
Table, having the following keys
* mac (string)
* interface (string)
* vlan (int)
* active (boolean)
* static (boolean)
* moves (int)
* last_move (float)
Format1:
Destination Address Address Type VLAN Destination Port
------------------- ------------ ---- --------------------
6400.f1cf.2cc6 Dynamic 1 Wlan-GigabitEthernet0
Cat 6500:
Legend: * - primary entry
age - seconds since last seen
n/a - not available
vlan mac address type learn age ports
------+----------------+--------+-----+----------+--------------------------
* 999 1111.2222.3333 dynamic Yes 0 Port-channel1
999 1111.2222.3333 dynamic Yes 0 Port-channel1
Cat 4948
Unicast Entries
vlan mac address type protocols port
-------+---------------+--------+---------------------+--------------------
999 1111.2222.3333 dynamic ip Port-channel1
Cat 2960
Mac Address Table
-------------------------------------------
Vlan Mac Address Type Ports
---- ----------- -------- -----
All 1111.2222.3333 STATIC CPU
"""
RE_MACTABLE_DEFAULT = r"^" + MAC_REGEX
RE_MACTABLE_6500_1 = r"^\*\s+{}\s+{}\s+".format(
VLAN_REGEX, MAC_REGEX
) # 7 fields
RE_MACTABLE_6500_2 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 6 fields
RE_MACTABLE_6500_3 = r"^\s{51}\S+" # Fill down prior
RE_MACTABLE_6500_4 = r"^R\s+{}\s+.*Router".format(
VLAN_REGEX, MAC_REGEX
) # Router field
RE_MACTABLE_6500_5 = r"^R\s+N/A\s+{}.*Router".format(
MAC_REGEX
) # Router skipped
RE_MACTABLE_4500_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 5 fields
RE_MACTABLE_4500_2 = r"^\s{32,34}\S+" # Fill down prior
RE_MACTABLE_4500_3 = r"^{}\s+{}\s+".format(
INT_REGEX, MAC_REGEX
) # Matches PHY int
RE_MACTABLE_2960_1 = r"^All\s+{}".format(MAC_REGEX)
RE_MACTABLE_GEN_1 = r"^{}\s+{}\s+".format(
VLAN_REGEX, MAC_REGEX
) # 4 fields-2960/4500
def process_mac_fields(vlan, mac, mac_type, interface):
"""Return proper data for mac address fields."""
if mac_type.lower() in ["self", "static", "system"]:
static = True
if vlan.lower() == "all":
vlan = 0
if (
interface.lower() == "cpu"
or re.search(r"router", interface.lower())
or re.search(r"switch", interface.lower())
):
interface = ""
else:
static = False
return {
"mac": napalm.base.helpers.mac(mac),
"interface": self._canonical_int(interface),
"vlan": int(vlan),
"static": static,
"active": True,
"moves": -1,
"last_move": -1.0,
}
mac_address_table = []
command = IOS_COMMANDS["show_mac_address"]
output = self._send_command(command)
# Skip the header lines
output = re.split(r"^----.*", output, flags=re.M)[1:]
output = "\n".join(output).strip()
# Strip any leading asterisks
output = re.sub(r"^\*", "", output, flags=re.M)
fill_down_vlan = fill_down_mac = fill_down_mac_type = ""
for line in output.splitlines():
# Cat6500 one off and 4500 multicast format
if re.search(RE_MACTABLE_6500_3, line) or re.search(
RE_MACTABLE_4500_2, line
):
interface = line.strip()
if "," in interface:
interfaces = interface.split(",")
else:
interfaces = [interface]
for single_interface in interfaces:
mac_address_table.append(
process_mac_fields(
fill_down_vlan,
fill_down_mac,
fill_down_mac_type,
single_interface,
)
)
continue
line = line.strip()
if line == "":
continue
if re.search(r"^---", line):
# Convert any '---' to VLAN 0
line = re.sub(r"^---", "0", line, flags=re.M)
# Format1
if re.search(RE_MACTABLE_DEFAULT, line):
if len(line.split()) == 4:
mac, mac_type, vlan, interface = line.split()
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
else:
raise ValueError("Unexpected output from: {}".format(line.split()))
# Cat6500 format
elif (
re.search(RE_MACTABLE_6500_1, line)
or re.search(RE_MACTABLE_6500_2, line)
) and len(line.split()) >= 6:
if len(line.split()) == 7:
_, vlan, mac, mac_type, _, _, interface = line.split()
elif len(line.split()) == 6:
vlan, mac, mac_type, _, _, interface = line.split()
if "," in interface:
interfaces = interface.split(",")
fill_down_vlan = vlan
fill_down_mac = mac
fill_down_mac_type = mac_type
for single_interface in interfaces:
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, single_interface)
)
else:
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
# Cat4500 format
elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 5:
vlan, mac, mac_type, _, interface = line.split()
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
# Cat4500 w/PHY interface in Mac Table. Vlan will be -1.
elif re.search(RE_MACTABLE_4500_3, line) and len(line.split()) == 5:
interface, mac, mac_type, _, _ = line.split()
interface = canonical_interface_name(interface)
vlan = "-1"
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
# Cat2960 format - ignore extra header line
elif re.search(r"^Vlan\s+Mac Address\s+", line):
continue
# Cat2960 format (Cat4500 format multicast entries)
elif (
re.search(RE_MACTABLE_2960_1, line)
or re.search(RE_MACTABLE_GEN_1, line)
) and len(line.split()) == 4:
vlan, mac, mac_type, interface = line.split()
if "," in interface:
interfaces = interface.split(",")
fill_down_vlan = vlan
fill_down_mac = mac
fill_down_mac_type = mac_type
for single_interface in interfaces:
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, single_interface)
)
else:
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
# 4500 in case of unused Vlan 1.
elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 3:
vlan, mac, mac_type = line.split()
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface="")
)
# 4500 w/PHY interface in Multicast table. Vlan will be -1.
elif re.search(RE_MACTABLE_4500_3, line) and len(line.split()) == 4:
vlan, mac, mac_type, interface = line.split()
vlan = "-1"
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
elif re.search(RE_MACTABLE_6500_4, line) and len(line.split()) == 7:
line = re.sub(r"^R\s+", "", line)
vlan, mac, mac_type, _, _, interface = line.split()
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
continue
elif re.search(RE_MACTABLE_6500_5, line):
line = re.sub(r"^R\s+", "", line)
vlan, mac, mac_type, _, _, interface = line.split()
# Convert 'N/A' VLAN to to 0
vlan = re.sub(r"N/A", "0", vlan)
mac_address_table.append(
process_mac_fields(vlan, mac, mac_type, interface)
)
continue
elif re.search(r"Total Mac Addresses", line):
continue
elif re.search(r"Multicast Entries", line):
continue
elif re.search(r"vlan.*mac.*address.*type.*", line):
continue
elif re.search(
r"Displaying entries from active supervisor:\s+\w+\s+\[\d\]:", line
):
continue
else:
raise ValueError("Unexpected output from: {}".format(repr(line)))
return mac_address_table
def list_joysticks():
'''Print a list of available joysticks'''
print('Available joysticks:')
print()
for jid in range(pygame.joystick.get_count()):
j = pygame.joystick.Joystick(jid)
print('({}) {}'.format(jid, j.get_name()))
def log_pdf(self, y, mu, weights=None):
"""
computes the log of the pdf or pmf of the values under the current distribution
Parameters
----------
y : array-like of length n
target values
mu : array-like of length n
expected values
weights : array-like shape (n,) or None, default: None
containing sample weights
if None, defaults to array of ones
Returns
-------
pdf/pmf : np.array of length n
"""
if weights is None:
weights = np.ones_like(mu)
gamma = weights / self.scale
return sp.stats.invgauss.logpdf(y, mu, scale=1./gamma)
def n_sum(n, nums, target, **kv):
"""
n: int
nums: list[object]
target: object
sum_closure: function, optional
Given two elements of nums, return sum of both.
compare_closure: function, optional
Given one object of nums and target, return -1, 1, or 0.
same_closure: function, optional
Given two object of nums, return bool.
return: list[list[object]]
Note:
1. type of sum_closure's return should be same
as type of compare_closure's first param
"""
def sum_closure_default(a, b):
return a + b
def compare_closure_default(num, target):
""" above, below, or right on? """
if num < target:
return -1
elif num > target:
return 1
else:
return 0
def same_closure_default(a, b):
return a == b
def n_sum(n, nums, target):
if n == 2: # want answers with only 2 terms? easy!
results = two_sum(nums, target)
else:
results = []
prev_num = None
for index, num in enumerate(nums):
if prev_num is not None and \
same_closure(prev_num, num):
continue
prev_num = num
n_minus1_results = (
n_sum( # recursive call
n - 1, # a
nums[index + 1:], # b
target - num # c
) # x = n_sum( a, b, c )
) # n_minus1_results = x
n_minus1_results = (
append_elem_to_each_list(num, n_minus1_results)
)
results += n_minus1_results
return union(results)
def two_sum(nums, target):
nums.sort()
lt = 0
rt = len(nums) - 1
results = []
while lt < rt:
sum_ = sum_closure(nums[lt], nums[rt])
flag = compare_closure(sum_, target)
if flag == -1:
lt += 1
elif flag == 1:
rt -= 1
else:
results.append(sorted([nums[lt], nums[rt]]))
lt += 1
rt -= 1
while (lt < len(nums) and
same_closure(nums[lt - 1], nums[lt])):
lt += 1
while (0 <= rt and
same_closure(nums[rt], nums[rt + 1])):
rt -= 1
return results
def append_elem_to_each_list(elem, container):
results = []
for elems in container:
elems.append(elem)
results.append(sorted(elems))
return results
def union(duplicate_results):
results = []
if len(duplicate_results) != 0:
duplicate_results.sort()
results.append(duplicate_results[0])
for result in duplicate_results[1:]:
if results[-1] != result:
results.append(result)
return results
sum_closure = kv.get('sum_closure', sum_closure_default)
same_closure = kv.get('same_closure', same_closure_default)
compare_closure = kv.get('compare_closure', compare_closure_default)
nums.sort()
return n_sum(n, nums, target)
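With the default closures this behaves like the classic k-sum problem; a quick example:

triples = n_sum(3, [1, 0, -1, 0, -2, 2], 0)
# -> [[-2, 0, 2], [-1, 0, 1]]  (each result sorted, duplicates removed)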
def get_extra_pids(self):
"""
Gets the list of process ids that should be marked as high priority.
:return: A list of process ids that are used by this bot in addition to the ones inside the python process.
"""
while not self.is_retired:
for proc in psutil.process_iter():
for conn in proc.connections():
if conn.laddr.port == self.port:
self.logger.debug(f'py4j server for {self.name} appears to have pid {proc.pid}')
return [proc.pid]
if self.is_executable_configured():
# The helper process will start java and report the PID. Nothing to do here.
return []
time.sleep(1)
if self.java_executable_path is None:
self.logger.info(
"Can't auto-start java because no executable is configured. Please start java manually!")
else:
self.logger.info(f"Can't auto-start java because {self.java_executable_path} is not found. "
"Please start java manually!") | 0.006087 |
def _add_include_arg(arg_parser):
"""
Adds optional repeatable include parameter to a parser.
:param arg_parser: ArgumentParser parser to add this argument to.
"""
arg_parser.add_argument("--include",
metavar='Path',
action='append',
type=to_unicode,
dest='include_paths',
help="Specifies a single path to include. This argument can be repeated.",
default=[])
def create(path, value='', acls=None, ephemeral=False, sequence=False, makepath=False, profile=None,
hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Create Znode
path
path of znode to create
value
value to assign to znode (Default: '')
acls
list of acl dictionaries to be assigned (Default: None)
ephemeral
indicate node is ephemeral (Default: False)
sequence
indicate node is suffixed with a unique index (Default: False)
makepath
Create parent paths if they do not exist (Default: False)
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181)
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.create /test/name daniel profile=prod
'''
if acls is None:
acls = []
acls = [make_digest_acl(**acl) for acl in acls]
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return conn.create(path, salt.utils.stringutils.to_bytes(value), acls, ephemeral, sequence, makepath)
def drange(start, stop, step):
"""
A generator that yields successive samples from start (inclusive)
to stop (exclusive) in step intervals.
Parameters
----------
start : float
starting point
stop : float
stopping point
step : float
stepping interval
Yields
------
x : float
next sample
"""
x = start
if step > 0:
while x + step <= stop: # produces same behaviour as numpy.arange
yield x
x += step
elif step < 0:
while x + step >= stop: # produces same behaviour as numpy.arange
yield x
x += step
else:
raise ZeroDivisionError("Step must be non-zero")
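A short demonstration; note the exclusive stop, matching numpy.arange:

list(drange(0, 1, 0.25))   # -> [0, 0.25, 0.5, 0.75]
list(drange(1, 0, -0.25))  # -> [1, 0.75, 0.5, 0.25]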
def datetime_format(desired_format, datetime_instance=None, *args, **kwargs):
"""
Replaces format style phrases (listed in the dt_exps dictionary)
with this datetime instance's information.
.. code :: python
reusables.datetime_format("Hey, it's {month-full} already!")
"Hey, it's March already!"
:param desired_format: string to add datetime details too
:param datetime_instance: datetime.datetime instance, defaults to 'now'
:param args: additional args to pass to str.format
:param kwargs: additional kwargs to pass to str format
:return: formatted string
"""
for strf, exp in datetime_regex.datetime.format.items():
desired_format = exp.sub(strf, desired_format)
if not datetime_instance:
datetime_instance = now()
return datetime_instance.strftime(desired_format.format(*args, **kwargs))
def _parse_s3_location(location):
"""
Parses the given location input as a S3 Location and returns the file's bucket, key and version as separate
values. Input can be in two different formats:
1. Dictionary with ``Bucket``, ``Key``, ``Version`` keys
2. String of S3 URI in format ``s3://<bucket>/<key>?versionId=<version>``
If the input is not in either of the above formats, this method will return (None, None, None) tuple for all
the values.
Parameters
----------
location : str or dict
Location of the S3 file
Returns
-------
str
Name of the S3 Bucket. None, if bucket value was not found
str
Key of the file from S3. None, if key was not provided
str
Optional Version ID of the file. None, if version ID is not provided
"""
bucket, key, version = None, None, None
if isinstance(location, dict):
# This is a S3 Location dictionary. Just grab the fields. It is very well possible that
# this dictionary has none of the fields we expect. Return None if the fields don't exist.
bucket, key, version = (
location.get("Bucket"),
location.get("Key"),
location.get("Version")
)
elif isinstance(location, string_types) and location.startswith("s3://"):
# This is a S3 URI. Parse it using a standard URI parser to extract the components
parsed = urlparse(location)
query = parse_qs(parsed.query)
bucket = parsed.netloc
key = parsed.path.lstrip('/') # Leading '/' messes with S3 APIs. Remove it.
# If there is a query string that has a single versionId field,
# set the object version and return
if query and 'versionId' in query and len(query['versionId']) == 1:
version = query['versionId'][0]
return bucket, key, version
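Both accepted input shapes in action:

_parse_s3_location("s3://my-bucket/some/key?versionId=abc123")
# -> ("my-bucket", "some/key", "abc123")
_parse_s3_location({"Bucket": "my-bucket", "Key": "some/key"})
# -> ("my-bucket", "some/key", None)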
def consumer(self, fn):
"""Consumer decorator
:param fn: coroutine consumer function
Example:
>>> api = StreamingAPI('my_service_key')
>>> stream = api.get_stream()
>>> @stream.consumer
>>> @asyncio.coroutine
>>> def handle_event(payload):
>>> print(payload)
"""
if self._consumer_fn is not None:
raise ValueError('Consumer function is already defined for this '
'Stream instance')
if not any([asyncio.iscoroutine(fn), asyncio.iscoroutinefunction(fn)]):
raise ValueError('Consumer function must be a coroutine')
self._consumer_fn = fn
def shifted(self, rows, cols):
"""Returns a new selection that is shifted by rows and cols.
Negative values for rows and cols may result in a selection
that addresses negative cells.
Parameters
----------
rows: Integer
\tNumber of rows that the new selection is shifted down
cols: Integer
\tNumber of columns that the new selection is shifted right
"""
shifted_block_tl = \
[(row + rows, col + cols) for row, col in self.block_tl]
shifted_block_br = \
[(row + rows, col + cols) for row, col in self.block_br]
shifted_rows = [row + rows for row in self.rows]
shifted_cols = [col + cols for col in self.cols]
shifted_cells = [(row + rows, col + cols) for row, col in self.cells]
return Selection(shifted_block_tl, shifted_block_br, shifted_rows,
shifted_cols, shifted_cells)
def element_not_contains(self, element_id, value):
"""
Assert provided content is not contained within an element found by ``id``.
"""
elem = world.browser.find_elements_by_xpath(str(
'id("{id}")[contains(., "{value}")]'.format(
id=element_id, value=value)))
assert not elem, \
"Expected element not to contain the given text." | 0.002667 |
def createStatus(self,
repo_user, repo_name, sha, state, target_url=None,
context=None, issue=None, description=None):
"""
:param repo_user: GitHub user or organization
:param repo_name: Name of the repository
:param sha: Full sha to create the status for.
:param state: one of the following 'pending', 'success', 'error'
or 'failure'.
:param target_url: Target url to associate with this status.
:param description: Short description of the status.
:param context: Build context
:return: A deferred with the result from GitHub.
This code comes from txgithub by @tomprince.
txgithub is based on twisted's webclient agent, which is much less reliable and featureful
than txrequests (support for proxy, connection pool, keep alive, retry, etc.)
"""
payload = {'state': state}
if description is not None:
payload['description'] = description
if target_url is not None:
payload['target_url'] = target_url
if context is not None:
payload['context'] = context
return self._http.post(
'/'.join(['/repos', repo_user, repo_name, 'statuses', sha]),
json=payload)
def set_context(context=None, font_scale=1, rc=None):
"""Set the plotting context parameters.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> set_context("paper")
>>> set_context("talk", font_scale=1.4)
>>> set_context("talk", rc={"lines.linewidth": 2})
See Also
--------
plotting_context : return a dictionary of rc parameters, or use in
a ``with`` statement to temporarily set the context.
set_style : set the default parameters for figure style
set_palette : set the default color palette for figures
"""
context_object = plotting_context(context, font_scale, rc)
mpl.rcParams.update(context_object)
def get_child_family_ids(self, family_id):
"""Gets the child ``Ids`` of the given family.
arg: family_id (osid.id.Id): the ``Id`` to query
return: (osid.id.IdList) - the children of the family
raise: NotFound - ``family_id`` is not found
raise: NullArgument - ``family_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_child_bin_ids
if self._catalog_session is not None:
return self._catalog_session.get_child_catalog_ids(catalog_id=family_id)
return self._hierarchy_session.get_children(id_=family_id)
def run(self, value, model=None, context=None):
"""
Run validation
Wraps concrete implementation to ensure custom validators return
proper type of result.
:param value: a value to validate
:param model: parent model of the property
:param context: parent model or custom context
:return: shiftschema.result.Error
"""
res = self.validate(value, model, context)
if not isinstance(res, Error):
err = 'Validator "{}" result must be of type "{}", got "{}"'
raise InvalidErrorType(err.format(
self.__class__.__name__,
Error,
type(res))
)
return res
def _add_meta_dict_to_xml(self, doc, parent, meta_dict):
"""
Values in the meta element dict are converted to a BadgerFish-style
encoding (see _convert_hbf_meta_val_for_xml), so regardless of input_format,
we treat them as if they were BadgerFish.
"""
if not meta_dict:
return
key_list = list(meta_dict.keys())
key_list.sort()
for key in key_list:
el_list = _index_list_of_values(meta_dict, key)
for el in el_list:
self._add_meta_value_to_xml_doc(doc, parent, el)
def get_shutit_pexpect_sessions():
"""Returns all the shutit_pexpect sessions in existence.
"""
sessions = []
for shutit_object in shutit_global_object.shutit_objects:
for key in shutit_object.shutit_pexpect_sessions:
sessions.append(shutit_object.shutit_pexpect_sessions[key])
return sessions | 0.026403 |
def get(ctx, key):
'''Retrieve the value for the given key.'''
file = ctx.obj['FILE']
stored_value = get_key(file, key)
if stored_value:
click.echo('%s=%s' % (key, stored_value))
else:
exit(1) | 0.004386 |
def _GetOutputModulesInformation(self):
"""Retrieves the output modules information.
Returns:
list[tuple[str, str]]: pairs of output module names and descriptions.
"""
output_modules_information = []
for name, output_class in output_manager.OutputManager.GetOutputClasses():
output_modules_information.append((name, output_class.DESCRIPTION))
return output_modules_information | 0.004831 |
def expand_groups(grp):
"""Expand group names.
Args:
grp (string): group names to expand
Returns:
list of groups
Examples:
* grp[1-3] will be expanded to [grp1, grp2, grp3]
* grp1 will be expanded to [grp1]
"""
p = re.compile(r"(?P<name>.+)\[(?P<start>\d+)-(?P<end>\d+)\]")
m = p.match(grp)
if m is not None:
s = int(m.group('start'))
e = int(m.group('end'))
n = m.group('name')
return list(map(lambda x: n + str(x), range(s, e + 1)))
else:
return [grp] | 0.00177 |
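A usage sketch taken from the docstring examples; assumes expand_groups and its re import are in scope:

print(expand_groups("grp[1-3]"))  # ['grp1', 'grp2', 'grp3']
print(expand_groups("grp1"))      # ['grp1']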
def create_or_replace_primary_key(self,
table: str,
fieldnames: Sequence[str]) -> int:
"""Make a primary key, or replace it if it exists."""
# *** create_or_replace_primary_key: Uses code specific to MySQL
sql = """
SELECT COUNT(*)
FROM information_schema.table_constraints
WHERE table_name=?
AND table_schema={}
AND constraint_name='PRIMARY'
""".format(self.get_current_schema_expr())
# http://forums.mysql.com/read.php?10,114742,114748#msg-114748
row = self.fetchone(sql, table)
        has_pk_already = row[0] >= 1
drop_pk_if_exists = " DROP PRIMARY KEY," if has_pk_already else ""
fieldlist = ",".join([self.delimit(f) for f in fieldnames])
sql = ("ALTER TABLE " + self.delimit(table) +
drop_pk_if_exists +
" ADD PRIMARY KEY(" + fieldlist + ")")
# http://stackoverflow.com/questions/8859353
return self.db_exec(sql) | 0.003663 |
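A hedged call sketch; `db` is a hypothetical connected instance of the class above:

# Replaces any existing PRIMARY KEY on `patient` with one over (id, visit_id),
# issuing roughly:
#   ALTER TABLE `patient` DROP PRIMARY KEY, ADD PRIMARY KEY(`id`,`visit_id`)
db.create_or_replace_primary_key('patient', ['id', 'visit_id'])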
def stop(self):
"""
Stop the node process.
"""
if self._wrapper_telnet_server:
self._wrapper_telnet_server.close()
yield from self._wrapper_telnet_server.wait_closed()
self.status = "stopped" | 0.007843 |
def configure(self, statistics="max", max_ticks=5, plot_hists=True, flip=True,
serif=True, sigma2d=False, sigmas=None, summary=None, bins=None, rainbow=None,
colors=None, linestyles=None, linewidths=None, kde=False, smooth=None,
cloud=None, shade=None, shade_alpha=None, shade_gradient=None, bar_shade=None,
num_cloud=None, color_params=None, plot_color_params=False, cmaps=None,
plot_contour=None, plot_point=None, global_point=True, marker_style=None, marker_size=None, marker_alpha=None,
usetex=True, diagonal_tick_labels=True, label_font_size=12, tick_font_size=10,
spacing=None, contour_labels=None, contour_label_font_size=10,
legend_kwargs=None, legend_location=None, legend_artists=None,
legend_color_text=True, watermark_text_kwargs=None, summary_area=0.6827): # pragma: no cover
r""" Configure the general plotting parameters common across the bar
and contour plots.
If you do not call this explicitly, the :func:`plot`
method will invoke this method automatically.
Please ensure that you call this method *after* adding all the relevant data to the
        chain consumer, as the consumer changes configuration values depending on
the supplied data.
Parameters
----------
statistics : string|list[str], optional
Which sort of statistics to use. Defaults to `"max"` for maximum likelihood
statistics. Other available options are `"mean"`, `"cumulative"`, `"max_symmetric"`,
`"max_closest"` and `"max_central"`. In the
very, very rare case you want to enable different statistics for different
chains, you can pass in a list of strings.
max_ticks : int, optional
The maximum number of ticks to use on the plots
plot_hists : bool, optional
Whether to plot marginalised distributions or not
flip : bool, optional
Set to false if, when plotting only two parameters, you do not want it to
rotate the histogram so that it is horizontal.
sigma2d: bool, optional
            Defaults to `False`. When `False`, uses :math:`\sigma` levels for 1D Gaussians - i.e. confidence
levels of 68% and 95%. When `True`, uses the confidence levels for 2D Gaussians, where 1 and 2
:math:`\sigma` represents 39% and 86% confidence levels respectively.
sigmas : np.array, optional
The :math:`\sigma` contour levels to plot. Defaults to [0, 1, 2, 3] for a single chain
and [0, 1, 2] for multiple chains.
serif : bool, optional
Whether to display ticks and labels with serif font.
summary : bool, optional
If overridden, sets whether parameter summaries should be set as axis titles.
Will not work if you have multiple chains
bins : int|float,list[int|float], optional
The number of bins to use. By default uses :math:`\frac{\sqrt{n}}{10}`, where
:math:`n` are the number of data points. Giving an integer will set the number
of bins to the given value. Giving a float will scale the number of bins, such
that giving ``bins=1.5`` will result in using :math:`\frac{1.5\sqrt{n}}{10}` bins.
Note this parameter is most useful if `kde=False` is also passed, so you
can actually see the bins and not a KDE.
rainbow : bool|list[bool], optional
Set to True to force use of rainbow colours
colors : str(hex)|list[str(hex)], optional
Provide a list of colours to use for each chain. If you provide more chains
than colours, you *will* get the rainbow colour spectrum. If you only pass
one colour, all chains are set to this colour. This probably won't look good.
linestyles : str|list[str], optional
Provide a list of line styles to plot the contours and marginalised
distributions with. By default, this will become a list of solid lines. If a
string is passed instead of a list, this style is used for all chains.
linewidths : float|list[float], optional
Provide a list of line widths to plot the contours and marginalised
distributions with. By default, this is a width of 1. If a float
is passed instead of a list, this width is used for all chains.
kde : bool|float|list[bool|float], optional
Whether to use a Gaussian KDE to smooth marginalised posteriors. If false, uses
bins and linear interpolation, so ensure you have plenty of samples if your
distribution is highly non-gaussian. Due to the slowness of performing a
KDE on all data, it is often useful to disable this before producing final
plots. If float, scales the width of the KDE bandpass manually.
smooth : int|list[int], optional
Defaults to 3. How much to smooth the marginalised distributions using a gaussian filter.
If ``kde`` is set to true, this parameter is ignored. Setting it to either
``0``, ``False`` disables smoothing. For grid data, smoothing
is set to 0 by default, not 3.
cloud : bool|list[bool], optional
If set, overrides the default behaviour and plots the cloud or not
shade : bool|list[bool] optional
If set, overrides the default behaviour and plots filled contours or not. If a list of
bools is passed, you can turn shading on or off for specific chains.
shade_alpha : float|list[float], optional
Filled contour alpha value override. Default is 1.0. If a list is passed, you can set the
shade opacity for specific chains.
shade_gradient : float|list[float], optional
How much to vary colours in different contour levels.
bar_shade : bool|list[bool], optional
If set to true, shades in confidence regions in under histogram. By default
            this happens if you have fewer than 3 chains, but is disabled if you are comparing
more chains. You can pass a list if you wish to shade some chains but not others.
num_cloud : int|list[int], optional
The number of scatter points to show when enabling `cloud` or setting one of the parameters
to colour scatter. Defaults to 15k per chain.
color_params : str|list[str], optional
The name of the parameter to use for the colour scatter. Defaults to none, for no colour. If set
to 'weights', 'log_weights', or 'posterior' (without the quotes), and that is not a parameter in the chain,
it will respectively use the weights, log weights, or posterior, to colour the points.
plot_color_params : bool|list[bool], optional
Whether or not the colour parameter should also be plotted as a posterior surface.
cmaps : str|list[str], optional
The matplotlib colourmap to use in the `colour_param`. If you have multiple `color_param`s, you can
            specify a different cmap for each variable. By default ChainConsumer will cycle between several
cmaps.
plot_contour : bool|list[bool], optional
Whether to plot the whole contour (as opposed to a point). Defaults to true for less than
25 concurrent chains.
plot_point : bool|list[bool], optional
            Whether to plot a maximum likelihood point. Defaults to true for more than 24 chains.
global_point : bool, optional
Whether the point which gets plotted is the global posterior maximum, or the marginalised 2D
posterior maximum. Note that when you use marginalised 2D maximums for the points, you do not
get the 1D histograms. Defaults to `True`, for a global maximum value.
marker_style : str|list[str], optional
The marker style to use when plotting points. Defaults to `'.'`
marker_size : numeric|list[numeric], optional
Size of markers, if plotted. Defaults to `4`.
marker_alpha : numeric|list[numeric], optional
The alpha values when plotting markers.
usetex : bool, optional
Whether or not to parse text as LaTeX in plots.
diagonal_tick_labels : bool, optional
Whether to display tick labels on a 45 degree angle.
label_font_size : int|float, optional
The font size for plot axis labels and axis titles if summaries are configured to display.
tick_font_size : int|float, optional
The font size for the tick labels in the plots.
spacing : float, optional
The amount of spacing to add between plots. Defaults to `None`, which equates to 1.0 for less
than 6 dimensions and 0.0 for higher dimensions.
contour_labels : string, optional
            If unset do not plot contour labels. If set to "confidence", label using confidence
            intervals. If set to "sigma", label using sigma levels.
contour_label_font_size : int|float, optional
The font size for contour labels, if they are enabled.
legend_kwargs : dict, optional
Extra arguments to pass to the legend api.
legend_location : tuple(int,int), optional
Specifies the subplot in which to locate the legend. By default, this will be (0, -1),
corresponding to the top right subplot if there are more than two parameters,
and the bottom left plot for only two parameters with flip on.
For having the legend in the primary subplot
in the bottom left, set to (-1,0).
legend_artists : bool, optional
            Whether to include line-style artists in the legend. If all linestyles and line widths are identical,
this will default to false (as only the colours change). Otherwise it will be true.
legend_color_text : bool, optional
Whether to colour the legend text.
watermark_text_kwargs : dict, optional
Options to pass to the fontdict property when generating text for the watermark.
summary_area : float, optional
The confidence interval used when generating parameter summaries. Defaults to 1 sigma, aka 0.6827
Returns
-------
ChainConsumer
Itself, to allow chaining calls.
"""
# Dirty way of ensuring overrides happen when requested
l = locals()
explicit = []
for k in l.keys():
if l[k] is not None:
explicit.append(k)
if k.endswith("s"):
explicit.append(k[:-1])
self._init_params()
num_chains = len(self.chains)
assert rainbow is None or colors is None, \
"You cannot both ask for rainbow colours and then give explicit colours"
# Determine statistics
assert statistics is not None, "statistics should be a string or list of strings!"
if isinstance(statistics, str):
assert statistics in list(Analysis.summaries), "statistics %s not recognised. Should be in %s" % (statistics, Analysis.summaries)
statistics = [statistics.lower()] * len(self.chains)
elif isinstance(statistics, list):
for i, l in enumerate(statistics):
statistics[i] = l.lower()
else:
raise ValueError("statistics is not a string or a list!")
# Determine KDEs
if isinstance(kde, bool) or isinstance(kde, float):
kde = [False if c.grid else kde for c in self.chains]
kde_override = [c.kde for c in self.chains]
kde = [c2 if c2 is not None else c1 for c1, c2 in zip(kde, kde_override)]
# Determine bins
if bins is None:
bins = get_bins(self.chains)
elif isinstance(bins, list):
bins = [b2 if isinstance(b2, int) else np.floor(b2 * b1) for b1, b2 in zip(get_bins(self.chains), bins)]
elif isinstance(bins, float):
bins = [np.floor(b * bins) for b in get_bins(self.chains)]
elif isinstance(bins, int):
bins = [bins] * len(self.chains)
else:
raise ValueError("bins value is not a recognised class (float or int)")
# Determine smoothing
if smooth is None:
smooth = [0 if c.grid or k else 3 for c, k in zip(self.chains, kde)]
else:
            if not smooth:
                smooth = 0
if isinstance(smooth, list):
smooth = [0 if k else s for s, k in zip(smooth, kde)]
else:
smooth = [0 if k else smooth for k in kde]
# Determine color parameters
if color_params is None:
color_params = [None] * num_chains
else:
if isinstance(color_params, str):
color_params = [
color_params if color_params in cs.parameters + ["log_weights", "weights", "posterior"] else None
for cs in self.chains]
color_params = [None if c == "posterior" and self.chains[i].posterior is None else c for i, c in
enumerate(color_params)]
elif isinstance(color_params, list) or isinstance(color_params, tuple):
for c, chain in zip(color_params, self.chains):
p = chain.parameters
if c is not None:
assert c in p, "Color parameter %s not in parameters %s" % (c, p)
# Determine if we should plot color parameters
if isinstance(plot_color_params, bool):
plot_color_params = [plot_color_params] * len(color_params)
# Determine cmaps
if cmaps is None:
param_cmaps = {}
cmaps = []
i = 0
for cp in color_params:
if cp is None:
cmaps.append(None)
elif cp in param_cmaps:
cmaps.append(param_cmaps[cp])
else:
param_cmaps[cp] = self._cmaps[i]
cmaps.append(self._cmaps[i])
i = (i + 1) % len(self._cmaps)
# Determine colours
if colors is None:
if rainbow:
colors = self.color_finder.get_colormap(num_chains)
else:
if num_chains > len(self._all_colours):
num_needed_colours = np.sum([c is None for c in color_params])
colour_list = self.color_finder.get_colormap(num_needed_colours)
else:
colour_list = self._all_colours
colors = []
ci = 0
for c in color_params:
if c:
colors.append('#000000')
else:
colors.append(colour_list[ci])
ci += 1
elif isinstance(colors, str):
colors = [colors] * len(self.chains)
colors = self.color_finder.get_formatted(colors)
# Determine linestyles
if linestyles is None:
i = 0
linestyles = []
for c in color_params:
if c is None:
linestyles.append(self._linestyles[0])
else:
linestyles.append(self._linestyles[i])
i = (i + 1) % len(self._linestyles)
elif isinstance(linestyles, str):
linestyles = [linestyles] * len(self.chains)
# Determine linewidths
if linewidths is None:
linewidths = [1.0] * len(self.chains)
elif isinstance(linewidths, float) or isinstance(linewidths, int):
linewidths = [linewidths] * len(self.chains)
# Determine clouds
if cloud is None:
cloud = False
cloud = [cloud or c is not None for c in color_params]
# Determine cloud points
if num_cloud is None:
num_cloud = 30000
if isinstance(num_cloud, int) or isinstance(num_cloud, float):
num_cloud = [int(num_cloud)] * num_chains
# Should we shade the contours
if shade is None:
if shade_alpha is None:
shade = num_chains <= 3
else:
shade = True
if isinstance(shade, bool):
# If not overridden, do not shade chains with colour scatter points
shade = [shade and c is None for c in color_params]
# Modify shade alpha based on how many chains we have
if shade_alpha is None:
if num_chains == 1:
if contour_labels is not None:
shade_alpha = 0.75
else:
shade_alpha = 1.0
else:
shade_alpha = 1.0 / num_chains
# Decrease the shading amount if there are colour scatter points
if isinstance(shade_alpha, float) or isinstance(shade_alpha, int):
shade_alpha = [shade_alpha if c is None else 0.25 * shade_alpha for c in color_params]
if shade_gradient is None:
shade_gradient = 1.0
if isinstance(shade_gradient, float):
shade_gradient = [shade_gradient] * num_chains
elif isinstance(shade_gradient, list):
assert len(shade_gradient) == num_chains, \
"Have %d shade_gradient but % chains" % (len(shade_gradient), num_chains)
contour_over_points = num_chains < 20
if plot_contour is None:
plot_contour = [contour_over_points if chain.posterior is not None else True for chain in self.chains]
elif isinstance(plot_contour, bool):
plot_contour = [plot_contour] * num_chains
if plot_point is None:
plot_point = [not contour_over_points] * num_chains
elif isinstance(plot_point, bool):
plot_point = [plot_point] * num_chains
if marker_style is None:
marker_style = ['.'] * num_chains
elif isinstance(marker_style, str):
marker_style = [marker_style] * num_chains
if marker_size is None:
marker_size = [4] * num_chains
        elif isinstance(marker_size, (int, float)):
marker_size = [marker_size] * num_chains
if marker_alpha is None:
marker_alpha = [1.0] * num_chains
elif isinstance(marker_alpha, (int, float)):
marker_alpha = [marker_alpha] * num_chains
# Figure out if we should display parameter summaries
if summary is not None:
summary = summary and num_chains == 1
# Figure out bar shading
if bar_shade is None:
bar_shade = num_chains <= 3
if isinstance(bar_shade, bool):
bar_shade = [bar_shade] * num_chains
# Figure out how many sigmas to plot
if sigmas is None:
if num_chains == 1:
                sigmas = np.array([0, 1, 2, 3])
else:
sigmas = np.array([0, 1, 2])
if sigmas[0] != 0:
sigmas = np.concatenate(([0], sigmas))
sigmas = np.sort(sigmas)
if contour_labels is not None:
assert isinstance(contour_labels, str), "contour_labels parameter should be a string"
contour_labels = contour_labels.lower()
assert contour_labels in ["sigma", "confidence"], "contour_labels should be either sigma or confidence"
assert isinstance(contour_label_font_size, int) or isinstance(contour_label_font_size, float), \
"contour_label_font_size needs to be numeric"
if legend_artists is None:
legend_artists = len(set(linestyles)) > 1 or len(set(linewidths)) > 1
if legend_kwargs is not None:
assert isinstance(legend_kwargs, dict), "legend_kwargs should be a dict"
else:
legend_kwargs = {}
if num_chains < 3:
labelspacing = 0.5
elif num_chains == 3:
labelspacing = 0.2
else:
labelspacing = 0.15
legend_kwargs_default = {
"labelspacing": labelspacing,
"loc": "upper right",
"frameon": False,
"fontsize": label_font_size,
"handlelength": 1,
"handletextpad": 0.2,
"borderaxespad": 0.0
}
legend_kwargs_default.update(legend_kwargs)
watermark_text_kwargs_default = {
"color": "#333333",
"alpha": 0.7,
"verticalalignment": "center",
"horizontalalignment": "center"
}
if watermark_text_kwargs is None:
watermark_text_kwargs = {}
watermark_text_kwargs_default.update(watermark_text_kwargs)
assert isinstance(summary_area, float), "summary_area needs to be a float, not %s!" % type(summary_area)
assert summary_area > 0, "summary_area should be a positive number, instead is %s!" % summary_area
assert summary_area < 1, "summary_area must be less than unity, instead is %s!" % summary_area
assert isinstance(global_point, bool), "global_point should be a bool"
# List options
for i, c in enumerate(self.chains):
try:
c.update_unset_config("statistics", statistics[i], override=explicit)
c.update_unset_config("color", colors[i], override=explicit)
c.update_unset_config("linestyle", linestyles[i], override=explicit)
c.update_unset_config("linewidth", linewidths[i], override=explicit)
c.update_unset_config("cloud", cloud[i], override=explicit)
c.update_unset_config("shade", shade[i], override=explicit)
c.update_unset_config("shade_alpha", shade_alpha[i], override=explicit)
c.update_unset_config("shade_gradient", shade_gradient[i], override=explicit)
c.update_unset_config("bar_shade", bar_shade[i], override=explicit)
c.update_unset_config("bins", bins[i], override=explicit)
c.update_unset_config("kde", kde[i], override=explicit)
c.update_unset_config("smooth", smooth[i], override=explicit)
c.update_unset_config("color_params", color_params[i], override=explicit)
c.update_unset_config("plot_color_params", plot_color_params[i], override=explicit)
c.update_unset_config("cmap", cmaps[i], override=explicit)
c.update_unset_config("num_cloud", num_cloud[i], override=explicit)
c.update_unset_config("marker_style", marker_style[i], override=explicit)
c.update_unset_config("marker_size", marker_size[i], override=explicit)
c.update_unset_config("marker_alpha", marker_alpha[i], override=explicit)
c.update_unset_config("plot_contour", plot_contour[i], override=explicit)
c.update_unset_config("plot_point", plot_point[i], override=explicit)
c.config["summary_area"] = summary_area
            except IndexError as e:
print("Index error when assigning chain properties, make sure you "
"have enough properties set for the number of chains you have loaded! "
"See the stack trace for which config item has the wrong number of entries.")
raise e
# Non list options
self.config["sigma2d"] = sigma2d
self.config["sigmas"] = sigmas
self.config["summary"] = summary
self.config["flip"] = flip
self.config["serif"] = serif
self.config["plot_hists"] = plot_hists
self.config["max_ticks"] = max_ticks
self.config["usetex"] = usetex
self.config["diagonal_tick_labels"] = diagonal_tick_labels
self.config["label_font_size"] = label_font_size
self.config["tick_font_size"] = tick_font_size
self.config["spacing"] = spacing
self.config["contour_labels"] = contour_labels
self.config["contour_label_font_size"] = contour_label_font_size
self.config["legend_location"] = legend_location
self.config["legend_kwargs"] = legend_kwargs_default
self.config["legend_artists"] = legend_artists
self.config["legend_color_text"] = legend_color_text
self.config["watermark_text_kwargs"] = watermark_text_kwargs_default
self.config["global_point"] = global_point
self._configured = True
return self | 0.004796 |
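A hedged end-to-end sketch of configure() in use; the sample data is made up, and the plotter attribute is assumed from recent ChainConsumer versions:

import numpy as np
from chainconsumer import ChainConsumer

samples = np.random.multivariate_normal([0.0, 1.0], [[1.0, 0.5], [0.5, 2.0]], size=50000)
c = ChainConsumer()
c.add_chain(samples, parameters=["$x$", "$y$"])
c.configure(statistics="mean", kde=True, shade_alpha=0.5)  # returns self, so calls can chain
fig = c.plotter.plot()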
def save_swagger_spec(self, filepath=None):
"""
Saves a copy of the origin_spec to a local file in JSON format
"""
if filepath is True or filepath is None:
filepath = self.file_spec.format(server=self.server)
        with open(filepath, 'w+') as f:
            json.dump(self.origin_spec, f, indent=3) | 0.006231
def reproject_to_grid_coordinates(self, grid_coordinates, interp=gdalconst.GRA_NearestNeighbour):
""" Reprojects data in this layer to match that in the GridCoordinates
object. """
source_dataset = self.grid_coordinates._as_gdal_dataset()
dest_dataset = grid_coordinates._as_gdal_dataset()
rb = source_dataset.GetRasterBand(1)
rb.SetNoDataValue(NO_DATA_VALUE)
rb.WriteArray(np.ma.filled(self.raster_data, NO_DATA_VALUE))
gdal.ReprojectImage(source_dataset, dest_dataset,
source_dataset.GetProjection(),
dest_dataset.GetProjection(),
interp)
dest_layer = self.clone_traits()
dest_layer.grid_coordinates = grid_coordinates
rb = dest_dataset.GetRasterBand(1)
dest_layer.raster_data = np.ma.masked_values(rb.ReadAsArray(), NO_DATA_VALUE)
return dest_layer | 0.004255 |
def H10(self):
"Difference variance."
c = (self.rlevels * self.p_xminusy).sum(1)
c1 = np.tile(c, (self.nlevels,1)).transpose()
e = self.rlevels - c1
return (self.p_xminusy * e ** 2).sum(1) | 0.013158 |
def bar3_chart(self, title, labels, data1, file_name, data2, data3, legend=["", ""]):
"""
Generate a bar plot with three columns in each x position and save it to file_name
:param title: title to be used in the chart
:param labels: list of labels for the x axis
:param data1: values for the first columns
:param file_name: name of the file in which to save the chart
:param data2: values for the second columns
:param data3: values for the third columns
:param legend: legend to be shown in the chart
:return:
"""
        data1 = self.__convert_none_to_zero(data1)
        data2 = self.__convert_none_to_zero(data2)
        data3 = self.__convert_none_to_zero(data3)
        fig, ax = plt.subplots(1)
        xpos = np.arange(len(data1))
        width = 0.28
        plt.title(title)
        ppl.bar(xpos + 2 * width, data3, color="orange", width=width, annotate=True)
        ppl.bar(xpos + width, data1, color='grey', width=width, annotate=True)
        ppl.bar(xpos, data2, grid='y', width=width, annotate=True)
plt.xticks(xpos + width, labels)
plt.legend(legend, loc=2)
os.makedirs(os.path.dirname(file_name), exist_ok=True)
plt.savefig(file_name)
plt.close() | 0.003642 |
def remove_object_metadata_key(self, container, obj, key, prefix=None):
"""
Removes the specified key from the storage object's metadata. If the key
does not exist in the metadata, nothing is done.
"""
self.set_object_metadata(container, obj, {key: ""}, prefix=prefix) | 0.00974 |
def search_images(q, start=1, count=10, wait=10, asynchronous=False, cached=False):
""" Returns a Yahoo images query formatted as a YahooSearch list object.
"""
service = YAHOO_IMAGES
return YahooSearch(q, start, count, service, None, wait, asynchronous, cached) | 0.017361 |
def get_gsims(self, trt):
"""
:param trt: tectonic region type
:returns: sorted list of available GSIMs for that trt
"""
if trt == '*' or trt == b'*': # fake logictree
[trt] = self.values
return sorted(self.values[trt]) | 0.007143 |
def join(tokens, start, result):
"""Join tokens into a single string with spaces between."""
texts = []
if len(result) > 0:
for e in result:
for child in e.iter():
if child.text is not None:
texts.append(child.text)
        return [E(result[0].tag, ' '.join(texts))] | 0.002994
def percent_rank(expr, sort=None, ascending=True):
"""
Calculate percentage rank of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:return: calculated column
"""
return _rank_op(expr, PercentRank, types.float64, sort=sort, ascending=ascending) | 0.005236 |
def to_json_data(self, model_name=None):
"""
Parameters
----------
model_name: str, default None
if given, will be used as external file directory base name
Returns
-------
A dictionary of serialized data.
"""
        return collections.OrderedDict([(k, self.get_serialized_value(k, model_name=model_name)) for k in self._data]) | 0.009852
def ebi_expression_atlas(accession: str, *, filter_boring: bool = False):
"""Load a dataset from the `EBI Single Cell Expression Atlas <https://www.ebi.ac.uk/gxa/sc/experiments>`__.
Downloaded datasets are saved in directory specified by `sc.settings.datasetdir`.
Params
------
accession
Dataset accession. Like ``E-GEOD-98816`` or ``E-MTAB-4888``. This can
be found in the url on the datasets page. For example:
``https://www.ebi.ac.uk/gxa/sc/experiments/E-GEOD-98816/results/tsne``
filter_boring
Whether boring labels in `.obs` should be automatically removed.
Example
-------
>>> adata = sc.datasets.ebi_expression_atlas("E-MTAB-4888")
"""
experiment_dir = settings.datasetdir / accession
dataset_path = experiment_dir / "{}.h5ad".format(accession)
try:
adata = anndata.read(dataset_path)
if filter_boring:
adata.obs = _filter_boring(adata.obs)
return adata
except OSError:
# Dataset couldn't be read for whatever reason
pass
download_experiment(accession)
print("Downloaded {} to {}".format(accession, experiment_dir.absolute()))
with ZipFile(experiment_dir / "expression_archive.zip", "r") as f:
adata = read_expression_from_archive(f)
obs = pd.read_csv(
experiment_dir / "experimental_design.tsv", sep="\t", index_col=0
)
adata.obs[obs.columns] = obs
adata.write(dataset_path, compression="gzip") # To be kind to disk space
if filter_boring:
adata.obs = _filter_boring(adata.obs)
return adata | 0.001866 |
def grid(self, start=None, stop=None, St=None, **kwargs):
"""Grid-like representation of payoff & profit structure.
Returns
-------
tuple
Tuple of `St` (price at expiry), `payoffs`, `profits`.
"""
lb = 0.75
rb = 1.25
if not any((start, stop, St)) and self.St is None:
St = np.mean([op.K for op in self.options], axis=0)
start = St * lb
stop = St * rb
elif not any((start, stop)):
St = self.St if St is None else St
start = np.max(St) * lb
stop = np.max(St) * rb
St = np.linspace(start, stop, **kwargs)
payoffs = self.payoff(St=St)
profits = self.profit(St=St)
return St, payoffs, profits | 0.002503 |
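A usage sketch; `strat` is a hypothetical strategy object of the class above, and num is forwarded to np.linspace through **kwargs:

St, payoffs, profits = strat.grid(start=80.0, stop=120.0, num=200)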
def get_page_number(self, index):
"""
Given an index, return page label as specified by
catalog['PageLabels']['Nums']
In a PDF, page labels are stored as a list of pairs, like
[starting_index, label_format, starting_index, label_format ...]
For example:
[0, {'S': 'D', 'St': 151}, 4, {'S':'R', 'P':'Foo'}]
So we have to first find the correct label_format based on the closest
starting_index lower than the requested index, then use the
label_format to convert the index to a page label.
Label format meaning:
/S = [
D Decimal arabic numerals
R Uppercase roman numerals
r Lowercase roman numerals
A Uppercase letters (A to Z for the first 26 pages, AA to ZZ
for the next 26, and so on)
a Lowercase letters (a to z for the first 26 pages, aa to zz
for the next 26, and so on)
] (if no /S, just use prefix ...)
/P = text string label
/St = integer start value
"""
# get and cache page ranges
if not hasattr(self, 'page_range_pairs'):
try:
page_ranges = resolve1(self.catalog['PageLabels'])['Nums']
assert len(page_ranges) > 1 and len(page_ranges) % 2 == 0
self.page_range_pairs = list(
reversed(list(zip(page_ranges[::2], page_ranges[1::2]))))
            except Exception:
self.page_range_pairs = []
if not self.page_range_pairs:
return ""
# find page range containing index
for starting_index, label_format in self.page_range_pairs:
if starting_index <= index:
break # we found correct label_format
label_format = resolve1(label_format)
page_label = ""
# handle numeric part of label
if 'S' in label_format:
# first find number for this page ...
page_label = index - starting_index
if 'St' in label_format: # alternate start value
page_label += label_format['St']
else:
page_label += 1
# ... then convert to correct format
num_type = label_format['S'].name
# roman (upper or lower)
if num_type.lower() == 'r':
import roman
page_label = roman.toRoman(page_label)
if num_type == 'r':
page_label = page_label.lower()
# letters
elif num_type.lower() == 'a':
# a to z for the first 26 pages, aa to zz for the next 26, and
# so on
            letter = chr((page_label - 1) % 26 + 65)
            letter *= (page_label - 1) // 26 + 1
if num_type == 'a':
letter = letter.lower()
page_label = letter
# decimal arabic
else: # if num_type == 'D':
page_label = obj_to_string(page_label)
# handle string prefix
if 'P' in label_format:
page_label = smart_unicode_decode(label_format['P']) + page_label
return page_label | 0.001481 |
def devserver(arguments):
"""Run a development server."""
import coil.web
if coil.web.app:
port = int(arguments['--port'])
url = 'http://localhost:{0}/'.format(port)
coil.web.configure_url(url)
coil.web.app.config['DEBUG'] = True
if arguments['--browser']:
webbrowser.open(url)
coil.web.app.logger.info("Coil CMS running @ {0}".format(url))
coil.web.app.run('localhost', port, debug=True)
return 0
else:
print("FATAL: no conf.py found")
return 255 | 0.001795 |
async def enable_analog_reporting(self, pin):
"""
        Enables analog reporting by turning reporting on for a single pin.
:param pin: Analog pin number. For example for A0, the number is 0.
:returns: No return value
"""
command = [PrivateConstants.REPORT_ANALOG + pin,
PrivateConstants.REPORTING_ENABLE]
await self._send_command(command) | 0.004878 |
def alter_configs(self, config_resources):
"""Alter configuration parameters of one or more Kafka resources.
Warning:
This is currently broken for BROKER resources because those must be
sent to that specific broker, versus this always picks the
least-loaded node. See the comment in the source code for details.
We would happily accept a PR fixing this.
:param config_resources: A list of ConfigResource objects.
:return: Appropriate version of AlterConfigsResponse class.
"""
version = self._matching_api_version(AlterConfigsRequest)
if version == 0:
request = AlterConfigsRequest[version](
resources=[self._convert_alter_config_resource_request(config_resource) for config_resource in config_resources]
)
else:
raise NotImplementedError(
"Support for AlterConfigs v{} has not yet been added to KafkaAdminClient."
.format(version))
# TODO the Java client has the note:
# // We must make a separate AlterConfigs request for every BROKER resource we want to alter
# // and send the request to that specific broker. Other resources are grouped together into
# // a single request that may be sent to any broker.
#
# So this is currently broken as it always sends to the least_loaded_node()
return self._send_request_to_node(self._client.least_loaded_node(), request) | 0.00527 |
def predict(self, n_periods=10, exogenous=None,
return_conf_int=False, alpha=0.05):
"""Forecast future values
Generate predictions (forecasts) ``n_periods`` in the future.
Note that if ``exogenous`` variables were used in the model fit, they
will be expected for the predict procedure and will fail otherwise.
Parameters
----------
n_periods : int, optional (default=10)
The number of periods in the future to forecast.
exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
An optional 2-d array of exogenous variables. If provided, these
variables are used as additional features in the regression
operation. This should not include a constant or trend. Note that
if an ``ARIMA`` is fit on exogenous features, it must be provided
exogenous features for making predictions.
return_conf_int : bool, optional (default=False)
Whether to get the confidence intervals of the forecasts.
alpha : float, optional (default=0.05)
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecasts : array-like, shape=(n_periods,)
            The array of forecasted values.
conf_int : array-like, shape=(n_periods, 2), optional
The confidence intervals for the forecasts. Only returned if
``return_conf_int`` is True.
"""
check_is_fitted(self, 'arima_res_')
if not isinstance(n_periods, (int, long)):
raise TypeError("n_periods must be an int or a long")
# if we fit with exog, make sure one was passed:
exogenous = self._check_exog(exogenous) # type: np.ndarray
if exogenous is not None and exogenous.shape[0] != n_periods:
raise ValueError('Exogenous array dims (n_rows) != n_periods')
# ARIMA/ARMA predict differently...
if not self._is_seasonal():
# use the results wrapper to predict so it injects its own params
            # (also, if the differencing order was 0, ARMA will not have a forecast method natively)
f, _, conf_int = self.arima_res_.forecast(
steps=n_periods, exog=exogenous, alpha=alpha)
else: # SARIMAX
# Unfortunately, SARIMAX does not really provide a nice way to get
# the confidence intervals out of the box, so we have to perform
# the get_prediction code here and unpack the confidence intervals
# manually.
# f = self.arima_res_.forecast(steps=n_periods, exog=exogenous)
arima = self.arima_res_
end = arima.nobs + n_periods - 1
results = arima.get_prediction(start=arima.nobs,
end=end,
exog=exogenous)
f = results.predicted_mean
conf_int = results.conf_int(alpha=alpha)
if return_conf_int:
# The confidence intervals may be a Pandas frame if it comes from
            # SARIMAX & we want Numpy. We duck type it so we don't add
# new explicit requirements for the package
return f, check_array(conf_int, force_all_finite=False)
return f | 0.000901 |
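A hedged forecasting sketch in the pmdarima style this method implies; `y` is hypothetical data:

import numpy as np
import pmdarima as pm

y = np.cumsum(np.random.randn(200))  # hypothetical random-walk series
model = pm.ARIMA(order=(1, 1, 1)).fit(y)
forecasts, conf_int = model.predict(n_periods=12, return_conf_int=True, alpha=0.05)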
def buildhtml(self):
"""Build the HTML page
Create the htmlheader with css / js
Create html page
"""
self.buildcontent()
self.buildhtmlheader()
self.content = self._htmlcontent.decode('utf-8') # need to ensure unicode
self._htmlcontent = self.template_page_highcharts.render(chart=self)
return self._htmlcontent | 0.010444 |
def escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
"""
if hasattr(s, '__html__'):
return s.__html__()
    if isinstance(s, six.binary_type):
        s = six.text_type(s, 'utf8')
    elif not isinstance(s, six.text_type):
        s = six.text_type(s)
return (s
.replace('&', '&')
.replace('>', '>')
.replace('<', '<')
.replace("'", ''')
.replace('"', '"')
) | 0.011129 |
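A quick usage sketch showing the substitutions (assumes the function is importable):

print(escape('<a href="x">Tom & Jerry</a>'))
# &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;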
def bind_transient(type_to_bind: hexdi.core.restype, accessor: hexdi.core.clstype):
"""
shortcut for bind_type with PerResolveLifeTimeManager on root container
:param type_to_bind: type that will be resolved by accessor
:param accessor: accessor for resolving object
"""
hexdi.core.get_root_container().bind_type(type_to_bind, accessor, lifetime.PerResolveLifeTimeManager) | 0.007557 |
def _has_next_page(self):
"""Determines whether or not there are more pages with results.
Returns:
bool: Whether the iterator has more pages.
"""
if self.page_number == 0:
return True
if self.max_results is not None:
if self.num_results >= self.max_results:
return False
# Note: intentionally a falsy check instead of a None check. The RPC
# can return an empty string indicating no more pages.
        return bool(self.next_page_token) | 0.003571
def add_voice_call_api(mock):
'''Add org.ofono.VoiceCallManager API to a mock'''
# also add an emergency number which is not a real one, in case one runs a
# test case against a production ofono :-)
mock.AddProperty('org.ofono.VoiceCallManager', 'EmergencyNumbers', ['911', '13373'])
mock.calls = [] # object paths
mock.AddMethods('org.ofono.VoiceCallManager', [
('GetProperties', '', 'a{sv}', 'ret = self.GetAll("org.ofono.VoiceCallManager")'),
('Transfer', '', '', ''),
('SwapCalls', '', '', ''),
('ReleaseAndAnswer', '', '', ''),
('ReleaseAndSwap', '', '', ''),
('HoldAndAnswer', '', '', ''),
('SendTones', 's', '', ''),
('PrivateChat', 'o', 'ao', NOT_IMPLEMENTED),
('CreateMultiparty', '', 'o', NOT_IMPLEMENTED),
('HangupMultiparty', '', '', NOT_IMPLEMENTED),
('GetCalls', '', 'a(oa{sv})', 'ret = [(c, objects[c].GetAll("org.ofono.VoiceCall")) for c in self.calls]')
]) | 0.004028 |
def set_dialect(dialect):
'''set the MAVLink dialect to work with.
For example, set_dialect("ardupilotmega")
'''
global mavlink, current_dialect
from .generator import mavparse
if 'MAVLINK20' in os.environ:
wire_protocol = mavparse.PROTOCOL_2_0
modname = "pymavlink.dialects.v20." + dialect
    elif mavlink is None or mavlink.WIRE_PROTOCOL_VERSION == "1.0" or 'MAVLINK09' not in os.environ:
wire_protocol = mavparse.PROTOCOL_1_0
modname = "pymavlink.dialects.v10." + dialect
else:
wire_protocol = mavparse.PROTOCOL_0_9
modname = "pymavlink.dialects.v09." + dialect
try:
mod = __import__(modname)
except Exception:
# auto-generate the dialect module
from .generator.mavgen import mavgen_python_dialect
mavgen_python_dialect(dialect, wire_protocol)
mod = __import__(modname)
components = modname.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
current_dialect = dialect
mavlink = mod | 0.002865 |
def create_single_weather(df, rename_dc):
"""Create an oemof weather object for the given geometry"""
my_weather = weather.FeedinWeather()
data_height = {}
name = None
# Create a pandas.DataFrame with the time series of the weather data set
weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
for row in df.iterrows():
key = rename_dc[row[1].type]
weather_df[key] = row[1].time_series
data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
name = row[1].gid
my_weather.data = weather_df
my_weather.timezone = weather_df.index.tz
my_weather.longitude = df.geom_point.iloc[0].x
my_weather.latitude = df.geom_point.iloc[0].y
my_weather.geometry = df.geom_point.iloc[0]
my_weather.data_height = data_height
my_weather.name = name
return my_weather | 0.001161 |
def raise_error(error):
"""Intakes a dict of remote error information and raises a DashiError
"""
exc_type = error.get('exc_type')
if exc_type and exc_type.startswith(ERROR_PREFIX):
exc_type = exc_type[len(ERROR_PREFIX):]
exc_cls = ERROR_TYPE_MAP.get(exc_type, DashiError)
else:
exc_cls = DashiError
raise exc_cls(**error) | 0.002695 |
def eval_nonagg_call(self, exp):
    "helper for eval_callx; evaluator for CallX that consumes a single value"
    # todo: get more concrete about argument counts
    args = self.eval(exp.args)
    if exp.f == 'coalesce':
        a, b = args  # todo: does coalesce take more than 2 args?
        return b if a is None else a
    elif exp.f == 'unnest':
        return self.eval(exp.args)[0]  # note: run_select does some work in this case too
    elif exp.f in ('to_tsquery', 'to_tsvector'):
        return set(self.eval(exp.args.children[0]).split())
    else:
        raise NotImplementedError('unk_function', exp.f) | 0.029514
def len_on_depth(d, depth):
"""Get the number of nodes on specific depth.
"""
counter = 0
for node in DictTree.v_depth(d, depth-1):
counter += DictTree.length(node)
return counter | 0.008658 |
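A hedged usage sketch; the DictTree API and tree shape are assumptions inferred from the call sites above:

tree = {'a': {'b': {}, 'c': {}}, 'd': {}}
print(DictTree.len_on_depth(tree, 2))  # count of nodes two levels below the root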
def create_target_group(Name=None, Protocol=None, Port=None, VpcId=None, HealthCheckProtocol=None, HealthCheckPort=None, HealthCheckPath=None, HealthCheckIntervalSeconds=None, HealthCheckTimeoutSeconds=None, HealthyThresholdCount=None, UnhealthyThresholdCount=None, Matcher=None):
"""
Creates a target group.
To register targets with the target group, use RegisterTargets . To update the health check settings for the target group, use ModifyTargetGroup . To monitor the health of targets in the target group, use DescribeTargetHealth .
To route traffic to the targets in a target group, specify the target group in an action using CreateListener or CreateRule .
To delete a target group, use DeleteTargetGroup .
For more information, see Target Groups for Your Application Load Balancers in the Application Load Balancers Guide .
See also: AWS API Documentation
Examples
This example creates a target group that you can use to route traffic to targets using HTTP on port 80. This target group uses the default health check configuration.
Expected Output:
:example: response = client.create_target_group(
Name='string',
Protocol='HTTP'|'HTTPS',
Port=123,
VpcId='string',
HealthCheckProtocol='HTTP'|'HTTPS',
HealthCheckPort='string',
HealthCheckPath='string',
HealthCheckIntervalSeconds=123,
HealthCheckTimeoutSeconds=123,
HealthyThresholdCount=123,
UnhealthyThresholdCount=123,
Matcher={
'HttpCode': 'string'
}
)
:type Name: string
:param Name: [REQUIRED]
The name of the target group.
This name must be unique per region per account, can have a maximum of 32 characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen.
:type Protocol: string
:param Protocol: [REQUIRED]
The protocol to use for routing traffic to the targets.
:type Port: integer
:param Port: [REQUIRED]
The port on which the targets receive traffic. This port is used unless you specify a port override when registering the target.
:type VpcId: string
:param VpcId: [REQUIRED]
The identifier of the virtual private cloud (VPC).
:type HealthCheckProtocol: string
:param HealthCheckProtocol: The protocol the load balancer uses when performing health checks on targets. The default is the HTTP protocol.
:type HealthCheckPort: string
:param HealthCheckPort: The port the load balancer uses when performing health checks on targets. The default is traffic-port , which indicates the port on which each target receives traffic from the load balancer.
:type HealthCheckPath: string
:param HealthCheckPath: The ping path that is the destination on the targets for health checks. The default is /.
:type HealthCheckIntervalSeconds: integer
:param HealthCheckIntervalSeconds: The approximate amount of time, in seconds, between health checks of an individual target. The default is 30 seconds.
:type HealthCheckTimeoutSeconds: integer
:param HealthCheckTimeoutSeconds: The amount of time, in seconds, during which no response from a target means a failed health check. The default is 5 seconds.
:type HealthyThresholdCount: integer
:param HealthyThresholdCount: The number of consecutive health checks successes required before considering an unhealthy target healthy. The default is 5.
:type UnhealthyThresholdCount: integer
:param UnhealthyThresholdCount: The number of consecutive health check failures required before considering a target unhealthy. The default is 2.
:type Matcher: dict
:param Matcher: The HTTP codes to use when checking for a successful response from a target. The default is 200.
HttpCode (string) -- [REQUIRED]The HTTP codes. You can specify values between 200 and 499. The default value is 200. You can specify multiple values (for example, '200,202') or a range of values (for example, '200-299').
:rtype: dict
:return: {
'TargetGroups': [
{
'TargetGroupArn': 'string',
'TargetGroupName': 'string',
'Protocol': 'HTTP'|'HTTPS',
'Port': 123,
'VpcId': 'string',
'HealthCheckProtocol': 'HTTP'|'HTTPS',
'HealthCheckPort': 'string',
'HealthCheckIntervalSeconds': 123,
'HealthCheckTimeoutSeconds': 123,
'HealthyThresholdCount': 123,
'UnhealthyThresholdCount': 123,
'HealthCheckPath': 'string',
'Matcher': {
'HttpCode': 'string'
},
'LoadBalancerArns': [
'string',
]
},
]
}
:returns:
(string) --
"""
pass | 0.005946 |
def _file_prompt_quiet(f):
"""Decorator to toggle 'file prompt quiet' for methods that perform file operations."""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if not self.prompt_quiet_configured:
if self.auto_file_prompt:
# disable file operation prompts
self.device.send_config_set(["file prompt quiet"])
self.prompt_quiet_changed = True
self.prompt_quiet_configured = True
else:
# check if the command is already in the running-config
cmd = "file prompt quiet"
show_cmd = "show running-config | inc {}".format(cmd)
output = self.device.send_command_expect(show_cmd)
if cmd in output:
self.prompt_quiet_configured = True
else:
msg = (
"on-device file operations require prompts to be disabled. "
"Configure 'file prompt quiet' or set 'auto_file_prompt=True'"
)
raise CommandErrorException(msg)
# call wrapped function
return f(self, *args, **kwargs)
return wrapper | 0.003754 |
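A hedged sketch of the decorator applied to a hypothetical file-transfer method on the same class:

@_file_prompt_quiet
def _copy_to_flash(self, filename):
    # prompts are guaranteed disabled before this command runs
    return self.device.send_command_expect("copy running-config flash:{}".format(filename))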
def import_from_setting(setting_name, fallback):
"""Return the resolution of an import path stored in a Django setting.
:arg setting_name: The name of the setting holding the import path
:arg fallback: An alternate object to use if the setting is empty or
doesn't exist
Raise ImproperlyConfigured if a path is given that can't be resolved.
"""
path = getattr(settings, setting_name, None)
if path:
try:
return import_string(path)
except ImportError:
raise ImproperlyConfigured('%s: No such path.' % path)
else:
return fallback | 0.001626 |
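A usage sketch with hypothetical names (RENDERER setting, default_render fallback):

# settings.py: RENDERER = 'myapp.rendering.fancy_render'
render = import_from_setting('RENDERER', default_render)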
def hold_exception(method):
"""Decorator for glib callback methods of GLibMainLoop used to store the
exception raised."""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
"""Wrapper for methods decorated with `hold_exception`."""
# pylint: disable=W0703,W0212
try:
return method(self, *args, **kwargs)
except Exception:
if self.exc_info:
raise
if not self._stack:
logger.debug('@hold_exception wrapped method {0!r} called'
' from outside of the main loop'.format(method))
raise
self.exc_info = sys.exc_info()
logger.debug(u"exception in glib main loop callback:",
exc_info = self.exc_info)
# pylint: disable=W0212
main_loop = self._stack[-1]
if main_loop is not None:
main_loop.quit()
return False
return wrapper | 0.004883 |
def form(self):
"""
This attribute points to default form.
If form was not selected manually then select the form
which has the biggest number of input elements.
The form value is just an `lxml.html` form element.
Example::
g.go('some URL')
# Choose form automatically
print g.form
# And now choose form manually
g.choose_form(1)
print g.form
"""
if self._lxml_form is None:
forms = [(idx, len(list(x.fields)))
for idx, x in enumerate(self.tree.forms)]
if forms:
idx = sorted(forms, key=lambda x: x[1], reverse=True)[0][0]
self.choose_form(idx)
else:
            raise DataNotFound('Response does not contain any form')
return self._lxml_form | 0.002257 |
async def result_processor(tasks):
"""An async result aggregator that combines all the results
This gets executed in unsync.loop and unsync.thread"""
output = {}
for task in tasks:
num, res = await task
output[num] = res
return output | 0.003663 |