text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def parse_elem(element):
"""Parse a OSM node XML element.
Args:
element (etree.Element): XML Element to parse
Returns:
Node: Object representing parsed element
"""
ident = int(element.get('id'))
latitude = element.get('lat')
longitude = element.get('lon')
flags = _parse_flags(element)
return Node(ident, latitude, longitude, *flags) | 0.004619 |
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None) | 0.003215 |
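For illustration, a minimal standalone sketch of the same conversion using only the standard library's zoneinfo (Python 3.9+); zoneinfo zones do not need the pytz normalize() step, which is why the hasattr check above exists:

from datetime import datetime, timezone
from zoneinfo import ZoneInfo

def make_naive_sketch(value, tz_name):
    # convert the aware datetime into the target zone, then drop tzinfo
    local = value.astimezone(ZoneInfo(tz_name))
    return local.replace(tzinfo=None)

aware = datetime(2020, 6, 1, 12, 0, tzinfo=timezone.utc)
print(make_naive_sketch(aware, "Europe/Stockholm"))  # 2020-06-01 14:00:00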
def predict(self, x):
"""
Predict values for a single data point or an RDD of points
using the model trained.
"""
if isinstance(x, RDD):
return x.map(lambda v: self.predict(v))
x = _convert_to_vector(x)
if self.numClasses == 2:
margin = self.weights.dot(x) + self._intercept
if margin > 0:
prob = 1 / (1 + exp(-margin))
else:
exp_margin = exp(margin)
prob = exp_margin / (1 + exp_margin)
if self._threshold is None:
return prob
else:
return 1 if prob > self._threshold else 0
else:
best_class = 0
max_margin = 0.0
if x.size + 1 == self._dataWithBiasSize:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i][0:x.size]) + \
self._weightsMatrix[i][x.size]
if margin > max_margin:
max_margin = margin
best_class = i + 1
else:
for i in range(0, self._numClasses - 1):
margin = x.dot(self._weightsMatrix[i])
if margin > max_margin:
max_margin = margin
best_class = i + 1
return best_class | 0.00141 |
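The two branches of the binary case above form a numerically stable sigmoid: exp(-margin) is only evaluated when the margin is positive, so exp() never overflows. A minimal self-contained sketch of that trick:

from math import exp

def stable_sigmoid(margin):
    # compute 1 / (1 + exp(-margin)) without overflowing exp()
    if margin > 0:
        return 1.0 / (1.0 + exp(-margin))
    exp_margin = exp(margin)
    return exp_margin / (1.0 + exp_margin)

assert abs(stable_sigmoid(0.0) - 0.5) < 1e-12
assert stable_sigmoid(-1000.0) == 0.0  # no OverflowError for large negative margins
assert stable_sigmoid(1000.0) == 1.0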
def compile_graphql_to_gremlin(schema, graphql_string, type_equivalence_hints=None):
"""Compile the GraphQL input using the schema into a Gremlin query and associated metadata.
Args:
schema: GraphQL schema object describing the schema of the graph to be queried
graphql_string: the GraphQL query to compile to Gremlin, as a string
type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union.
Used as a workaround for GraphQL's lack of support for
inheritance across "types" (i.e. non-interfaces), as well as a
workaround for Gremlin's total lack of inheritance-awareness.
The key-value pairs in the dict specify that the "key" type
is equivalent to the "value" type, i.e. that the GraphQL type or
interface in the key is the most-derived common supertype
of every GraphQL type in the "value" GraphQL union.
Recursive expansion of type equivalence hints is not performed,
and only type-level correctness of this argument is enforced.
See README.md for more details on everything this parameter does.
*****
Be very careful with this option, as bad input here will
lead to incorrect output queries being generated.
*****
Returns:
a CompilationResult object
"""
lowering_func = ir_lowering_gremlin.lower_ir
query_emitter_func = emit_gremlin.emit_code_from_ir
return _compile_graphql_generic(
GREMLIN_LANGUAGE, lowering_func, query_emitter_func,
schema, graphql_string, type_equivalence_hints, None) | 0.00874 |
def make_tarball(src_dir):
"""
Make gzipped tarball from a source directory
:param src_dir: source directory
:raises TypeError: if src_dir is not str
"""
if type(src_dir) != str:
raise TypeError('src_dir must be str')
output_file = src_dir + ".tar.gz"
log.msg("Wrapping tarball '{out}' ...".format(out=output_file))
if not _dry_run:
with tarfile.open(output_file, "w:gz") as tar:
tar.add(src_dir, arcname=os.path.basename(src_dir))
return output_file | 0.001927 |
def _create_RSA_private_key(self,
bytes):
"""
Instantiates an RSA key from bytes.
Args:
bytes (byte string): Bytes of RSA private key.
Returns:
private_key
(cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
RSA private key created from key bytes.
"""
try:
private_key = serialization.load_pem_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key
except Exception:
private_key = serialization.load_der_private_key(
bytes,
password=None,
backend=default_backend()
)
return private_key | 0.003584 |
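The try/except above is simply a PEM-then-DER fallback. A self-contained sketch of the same fallback, assuming a reasonably recent `cryptography` release (where the backend argument is optional) and generating a throwaway key instead of using managed key bytes:

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

def load_private_key(data):
    # try PEM first, fall back to DER on failure
    try:
        return serialization.load_pem_private_key(data, password=None)
    except ValueError:
        return serialization.load_der_private_key(data, password=None)

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
der_bytes = key.private_bytes(
    serialization.Encoding.DER,
    serialization.PrivateFormat.PKCS8,
    serialization.NoEncryption(),
)
print(type(load_private_key(der_bytes)).__name__)  # an RSAPrivateKey implementation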
def parse_doc_str(text=None, is_untabbed=True, is_stripped=True,
tab=None, split_character="::"):
"""
Returns a str of the parsed doc. For example,
the following would return 'a:A\nb:B'::
a:A
b:B
:param text: str of the text to parse, by
default uses calling function doc
:param is_untabbed: bool if True will untab the text
:param is_stripped: bool if True will strip the text
:param tab: str of the tab to use when untabbing,
by default it will self determine tab size
:param split_character: str of the character to split the text on
:return: str of the parsed text
"""
text = text or function_doc(2)
text = text.split(split_character, 1)[-1]
text = text.split(':param')[0].split(':return')[0]
tab = is_untabbed and \
(tab or text[:-1 * len(text.lstrip())].split('\n')[-1]) or ''
text = is_stripped and text.strip() or text
return text.replace('\n%s' % tab, '\n') | 0.004785 |
def initialize_baremetal_switch_interfaces(self, interfaces):
"""Initialize Nexus interfaces and for initial baremetal event.
This get/create port channel number, applies channel-group to
ethernet interface, and initializes trunking on interface.
:param interfaces: Receive a list of interfaces containing:
nexus_host: IP address of Nexus switch
intf_type: String which specifies interface type. example: ethernet
interface: String indicating which interface. example: 1/19
is_native: Whether native vlan must be configured.
ch_grp: channel group that may replace the port channel for each entry;
the channel number is 0 if none
"""
if not interfaces:
return
max_ifs = len(interfaces)
starttime = time.time()
learned, nexus_ip_list = self._build_host_list_and_verify_chgrp(
interfaces)
if not nexus_ip_list:
return
if max_ifs > 1:
# update vpc db with learned vpcid or get new one.
if learned:
ch_grp = interfaces[0][-1]
self._configure_learned_port_channel(
nexus_ip_list, ch_grp)
else:
ch_grp = self._get_new_baremetal_portchannel_id(nexus_ip_list)
else:
ch_grp = 0
for i, (nexus_host, intf_type, nexus_port, is_native,
ch_grp_saved) in enumerate(interfaces):
if max_ifs > 1:
if learned:
ch_grp = ch_grp_saved
else:
self._config_new_baremetal_portchannel(
ch_grp, nexus_host, intf_type, nexus_port)
self._replace_interface_ch_grp(interfaces, i, ch_grp)
# init port-channel instead of the provided ethernet
intf_type = 'port-channel'
nexus_port = str(ch_grp)
else:
self._replace_interface_ch_grp(interfaces, i, ch_grp)
trunk_mode_present, vlan_present = (
self._get_interface_switch_trunk_present(
nexus_host, intf_type, nexus_port))
if not vlan_present:
self.send_enable_vlan_on_trunk_int(
nexus_host, "", intf_type, nexus_port, False,
not trunk_mode_present)
elif not trunk_mode_present:
LOG.warning(TRUNK_MODE_NOT_FOUND, nexus_host,
nexus_help.format_interface_name(
intf_type, nexus_port))
self.capture_and_print_timeshot(
starttime, "init_bmif",
switch=nexus_host) | 0.001464 |
def get_objects(self, path, marker=None,
limit=settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT):
"""Get objects.
**Pseudo-directory Notes**: Rackspace has two approaches to pseudo-
directories within the (really) flat storage object namespace:
1. Dummy directory storage objects. These are real storage objects
of type "application/directory" and must be manually uploaded
by the client.
2. Implied subdirectories using the `path` API query parameter.
Both serve the same purpose, but the latter is much preferred because
there is no independent maintenance of extra dummy objects, and the
`path` approach is always correct (for the existing storage objects).
This package uses the latter `path` approach, but gets into an
ambiguous situation where there is both a dummy directory storage
object and an implied subdirectory. To remedy this situation, we only
show information for the dummy directory object in results if present,
and ignore the implied subdirectory. But, under the hood this means
that our `limit` parameter may end up with less than the desired
number of objects. So, we use the heuristic that if we **do** have
"application/directory" objects, we end up doing an extra query of
double the limit size to ensure we can get up to the limit amount
of objects. This double query approach is inefficient, but as
using dummy objects should now be deprecated, the second query should
only rarely occur.
"""
object_infos, full_query = self._get_object_infos(path, marker, limit)
if full_query and len(object_infos) < limit:
# The underlying query returned a full result set, but we
# truncated it to under limit. Re-run at twice the limit and then
# slice back.
object_infos, _ = self._get_object_infos(path, marker, 2 * limit)
object_infos = object_infos[:limit]
return [self.obj_cls.from_info(self, x) for x in object_infos] | 0.001403 |
def enable(self, msgid, scope="package", line=None, ignore_unknown=False):
"""reenable message of the given id"""
self._set_msg_status(
msgid, enable=True, scope=scope, line=line, ignore_unknown=ignore_unknown
)
self._register_by_id_managed_msg(msgid, line, is_disabled=False) | 0.009375 |
def parse_workflow_call_body_io_map(self, i):
"""
Required.
:param i: wdl_parser AST node (Terminal, Ast, or AstList) for the call body's IO mappings
:return: OrderedDict mapping each input key to its parsed value expression
"""
io_map = OrderedDict()
if isinstance(i, wdl_parser.Terminal):
raise NotImplementedError
elif isinstance(i, wdl_parser.Ast):
raise NotImplementedError
elif isinstance(i, wdl_parser.AstList):
for ast in i:
if ast.name == 'IOMapping':
key = self.parse_declaration_expressn(ast.attr("key"), es='')
value = self.parse_declaration_expressn(ast.attr("value"), es='')
io_map[key] = value
else:
raise NotImplementedError
return io_map | 0.005427 |
def train(self, train_set, valid_set=None, test_set=None, train_size=None):
'''We train over mini-batches and evaluate periodically.'''
iteration = 0
while True:
if not iteration % self.config.test_frequency and test_set:
try:
self.test(iteration, test_set)
except KeyboardInterrupt:
logging.info('interrupted!')
break
if not iteration % self.validation_frequency and valid_set:
try:
if not self.evaluate(iteration, valid_set):
logging.info('patience elapsed, bailing out')
break
except KeyboardInterrupt:
logging.info('interrupted!')
break
train_message = ""
try:
train_message = self.train_func(train_set)
except KeyboardInterrupt:
logging.info('interrupted!')
break
if not iteration % self.config.monitor_frequency:
logging.info('monitor (iter=%i) %s', iteration + 1, train_message)
iteration += 1
if hasattr(self.network, "iteration_callback"):
self.network.iteration_callback()
yield train_message
if valid_set:
self.set_params(self.best_params)
if test_set:
self.test(0, test_set) | 0.002034 |
def iter_all_users(self, number=-1, etag=None, per_page=None):
"""Iterate over every user in the order they signed up for GitHub.
:param int number: (optional), number of users to return. Default: -1,
returns all of them
:param str etag: (optional), ETag from a previous request to the same
endpoint
:param int per_page: (optional), number of users to list per request
:returns: generator of :class:`User <github3.users.User>`
"""
url = self._build_url('users')
return self._iter(int(number), url, User,
params={'per_page': per_page}, etag=etag) | 0.00303 |
def solve_series(self, x0, params, varied_data, varied_idx,
internal_x0=None, solver=None, propagate=True, **kwargs):
""" Solve system for a set of parameters in which one is varied
Parameters
----------
x0 : array_like
Guess (subject to ``self.post_processors``)
params : array_like
Parameter values
varied_data : array_like
Numerical values of the varied parameter.
varied_idx : int or str
Index of the varied parameter (indexing starts at 0).
If ``self.par_by_name`` this should be the name (str) of the varied
parameter.
internal_x0 : array_like (default: None)
Guess (*not* subject to ``self.post_processors``).
Overrides ``x0`` when given.
solver : str or callback
See :meth:`solve`.
propagate : bool (default: True)
Use last successful solution as ``x0`` in consecutive solves.
\\*\\*kwargs :
Keyword arguments pass along to :meth:`solve`.
Returns
-------
xout : array
Of shape ``(varied_data.size, x0.size)``.
info_dicts : list of dictionaries
Dictionaries each containing keys such as containing 'success', 'nfev', 'njev' etc.
"""
if self.x_by_name and isinstance(x0, dict):
x0 = [x0[k] for k in self.names]
if self.par_by_name:
if isinstance(params, dict):
params = [params[k] for k in self.param_names]
if isinstance(varied_idx, str):
varied_idx = self.param_names.index(varied_idx)
new_params = np.atleast_1d(np.array(params, dtype=np.float64))
xout = np.empty((len(varied_data), len(x0)))
self.internal_xout = np.empty_like(xout)
self.internal_params_out = np.empty((len(varied_data),
len(new_params)))
info_dicts = []
new_x0 = np.array(x0, dtype=np.float64) # copy
conds = kwargs.get('initial_conditions', None) # see ConditionalNeqSys
for idx, value in enumerate(varied_data):
try:
new_params[varied_idx] = value
except TypeError:
new_params = value # e.g. type(new_params) == int
if conds is not None:
kwargs['initial_conditions'] = conds
x, info_dict = self.solve(new_x0, new_params, internal_x0, solver,
**kwargs)
if propagate:
if info_dict['success']:
try:
# See ChainedNeqSys.solve
new_x0 = info_dict['x_vecs'][0]
internal_x0 = info_dict['internal_x_vecs'][0]
conds = info_dict['intermediate_info'][0].get(
'conditions', None)
except:
new_x0 = x
internal_x0 = None
conds = info_dict.get('conditions', None)
xout[idx, :] = x
self.internal_xout[idx, :] = self.internal_x
self.internal_params_out[idx, :] = self.internal_params
info_dicts.append(info_dict)
return xout, info_dicts | 0.001489 |
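A minimal standalone sketch of the `propagate` idea using scipy.optimize.fsolve rather than the solver framework above (assumes scipy is installed): each solution is reused as the starting guess for the next parameter value, which tends to keep the solver on the same solution branch.

import numpy as np
from scipy.optimize import fsolve

def residual(x, a):
    # toy 1-D system: x**2 - a = 0, swept over a
    return x**2 - a

x0 = np.array([1.0])
solutions = []
for a in np.linspace(1.0, 4.0, 7):
    x = fsolve(residual, x0, args=(a,))
    solutions.append(float(x[0]))
    x0 = x  # propagate the last successful solution as the next guess
print(solutions)  # approximately sqrt(1.0) ... sqrt(4.0)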
def reduce_to_cycles(self):
"""
Iteratively eliminate leaves to reduce the set of objects to only those
that build cycles. Return the reduced graph. If there are no cycles,
None is returned.
"""
if not self._reduced:
reduced = copy(self)
reduced.objects = self.objects[:]
reduced.metadata = []
reduced.edges = []
self.num_in_cycles = reduced._reduce_to_cycles()
reduced.num_in_cycles = self.num_in_cycles
if self.num_in_cycles:
reduced._get_edges()
reduced._annotate_objects()
for meta in reduced.metadata:
meta.cycle = True
else:
reduced = None
self._reduced = reduced
return self._reduced | 0.002389 |
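A self-contained sketch of the underlying leaf-elimination idea on a plain adjacency dict (not the class above): keep deleting nodes that have no successors or no predecessors among the remaining nodes; whatever survives lies on at least one cycle.

def nodes_in_cycles(edges):
    # edges: dict mapping node -> set of successor nodes
    nodes = set(edges) | {v for vs in edges.values() for v in vs}
    changed = True
    while changed:
        changed = False
        for n in list(nodes):
            succ = edges.get(n, set()) & nodes
            pred = {m for m in nodes if n in edges.get(m, set())}
            if not succ or not pred:  # a leaf can never be part of a cycle
                nodes.discard(n)
                changed = True
    return nodes

graph = {'a': {'b'}, 'b': {'c'}, 'c': {'a'}, 'd': {'a'}}
print(nodes_in_cycles(graph))  # {'a', 'b', 'c'}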
def get_all_connected_interfaces():
"""! @brief Returns all the connected devices with a CMSIS-DAPv2 interface."""
# find all cmsis-dap devices
try:
all_devices = usb.core.find(find_all=True, custom_match=HasCmsisDapv2Interface())
except usb.core.NoBackendError:
common.show_no_libusb_warning()
return []
# iterate on all devices found
boards = []
for board in all_devices:
new_board = PyUSBv2()
new_board.vid = board.idVendor
new_board.pid = board.idProduct
new_board.product_name = board.product
new_board.vendor_name = board.manufacturer
new_board.serial_number = board.serial_number
boards.append(new_board)
return boards | 0.004932 |
def parse_config(self, config_source):
""" Parses a landslide configuration file and returns a normalized
python dict.
"""
self.log(u"Config %s" % config_source)
try:
raw_config = configparser.RawConfigParser()
raw_config.read(config_source)
except Exception as e:
raise RuntimeError(u"Invalid configuration file: %s" % e)
config = {}
config['source'] = raw_config.get('landslide', 'source')\
.replace('\r', '').split('\n')
if raw_config.has_option('landslide', 'theme'):
config['theme'] = raw_config.get('landslide', 'theme')
self.log(u"Using configured theme %s" % config['theme'])
if raw_config.has_option('landslide', 'destination'):
config['destination'] = raw_config.get('landslide', 'destination')
if raw_config.has_option('landslide', 'linenos'):
config['linenos'] = raw_config.get('landslide', 'linenos')
for boolopt in ('embed', 'relative', 'copy_theme'):
if raw_config.has_option('landslide', boolopt):
config[boolopt] = raw_config.getboolean('landslide', boolopt)
if raw_config.has_option('landslide', 'extensions'):
config['extensions'] = ",".join(raw_config.get('landslide', 'extensions')\
.replace('\r', '').split('\n'))
if raw_config.has_option('landslide', 'css'):
config['css'] = raw_config.get('landslide', 'css')\
.replace('\r', '').split('\n')
if raw_config.has_option('landslide', 'js'):
config['js'] = raw_config.get('landslide', 'js')\
.replace('\r', '').split('\n')
return config | 0.002859 |
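For illustration, a self-contained sketch of the kind of `[landslide]` section the parser above expects, read here with the standard library configparser; the option names mirror the ones queried in the code, while the values are made up:

import configparser

sample = """
[landslide]
source = slides.md
theme = default
destination = presentation.html
embed = True
linenos = inline
"""

raw = configparser.RawConfigParser()
raw.read_string(sample)
print(raw.get('landslide', 'source'))        # slides.md
print(raw.getboolean('landslide', 'embed'))  # True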
def _determine_nTrackIterations(self,nTrackIterations):
"""Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now"""
if not nTrackIterations is None:
self.nTrackIterations= nTrackIterations
return None
if numpy.fabs(self.misalignment(quantity=False)) < 1./180.*numpy.pi:
self.nTrackIterations= 0
elif numpy.fabs(self.misalignment(quantity=False)) >= 1./180.*numpy.pi \
and numpy.fabs(self.misalignment(quantity=False)) < 3./180.*numpy.pi:
self.nTrackIterations= 1
elif numpy.fabs(self.misalignment(quantity=False)) >= 3./180.*numpy.pi:
self.nTrackIterations= 2
return None | 0.014085 |
def parse_xml(sentence, tab="\t", id=""):
""" Returns the given Sentence object as an XML-string (plain bytestring, UTF-8 encoded).
The tab delimiter is used as indentation for nested elements.
The id can be used as a unique identifier per sentence for chunk id's and anchors.
For example: "I eat pizza with a fork." =>
<sentence token="word, part-of-speech, chunk, preposition, relation, anchor, lemma" language="en">
<chunk type="NP" relation="SBJ" of="1">
<word type="PRP" lemma="i">I</word>
</chunk>
<chunk type="VP" relation="VP" id="1" anchor="A1">
<word type="VBP" lemma="eat">eat</word>
</chunk>
<chunk type="NP" relation="OBJ" of="1">
<word type="NN" lemma="pizza">pizza</word>
</chunk>
<chunk type="PNP" of="A1">
<chunk type="PP">
<word type="IN" lemma="with">with</word>
</chunk>
<chunk type="NP">
<word type="DT" lemma="a">a</word>
<word type="NN" lemma="fork">fork</word>
</chunk>
</chunk>
<chink>
<word type="." lemma=".">.</word>
</chink>
</sentence>
"""
uid = lambda *parts: "".join([str(id), _UID_SEPARATOR ]+[str(x) for x in parts]).lstrip(_UID_SEPARATOR)
push = lambda indent: indent+tab # push() increases the indentation.
pop = lambda indent: indent[:-len(tab)] # pop() decreases the indentation.
indent = tab
xml = []
# Start the sentence element:
# <sentence token="word, part-of-speech, chunk, preposition, relation, anchor, lemma">
xml.append('<%s%s %s="%s" %s="%s">' % (
XML_SENTENCE,
XML_ID and " %s=\"%s\"" % (XML_ID, str(id)) or "",
XML_TOKEN, ", ".join(sentence.token),
XML_LANGUAGE, sentence.language
))
# Collect chunks that are PNP anchors and assign id.
anchors = {}
for chunk in sentence.chunks:
if chunk.attachments:
anchors[chunk.start] = len(anchors) + 1
# Traverse all words in the sentence.
for word in sentence.words:
chunk = word.chunk
pnp = word.chunk and word.chunk.pnp or None
# Start the PNP element if the chunk is the first chunk in PNP:
# <chunk type="PNP" of="A1">
if pnp and pnp.start == chunk.start:
a = pnp.anchor and ' %s="%s"' % (XML_OF, uid("A", anchors.get(pnp.anchor.start, ""))) or ""
xml.append(indent + '<%s %s="PNP"%s>' % (XML_CHUNK, XML_TYPE, a))
indent = push(indent)
# Start the chunk element if the word is the first word in the chunk:
# <chunk type="VP" relation="VP" id="1" anchor="A1">
if chunk and chunk.start == word.index:
if chunk.relations:
# Create the shortest possible attribute values for multiple relations,
# e.g., [(1,"OBJ"),(2,"OBJ")]) => relation="OBJ" id="1|2"
r1 = unzip(0, chunk.relations) # Relation id's.
r2 = unzip(1, chunk.relations) # Relation roles.
r1 = [x is None and "-" or uid(x) for x in r1]
r2 = [x is None and "-" or x for x in r2]
r1 = not len(unique(r1)) == 1 and "|".join(r1) or (r1+[None])[0]
r2 = not len(unique(r2)) == 1 and "|".join(r2) or (r2+[None])[0]
xml.append(indent + '<%s%s%s%s%s%s>' % (
XML_CHUNK,
chunk.type and ' %s="%s"' % (XML_TYPE, chunk.type) or "",
chunk.relations and chunk.role != None and ' %s="%s"' % (XML_RELATION, r2) or "",
chunk.relation and chunk.type == "VP" and ' %s="%s"' % (XML_ID, uid(chunk.relation)) or "",
chunk.relation and chunk.type != "VP" and ' %s="%s"' % (XML_OF, r1) or "",
chunk.attachments and ' %s="%s"' % (XML_ANCHOR, uid("A",anchors[chunk.start])) or ""
))
indent = push(indent)
# Words outside of a chunk are wrapped in a <chink> tag:
# <chink>
if not chunk:
xml.append(indent + '<%s>' % XML_CHINK)
indent = push(indent)
# Add the word element:
# <word type="VBP" lemma="eat">eat</word>
xml.append(indent + '<%s%s%s%s>%s</%s>' % (
XML_WORD,
word.type and ' %s="%s"' % (XML_TYPE, xml_encode(word.type)) or '',
word.lemma and ' %s="%s"' % (XML_LEMMA, xml_encode(word.lemma)) or '',
(" "+" ".join(['%s="%s"' % (k,v) for k,v in word.custom_tags.items() if v != None])).rstrip(),
xml_encode(unicode(word)),
XML_WORD
))
if not chunk:
# Close the <chink> element if outside of a chunk.
indent = pop(indent); xml.append(indent + "</%s>" % XML_CHINK)
if chunk and chunk.stop-1 == word.index:
# Close the <chunk> element if this is the last word in the chunk.
indent = pop(indent); xml.append(indent + "</%s>" % XML_CHUNK)
if pnp and pnp.stop-1 == word.index:
# Close the PNP element if this is the last word in the PNP.
indent = pop(indent); xml.append(indent + "</%s>" % XML_CHUNK)
xml.append("</%s>" % XML_SENTENCE)
# Return as a plain str.
return "\n".join(xml).encode("utf-8") | 0.007165 |
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative, self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename, 'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False | 0.002139 |
def set_slats_level(self, slatsLevel=0.0, shutterLevel=None):
""" sets the slats and shutter level
Args:
slatsLevel(float): the new level of the slats. 0.0 = open, 1.0 = closed,
shutterLevel(float): the new level of the shutter. 0.0 = open, 1.0 = closed, None = use the current value
Returns:
the result of the _restCall
"""
if shutterLevel is None:
shutterLevel = self.shutterLevel
data = {
"channelIndex": 1,
"deviceId": self.id,
"slatsLevel": slatsLevel,
"shutterLevel": shutterLevel,
}
return self._restCall("device/control/setSlatsLevel", json.dumps(data)) | 0.00554 |
def glossary(
self,
term,
definition):
"""*genarate a MMD glossary*
**Key Arguments:**
- ``term`` -- the term to add as a glossary item
- ``definition`` -- the definition of the glossary term
**Return:**
- ``glossary`` -- the glossary text
**Usage:**
To generate a glossary item:
.. code-block:: python
text = \"\"\"Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
Also the makers of really great products.\"\"\"
definition = md.glossary("Apple", text)
print definition
# OUTPUT:
# [^apple]
#
# [^apple]: Apple
# Pomaceous fruit of plants of the genus Malus in the family Rosaceae.
# Also the makers of really great products.
"""
term = term.strip()
term = term.lower()
title = term.title()
definition = definition.strip()
regex = re.compile(r'\n(\S)')
definition = regex.sub("\n \g<1>", definition)
return "[^%(term)s]\n\n[^%(term)s]: %(title)s\n %(definition)s" % locals() | 0.005521 |
def on_connection_open(self, connection):
"""This method is called by pika once the connection to RabbitMQ has
been established.
:type connection: pika.TornadoConnection
"""
LOGGER.debug('Connection opened')
connection.add_on_connection_blocked_callback(
self.on_connection_blocked)
connection.add_on_connection_unblocked_callback(
self.on_connection_unblocked)
connection.add_backpressure_callback(self.on_back_pressure_detected)
self.channel = self._open_channel() | 0.003546 |
def trim_data_back_to(monthToKeep):
"""
This method will remove data from the summary text file and the dictionary file for tests that occurs before
the number of months specified by monthToKeep.
:param monthToKeep:
:return:
"""
global g_failed_tests_info_dict
current_time = time.time() # unit in seconds
oldest_time_allowed = current_time - monthToKeep*30*24*3600 # in seconds
clean_up_failed_test_dict(oldest_time_allowed)
clean_up_summary_text(oldest_time_allowed) | 0.005769 |
def profiler(profiles, projectpath,parameters,serviceid,servicename,serviceurl,printdebug=None):
"""Given input files and parameters, produce metadata for outputfiles. Returns a list of matched profiles (empty if none match), and a program."""
parameters = sanitizeparameters(parameters)
matched = []
program = Program(projectpath)
for profile in profiles:
if profile.match(projectpath, parameters)[0]:
matched.append(profile)
program.update( profile.generate(projectpath,parameters,serviceid,servicename,serviceurl) )
return matched, program | 0.024958 |
def all_folders(
path_name, keyword='', has_date=False, date_fmt=DATE_FMT
) -> list:
"""
Search all folders with criteria
Returned list will be sorted by last modified
Args:
path_name: full path name
keyword: keyword to search
has_date: whether has date in file name (default False)
date_fmt: date format to check for has_date parameter
Returns:
list: all folder names fulfilled criteria
"""
if not os.path.exists(path=path_name): return []
path_name = path_name.replace('\\', '/')
if keyword:
folders = sort_by_modified([
f.replace('\\', '/') for f in glob.iglob(f'{path_name}/*{keyword}*')
if os.path.isdir(f) and (f.replace('\\', '/').split('/')[-1][0] != '~')
])
else:
folders = sort_by_modified([
f'{path_name}/{f}' for f in os.listdir(path=path_name)
if os.path.isdir(f'{path_name}/{f}') and (f[0] != '~')
])
if has_date:
folders = filter_by_dates(folders, date_fmt=date_fmt)
return folders | 0.003683 |
def _run__http(self, action, replace):
"""More complex HTTP query."""
query = action['query']
# self._debug = True
url = '{type}://{host}{path}'.format(path=query['path'], **action)
content = None
method = query.get('method', "get").lower()
self.debug("{} {} url={}\n", action['type'], method, url)
if method == "post":
content = query['content']
headers = query.get('headers', {})
if replace and action.get('template'):
self.rfxcfg.macro_expand(url, replace)
if content:
if isinstance(content, dict):
for key, value in content.items():
content[key] = self.rfxcfg.macro_expand(value, replace)
else:
content = self.rfxcfg.macro_expand(content, replace)
newhdrs = dict()
for key, value in headers.items():
newhdrs[key.lower()] = self.rfxcfg.macro_expand(value, replace)
headers = newhdrs
self.debug("{} headers={}\n", action['type'], headers)
self.debug("{} content={}\n", action['type'], content)
if content and isinstance(content, dict):
content = json.dumps(content)
self.logf("Action {name} {type}\n", **action)
result = getattr(requests, method)(url, headers=headers, data=content, timeout=action.get('timeout', 5))
expect = action.get('expect', {})
expected_codes = expect.get("response-codes", (200, 201, 202, 204))
self.debug("{} expect codes={}\n", action['type'], expected_codes)
self.debug("{} status={} content={}\n", action['type'], result.status_code, result.text)
if result.status_code not in expected_codes:
self.die("Unable to make {} call, unexpected result ({})",
action['type'], result.status_code)
if 'content' in expect:
self.debug("{} expect content={}\n", action['type'], expect['content'])
if expect['content'] not in result.text:
self.die("{} call to {} failed\nExpected: {}\nReceived:\n{}",
action['type'], url, expect['content'], result.text)
if 'regex' in expect:
self.debug("{} expect regex={}\n", action['type'], expect['regex'])
if not re.search(expect['regex'], result.text):
self.die("{} call to {} failed\nRegex: {}\nDid not match:\n{}",
action['type'], url, expect['regex'], result.text)
self.log(result.text, level=common.log_msg)
self.logf("Success, status={}\n", result.status_code, level=common.log_good)
return True | 0.002204 |
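From the keys the method reads, an `action` passed to `_run__http` plausibly has the shape below; this is a reconstruction from the code above rather than a documented schema, and the host, path and expectations are made-up examples:

action = {
    'name': 'health-check',           # used in log messages
    'type': 'http',                   # becomes the URL scheme
    'host': 'example.internal',       # hypothetical host
    'timeout': 5,
    'template': True,                 # enables macro expansion of url/content/headers
    'query': {
        'path': '/api/v1/status',
        'method': 'get',              # or 'post' together with a 'content' key
        'headers': {'Accept': 'application/json'},
    },
    'expect': {
        'response-codes': (200, 204),
        'content': '"ok"',            # substring that must appear in the response body
        'regex': r'"status":\s*"ok"',
    },
}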
def _try_join_cancelled_thread(thread):
"""Join a thread, but if the thread doesn't terminate for some time, ignore it
instead of waiting infinitely."""
thread.join(10)
if thread.is_alive():
logging.warning("Thread %s did not terminate within grace period after cancellation",
thread.name) | 0.008902 |
def get_request_data(self, var_name, full_data=False):
"""
:param var_name:
:param full_data: If you want `.to_array()` with this data, ready to be sent.
:return: A tuple of `to_array()` dict and the files (:py:func:`InputFile.get_request_files()`).
Files can be None if no file was given but a URL or existing `file_id` was used.
If `self.media` is an `InputFile` however,
the first tuple element (either the string, or the dict's `['media']` if `full_data=True`),
will be set to `attach://{var_name}_media` automatically.
If `self.thumb` is an `InputFile` however, the first tuple element's `['thumb']`, will be set to `attach://{var_name}_thumb` automatically.
"""
if not full_data:
raise ArithmeticError('we have a thumbnail, please use `full_data=True`.')
# end if
file = {}
data, file_to_add = super(InputMediaWithThumb, self).get_request_data(var_name, full_data=True)
if file_to_add:
file.update(file_to_add)
# end if
data['thumb'], file_to_add = self.get_inputfile_data(self.thumb, var_name, suffix='_thumb')
if data['thumb'] is None:
del data['thumb'] # having `'thumb': null` in the json produces errors.
# end if
if file_to_add:
file.update(file_to_add)
# end if
return data, (file or None) | 0.007498 |
def push_chunk(self, x, timestamp=0.0, pushthrough=True):
"""Push a list of samples into the outlet.
samples -- A list of samples, either as a list of lists or a list of
multiplexed values.
timestamp -- Optionally the capture time of the most recent sample, in
agreement with local_clock(); if omitted, the current
time is used. The time stamps of other samples are
automatically derived according to the sampling rate of
the stream. (default 0.0)
pushthrough Whether to push the chunk through to the receivers instead
of buffering it with subsequent samples. Note that the
chunk_size, if specified at outlet construction, takes
precedence over the pushthrough flag. (default True)
"""
try:
n_values = self.channel_count * len(x)
data_buff = (self.value_type * n_values).from_buffer(x)
handle_error(self.do_push_chunk(self.obj, data_buff,
c_long(n_values),
c_double(timestamp),
c_int(pushthrough)))
except TypeError:
if len(x):
if type(x[0]) is list:
x = [v for sample in x for v in sample]
if self.channel_format == cf_string:
x = [v.encode('utf-8') for v in x]
if len(x) % self.channel_count == 0:
constructor = self.value_type*len(x)
# noinspection PyCallingNonCallable
handle_error(self.do_push_chunk(self.obj, constructor(*x),
c_long(len(x)),
c_double(timestamp),
c_int(pushthrough)))
else:
raise ValueError("each sample must have the same number of "
"channels.") | 0.005102 |
def delete_file(self, fid):
"""
Delete file from WeedFS
:param string fid: File ID
"""
url = self.get_file_url(fid)
return self.conn.delete_data(url) | 0.010101 |
def type_to_string(t):
"""Get string representation of memory type"""
if t == MemoryElement.TYPE_I2C:
return 'I2C'
if t == MemoryElement.TYPE_1W:
return '1-wire'
if t == MemoryElement.TYPE_DRIVER_LED:
return 'LED driver'
if t == MemoryElement.TYPE_LOCO:
return 'Loco Positioning'
if t == MemoryElement.TYPE_TRAJ:
return 'Trajectory'
if t == MemoryElement.TYPE_LOCO2:
return 'Loco Positioning 2'
return 'Unknown' | 0.003656 |
def archive(folder, dry_run=False):
"Move an active project to the archive."
# error handling on archive_dir already done in main()
for f in folder:
if not os.path.exists(f):
bail('folder does not exist: ' + f)
_archive_safe(folder, PROJ_ARCHIVE, dry_run=dry_run) | 0.003322 |
def _eval_kwargs(self):
"""Evaluates any parameterized methods in the kwargs"""
evaled_kwargs = {}
for k, v in self.p.kwargs.items():
if util.is_param_method(v):
v = v()
evaled_kwargs[k] = v
return evaled_kwargs | 0.007067 |
def send_contact(chat_id, phone_number, first_name,
last_name=None, reply_to_message_id=None, reply_markup=None, disable_notification=False,
**kwargs):
"""
Use this method to send phone contacts.
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param phone_number: Contact's phone number.
:param first_name: Contact's first name.
:param last_name: Contact's last name.
:param reply_to_message_id: If the message is a reply, ID of the original message
:param reply_markup: Additional interface options. A JSON-serialized object for a
custom reply keyboard, instructions to hide keyboard or to
force a reply from the user.
:param disable_notification: Sends the message silently. iOS users will not receive a notification, Android users
will receive a notification with no sound. Other apps coming soon.
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type phone_number: str
:type first_name: str
:type last_name: str
:type reply_to_message_id: int
:type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply
:type disable_notification: bool
:returns: On success, the sent Message is returned.
:rtype: TelegramBotRPCRequest
"""
# required args
params = dict(
chat_id=chat_id,
phone_number=phone_number,
first_name=first_name,
)
# optional args
params.update(
_clean_params(
last_name=last_name,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup,
disable_notification=disable_notification,
)
)
return TelegramBotRPCRequest('sendContact', params=params, on_result=Message.from_result, **kwargs) | 0.004566 |
def scale_0to1(image_in,
exclude_outliers_below=False,
exclude_outliers_above=False):
"""Scale the two images to [0, 1] based on min/max from both.
Parameters
-----------
image_in : ndarray
Input image
exclude_outliers_{below,above} : float
Lower/upper limit, a value between 0 and 100.
Returns
-------
scaled_image : ndarray
clipped and/or scaled image
"""
min_value = image_in.min()
max_value = image_in.max()
# making a copy to ensure no side-effects
image = image_in.copy()
if exclude_outliers_below:
perctl = float(exclude_outliers_below)
image[image < np.percentile(image, perctl)] = min_value
if exclude_outliers_above:
perctl = float(exclude_outliers_above)
image[image > np.percentile(image, 100.0 - perctl)] = max_value
image = (image - min_value) / (max_value - min_value)
return image | 0.001043 |
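The core of the function is a plain min-max rescale (the percentile clipping happens first, but the scaling still uses the min/max taken from the original image). A standalone numpy sketch of just that rescale step:

import numpy as np

image = np.linspace(2.0, 12.0, 101)
scaled = (image - image.min()) / (image.max() - image.min())
print(scaled.min(), scaled.max())  # 0.0 1.0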
def _request_new_chunk(self):
"""
Called to request a new chunk of data to be read from the Crazyflie
"""
# Figure out the length of the next request
new_len = self._bytes_left
if new_len > _ReadRequest.MAX_DATA_LENGTH:
new_len = _ReadRequest.MAX_DATA_LENGTH
logger.debug('Requesting new chunk of {}bytes at 0x{:X}'.format(
new_len, self._current_addr))
# Request the data for the next address
pk = CRTPPacket()
pk.set_header(CRTPPort.MEM, CHAN_READ)
pk.data = struct.pack('<BIB', self.mem.id, self._current_addr, new_len)
reply = struct.unpack('<BBBBB', pk.data[:-1])
self.cf.send_packet(pk, expected_reply=reply, timeout=1) | 0.002649 |
def add_exac_info(genes, alias_genes, exac_lines):
"""Add information from the exac genes
Currently we only add the pLI score on the gene level.
The ExAC resource only uses HGNC symbols to identify genes, so we need
our alias mapping.
Args:
genes(dict): Dictionary with all genes
alias_genes(dict): Genes mapped to all aliases
exac_lines(iterable): Iterable with raw ExAC info
"""
LOG.info("Add exac pli scores")
for exac_gene in parse_exac_genes(exac_lines):
hgnc_symbol = exac_gene['hgnc_symbol'].upper()
pli_score = exac_gene['pli_score']
for hgnc_id in get_correct_ids(hgnc_symbol, alias_genes):
genes[hgnc_id]['pli_score'] = pli_score | 0.007895 |
def create_from_hdu(cls, hdu, ebins=None):
""" Creates an HPX object from a FITS header.
hdu : The FITS hdu
ebins : Energy bin edges [optional]
"""
convname = HPX.identify_HPX_convention(hdu.header)
conv = HPX_FITS_CONVENTIONS[convname]
try:
pixels = hdu.data[conv.idxstring]
except KeyError:
pixels = None
return cls.create_from_header(hdu.header, ebins, pixels) | 0.00431 |
def add_command(self, handler, name=None):
"""Add a subcommand `name` which invokes `handler`.
"""
if name is None:
name = docstring_to_subcommand(handler.__doc__)
# TODO: Prevent overwriting 'help'?
self._commands[name] = handler | 0.007067 |
def mine_urls(urls, params=None, callback=None, **kwargs):
"""Concurrently retrieve URLs.
:param urls: A set of URLs to concurrently retrieve.
:type urls: iterable
:param params: (optional) The URL parameters to send with each
request.
:type params: dict
:param callback: (optional) A callback function to be called on each
:py:class:`aiohttp.client.ClientResponse`.
:param \*\*kwargs: (optional) Arguments that ``get_miner`` takes.
"""
miner = Miner(**kwargs)
try:
miner.loop.add_signal_handler(signal.SIGINT, miner.close)
miner.loop.run_until_complete(miner.mine_urls(urls, params, callback))
except RuntimeError:
pass | 0.004098 |
def _generate_examples(self, archive_paths, objects_getter, bboxes_getter,
prefixes=None):
"""Yields examples."""
trainable_classes = set(
self.info.features['objects_trainable']['label'].names)
for i, archive_path in enumerate(archive_paths):
prefix = prefixes[i] if prefixes else None
objects = objects_getter(prefix)
bboxes = bboxes_getter(prefix)
logging.info('Opening archive %s ...', archive_path)
archive = tfds.download.iter_archive(
archive_path, tfds.download.ExtractMethod.TAR_STREAM)
for fpath, fobj in archive:
fname = os.path.basename(fpath)
image_id = int(os.path.splitext(fname)[0], 16)
image_objects = [obj._asdict() for obj in objects.get(image_id, [])]
image_bboxes = [bbox._asdict() for bbox in bboxes.get(image_id, [])]
image_objects_trainable = [
obj for obj in image_objects if obj['label'] in trainable_classes
]
yield {
'image': _resize_image_if_necessary(
fobj, target_pixels=self.builder_config.target_pixels),
'image/filename': fname,
'objects': image_objects,
'objects_trainable': image_objects_trainable,
'bobjects': image_bboxes,
} | 0.00613 |
def set_current_canvas(canvas):
""" Make a canvas active. Used primarily by the canvas itself.
"""
# Notify glir
canvas.context._do_CURRENT_command = True
# Try to be quick
if canvasses and canvasses[-1]() is canvas:
return
# Make this the current
cc = [c() for c in canvasses if c() is not None]
while canvas in cc:
cc.remove(canvas)
cc.append(canvas)
canvasses[:] = [weakref.ref(c) for c in cc] | 0.004376 |
def _validate_alias_name(alias_name):
"""
Check if the alias name is valid.
Args:
alias_name: The name of the alias to validate.
"""
if not alias_name:
raise CLIError(EMPTY_ALIAS_ERROR)
if not re.match('^[a-zA-Z]', alias_name):
raise CLIError(INVALID_STARTING_CHAR_ERROR.format(alias_name[0])) | 0.002915 |
def _iter_convert_to_object(self, iterable):
"""Iterable yields tuples of (binsha, mode, name), which will be converted
to the respective object representation"""
for binsha, mode, name in iterable:
path = join_path(self.path, name)
try:
yield self._map_id_to_type[mode >> 12](self.repo, binsha, mode, path)
except KeyError:
raise TypeError("Unknown mode %o found in tree data for path '%s'" % (mode, path)) | 0.01002 |
def ParseID3v1(data):
"""Parse an ID3v1 tag, returning a list of ID3v2.4 frames."""
try:
data = data[data.index(b'TAG'):]
except ValueError:
return None
if 128 < len(data) or len(data) < 124:
return None
# Issue #69 - Previous versions of Mutagen, when encountering
# out-of-spec TDRC and TYER frames of less than four characters,
# wrote only the characters available - e.g. "1" or "" - into the
# year field. To parse those, reduce the size of the year field.
# Amazingly, "0s" works as a struct format string.
unpack_fmt = "3s30s30s30s%ds29sBB" % (len(data) - 124)
try:
tag, title, artist, album, year, comment, track, genre = unpack(
unpack_fmt, data)
except StructError:
return None
if tag != b"TAG":
return None
def fix(data):
return data.split(b'\x00')[0].strip().decode('latin1')
title, artist, album, year, comment = map(
fix, [title, artist, album, year, comment])
frames = {}
if title:
frames['TIT2'] = TIT2(encoding=0, text=title)
if artist:
frames['TPE1'] = TPE1(encoding=0, text=[artist])
if album:
frames['TALB'] = TALB(encoding=0, text=album)
if year:
frames['TDRC'] = TDRC(encoding=0, text=year)
if comment:
frames['COMM'] = COMM(encoding=0, lang='eng', desc="ID3v1 Comment",
text=comment)
# Don't read a track number if it looks like the comment was
# padded with spaces instead of nulls (thanks, WinAmp).
if track and ((track != 32) or (data[-3] == b'\x00'[0])):
frames['TRCK'] = TRCK(encoding=0, text=str(track))
if genre != 255:
frames['TCON'] = TCON(encoding=0, text=str(genre))
return frames | 0.000559 |
def transformer_tall_train_uniencdec():
"""Train CNN/DM with a unidirectional encoder and decoder."""
hparams = transformer_tall()
hparams.max_input_seq_length = 750
hparams.max_target_seq_length = 100
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_decay_steps = 150000
hparams.learning_rate_constant = 2e-4
hparams.unidirectional_encoder = True
return hparams | 0.024123 |
def _from_dict(cls, _dict):
"""Initialize a RuntimeEntity object from a json dictionary."""
args = {}
if 'entity' in _dict:
args['entity'] = _dict.get('entity')
else:
raise ValueError(
'Required property \'entity\' not present in RuntimeEntity JSON'
)
if 'location' in _dict:
args['location'] = _dict.get('location')
else:
raise ValueError(
'Required property \'location\' not present in RuntimeEntity JSON'
)
if 'value' in _dict:
args['value'] = _dict.get('value')
else:
raise ValueError(
'Required property \'value\' not present in RuntimeEntity JSON')
if 'confidence' in _dict:
args['confidence'] = _dict.get('confidence')
if 'metadata' in _dict:
args['metadata'] = _dict.get('metadata')
if 'groups' in _dict:
args['groups'] = [
CaptureGroup._from_dict(x) for x in (_dict.get('groups'))
]
return cls(**args) | 0.004488 |
def start(self):
"""Start process."""
logger.debug(str(' '.join(self._cmd_list)))
if not self._fired:
self._partial_ouput = None
self._process.start(self._cmd_list[0], self._cmd_list[1:])
self._timer.start()
else:
raise CondaProcessWorker('A Conda ProcessWorker can only run once '
'per method call.') | 0.004773 |
def to_pickle(graph: BELGraph, file: Union[str, BinaryIO], protocol: int = HIGHEST_PROTOCOL) -> None:
"""Write this graph to a pickle object with :func:`networkx.write_gpickle`.
Note that the pickle module has some incompatibilities between Python 2 and 3. To export a universally importable
pickle, choose 0, 1, or 2.
:param graph: A BEL graph
:param file: A file or filename to write to
:param protocol: Pickling protocol to use. Defaults to ``HIGHEST_PROTOCOL``.
.. seealso:: https://docs.python.org/3.6/library/pickle.html#data-stream-format
"""
raise_for_not_bel(graph)
nx.write_gpickle(graph, file, protocol=protocol) | 0.007508 |
def umi_transform(data):
"""
transform each read by identifying the barcode and UMI for each read
and putting the information in the read name
"""
fq1 = data["files"][0]
umi_dir = os.path.join(dd.get_work_dir(data), "umis")
safe_makedir(umi_dir)
transform = dd.get_umi_type(data)
if not transform:
logger.info("No UMI transform specified, assuming pre-transformed data.")
if is_transformed(fq1):
logger.info("%s detected as pre-transformed, passing it on unchanged." % fq1)
data["files"] = [fq1]
return data
else:
logger.error("No UMI transform was specified, but %s does not look "
"pre-transformed. Assuming non-umi data." % fq1)
return data
if file_exists(transform):
transform_file = transform
else:
transform_file = get_transform_file(transform)
if not file_exists(transform_file):
logger.error(
"The UMI transform can be specified as either a file or a "
"bcbio-supported transform. Either the file %s does not exist "
"or the transform is not supported by bcbio. Supported "
"transforms are %s."
% (dd.get_umi_type(data), ", ".join(SUPPORTED_TRANSFORMS)))
sys.exit(1)
out_base = dd.get_sample_name(data) + ".umitransformed.fq.gz"
out_file = os.path.join(umi_dir, out_base)
if file_exists(out_file):
data["files"] = [out_file]
return data
umis = config_utils.get_program("umis", data, default="umis")
cores = dd.get_num_cores(data)
# skip transformation if the file already looks transformed
with open_fastq(fq1) as in_handle:
read = next(in_handle)
if "UMI_" in read:
data["files"] = [out_file]
return data
cmd = ("{umis} fastqtransform {transform_file} "
"--cores {cores} "
"{fq1}"
"| seqtk seq -L 20 - | gzip > {tx_out_file}")
message = ("Inserting UMI and barcode information into the read name of %s"
% fq1)
with file_transaction(out_file) as tx_out_file:
do.run(cmd.format(**locals()), message)
data["files"] = [out_file]
return data | 0.001747 |
def read(self, to_read, timeout_ms):
"""Reads data from this file.
in to_read of type int
Number of bytes to read.
in timeout_ms of type int
Timeout (in ms) to wait for the operation to complete.
Pass 0 for an infinite timeout.
return data of type str
Array of data read.
raises :class:`OleErrorNotimpl`
The method is not implemented yet.
"""
if not isinstance(to_read, baseinteger):
raise TypeError("to_read can only be an instance of type baseinteger")
if not isinstance(timeout_ms, baseinteger):
raise TypeError("timeout_ms can only be an instance of type baseinteger")
data = self._call("read",
in_p=[to_read, timeout_ms])
return data | 0.007177 |
def _set_alarm_owner(self, v, load=False):
"""
Setter method for alarm_owner, mapped from YANG variable /rmon/alarm_entry/alarm_owner (owner-string)
If this variable is read-only (config: false) in the
source YANG file, then _set_alarm_owner is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_alarm_owner() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,14})', 'length': [u'1 .. 15']}), is_leaf=True, yang_name="alarm-owner", rest_name="owner", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Owner identity', u'alt-name': u'owner'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='owner-string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """alarm_owner must be of a type compatible with owner-string""",
'defined-type': "brocade-rmon:owner-string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9\\.\\\\\\\\@#\\+\\*\\(\\)=\\{~\\}%<>=$_\\[\\]\\|]{0,14})', 'length': [u'1 .. 15']}), is_leaf=True, yang_name="alarm-owner", rest_name="owner", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Owner identity', u'alt-name': u'owner'}}, namespace='urn:brocade.com:mgmt:brocade-rmon', defining_module='brocade-rmon', yang_type='owner-string', is_config=True)""",
})
self.__alarm_owner = t
if hasattr(self, '_set'):
self._set() | 0.005211 |
def new_event(event):
"""
Requests a new event be created in the store.
http://msdn.microsoft.com/en-us/library/aa564690(v=exchg.140).aspx
<m:CreateItem SendMeetingInvitations="SendToAllAndSaveCopy"
xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types">
<m:SavedItemFolderId>
<t:DistinguishedFolderId Id="calendar"/>
</m:SavedItemFolderId>
<m:Items>
<t:CalendarItem>
<t:Subject>{event.subject}</t:Subject>
<t:Body BodyType="HTML">{event.subject}</t:Body>
<t:Start></t:Start>
<t:End></t:End>
<t:Location></t:Location>
<t:RequiredAttendees>
{% for attendee_email in meeting.required_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
{% endfor %}
</t:RequiredAttendees>
{% if meeting.optional_attendees %}
<t:OptionalAttendees>
{% for attendee_email in meeting.optional_attendees %}
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ attendee_email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
{% endfor %}
</t:OptionalAttendees>
{% endif %}
{% if meeting.conference_room %}
<t:Resources>
<t:Attendee>
<t:Mailbox>
<t:EmailAddress>{{ meeting.conference_room.email }}</t:EmailAddress>
</t:Mailbox>
</t:Attendee>
</t:Resources>
{% endif %}
</t:CalendarItem>
</m:Items>
</m:CreateItem>
"""
id = T.DistinguishedFolderId(Id=event.calendar_id) if event.calendar_id in DISTINGUISHED_IDS else T.FolderId(Id=event.calendar_id)
start = convert_datetime_to_utc(event.start)
end = convert_datetime_to_utc(event.end)
root = M.CreateItem(
M.SavedItemFolderId(id),
M.Items(
T.CalendarItem(
T.Subject(event.subject),
T.Body(event.body or u'', BodyType="HTML"),
)
),
SendMeetingInvitations="SendToAllAndSaveCopy"
)
calendar_node = root.xpath(u'/m:CreateItem/m:Items/t:CalendarItem', namespaces=NAMESPACES)[0]
if event.reminder_minutes_before_start:
calendar_node.append(T.ReminderIsSet('true'))
calendar_node.append(T.ReminderMinutesBeforeStart(str(event.reminder_minutes_before_start)))
else:
calendar_node.append(T.ReminderIsSet('false'))
calendar_node.append(T.Start(start.strftime(EXCHANGE_DATETIME_FORMAT)))
calendar_node.append(T.End(end.strftime(EXCHANGE_DATETIME_FORMAT)))
if event.is_all_day:
calendar_node.append(T.IsAllDayEvent('true'))
calendar_node.append(T.Location(event.location or u''))
if event.required_attendees:
calendar_node.append(resource_node(element=T.RequiredAttendees(), resources=event.required_attendees))
if event.optional_attendees:
calendar_node.append(resource_node(element=T.OptionalAttendees(), resources=event.optional_attendees))
if event.resources:
calendar_node.append(resource_node(element=T.Resources(), resources=event.resources))
if event.recurrence:
if event.recurrence == u'daily':
recurrence = T.DailyRecurrence(
T.Interval(str(event.recurrence_interval)),
)
elif event.recurrence == u'weekly':
recurrence = T.WeeklyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DaysOfWeek(event.recurrence_days),
)
elif event.recurrence == u'monthly':
recurrence = T.AbsoluteMonthlyRecurrence(
T.Interval(str(event.recurrence_interval)),
T.DayOfMonth(str(event.start.day)),
)
elif event.recurrence == u'yearly':
recurrence = T.AbsoluteYearlyRecurrence(
T.DayOfMonth(str(event.start.day)),
T.Month(event.start.strftime("%B")),
)
calendar_node.append(
T.Recurrence(
recurrence,
T.EndDateRecurrence(
T.StartDate(event.start.strftime(EXCHANGE_DATE_FORMAT)),
T.EndDate(event.recurrence_end_date.strftime(EXCHANGE_DATE_FORMAT)),
)
)
)
return root | 0.006725 |
def get_resampled_top_edge(self, angle_var=0.1):
"""
This methods computes a simplified representation of a fault top edge
by removing the points that are not describing a change of direction,
provided a certain tolerance angle.
:param float angle_var:
Number representing the maximum deviation (in degrees) admitted
without the creation of a new segment
:returns:
A :class:`~openquake.hazardlib.geo.line.Line` representing the
rupture surface's top edge.
"""
mesh = self.mesh
top_edge = [Point(mesh.lons[0][0], mesh.lats[0][0], mesh.depths[0][0])]
for i in range(len(mesh.triangulate()[1][0]) - 1):
v1 = numpy.asarray(mesh.triangulate()[1][0][i])
v2 = numpy.asarray(mesh.triangulate()[1][0][i + 1])
cosang = numpy.dot(v1, v2)
sinang = numpy.linalg.norm(numpy.cross(v1, v2))
angle = math.degrees(numpy.arctan2(sinang, cosang))
if abs(angle) > angle_var:
top_edge.append(Point(mesh.lons[0][i + 1],
mesh.lats[0][i + 1],
mesh.depths[0][i + 1]))
top_edge.append(Point(mesh.lons[0][-1],
mesh.lats[0][-1], mesh.depths[0][-1]))
line_top_edge = Line(top_edge)
return line_top_edge | 0.001404 |
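The loop above measures the turn angle between consecutive edge vectors as atan2(|v1 x v2|, v1 . v2), which is well conditioned for both small and near-180-degree angles. A tiny numpy check of that formula:

import math
import numpy as np

v1 = np.array([1.0, 0.0, 0.0])
v2 = np.array([1.0, 1.0, 0.0])
cosang = np.dot(v1, v2)
sinang = np.linalg.norm(np.cross(v1, v2))
angle = math.degrees(np.arctan2(sinang, cosang))
print(round(angle, 1))  # 45.0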
def content():
"""Helper method that returns just the content.
This method was added so that the text could be reused in the
dock_help module.
.. versionadded:: 3.2.2
:returns: A message object without brand element.
:rtype: safe.messaging.message.Message
"""
message = m.Message()
paragraph = m.Paragraph(
m.Image(
'file:///%s/img/screenshots/'
'osm-downloader-screenshot.png' % resources_path()),
style_class='text-center'
)
message.add(paragraph)
body = tr(
'This tool will fetch building (\'structure\') or road ('
'\'highway\') data from the OpenStreetMap project for you. '
'The downloaded data will have InaSAFE keywords defined and a '
'default QGIS style applied. To use this tool effectively:'
)
tips = m.BulletedList()
tips.add(tr(
'Your current extent, when opening this window, will be used to '
'determine the area for which you want data to be retrieved. '
'You can interactively select the area by using the '
'\'select on map\' button - which will temporarily hide this '
'window and allow you to drag a rectangle on the map. After you '
'have finished dragging the rectangle, this window will '
'reappear.'))
tips.add(tr(
'Check the output directory is correct. Note that the saved '
'dataset will be named after the type of data being downloaded '
'e.g. roads.shp or buildings.shp (and associated files).'
))
tips.add(tr(
'By default simple file names will be used (e.g. roads.shp, '
'buildings.shp). If you wish you can specify a prefix to '
'add in front of this default name. For example using a prefix '
'of \'padang-\' will cause the downloaded files to be saved as '
'\'padang-roads.shp\' and \'padang-buildings.shp\'. Note that '
'the only allowed prefix characters are A-Z, a-z, 0-9 and the '
'characters \'-\' and \'_\'. You can leave this blank if you '
'prefer.'
))
tips.add(tr(
'If a dataset already exists in the output directory it will be '
'overwritten.'
))
tips.add(tr(
'This tool requires a working internet connection and fetching '
'buildings or roads will consume your bandwidth.'))
tips.add(m.Link(
'http://www.openstreetmap.org/copyright',
text=tr(
'Downloaded data is copyright OpenStreetMap contributors '
'(click for more info).')
))
message.add(m.Paragraph(body))
message.add(tips)
message.add(m.Paragraph(
# format 'When the __Political boundaries__' for proper i18n
tr('When the %s '
'box in the Feature types menu is ticked, the Political boundary '
'options panel will be enabled. The panel lets you select which '
'admin level you wish to download. The admin levels are country '
'specific. When you select an admin level, the local name for '
'that admin level will be shown. You can change which country '
'is used for the admin level description using the country drop '
'down menu. The country will be automatically set to coincide '
'with the view extent if a matching country can be found.') %
(
m.ImportantText(tr('Political boundaries')).to_html(),
)))
message.add(m.Paragraph(
m.ImportantText(tr('Note: ')),
tr(
'We have only provide presets for a subset of the available '
'countries. If you want to know what the levels are for your '
'country, please check on the following web page: '),
m.Link(
'http://wiki.openstreetmap.org/wiki/Tag:boundary%3Dadministrative',
text=tr(
'List of OSM Admin Boundary definitions '))))
return message | 0.000253 |
def update(self, new_games):
""" new_games is a list of .tfrecord.zz new game records. """
new_games.sort(key=os.path.basename)
first_new_game = None
for idx, game in enumerate(new_games):
timestamp = file_timestamp(game)
if timestamp <= self.examples[-1][0]:
continue
elif first_new_game is None:
first_new_game = idx
num_new_games = len(new_games) - idx
print("Found {}/{} new games".format(
num_new_games, len(new_games)))
self.total_updates += num_new_games
self.examples.extend(self.func(game))
if first_new_game is None:
print("No new games", file_timestamp(
new_games[-1]), self.examples[-1][0]) | 0.002445 |
def plot_fit(self):
"""
Add the fit to the plot.
"""
self.plt.plot(*self.fit.fit, **self.options['fit']) | 0.014706 |
def _group_filter_values(seg, filter_indices, ms_per_input):
"""
Takes a list of 1s and 0s and returns a list of tuples of the form:
['y/n', timestamp].
"""
ret = []
for filter_value, (_segment, timestamp) in zip(filter_indices, seg.generate_frames_as_segments(ms_per_input)):
if filter_value == 1:
if len(ret) > 0 and ret[-1][0] == 'n':
ret.append(['y', timestamp]) # The last one was different, so we create a new one
elif len(ret) > 0 and ret[-1][0] == 'y':
ret[-1][1] = timestamp # The last one was the same as this one, so just update the timestamp
else:
ret.append(['y', timestamp]) # This is the first one
else:
if len(ret) > 0 and ret[-1][0] == 'n':
ret[-1][1] = timestamp
elif len(ret) > 0 and ret[-1][0] == 'y':
ret.append(['n', timestamp])
else:
ret.append(['n', timestamp])
return ret | 0.003956 |
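A self-contained sketch of the same run-length grouping on plain lists (no audio segment object): consecutive equal filter values collapse into a single ['y'/'n', last_timestamp] entry.

def group_values(filter_indices, timestamps):
    ret = []
    for value, ts in zip(filter_indices, timestamps):
        label = 'y' if value == 1 else 'n'
        if ret and ret[-1][0] == label:
            ret[-1][1] = ts          # same as the previous run, extend it
        else:
            ret.append([label, ts])  # value changed, start a new run
    return ret

print(group_values([1, 1, 0, 0, 1], [10, 20, 30, 40, 50]))
# [['y', 20], ['n', 40], ['y', 50]]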
def _remove_wire_nets(block):
""" Remove all wire nodes from the block. """
wire_src_dict = _ProducerList()
wire_removal_set = set() # set of all wirevectors to be removed
# one pass to build the map of value producers and
# all of the nets and wires to be removed
for net in block.logic:
if net.op == 'w':
wire_src_dict[net.dests[0]] = net.args[0]
if not isinstance(net.dests[0], Output):
wire_removal_set.add(net.dests[0])
# second full pass to create the new logic without the wire nets
new_logic = set()
for net in block.logic:
if net.op != 'w' or isinstance(net.dests[0], Output):
new_args = tuple(wire_src_dict.find_producer(x) for x in net.args)
new_net = LogicNet(net.op, net.op_param, new_args, net.dests)
new_logic.add(new_net)
# now update the block with the new logic and remove wirevectors
block.logic = new_logic
for dead_wirevector in wire_removal_set:
del block.wirevector_by_name[dead_wirevector.name]
block.wirevector_set.remove(dead_wirevector)
block.sanity_check() | 0.00087 |
def doesIntersect(self, other):
'''
:param: other - Line subclass
:return: boolean
Returns True iff:
ccw(self.A,self.B,other.A) * ccw(self.A,self.B,other.B) <= 0
and
ccw(other.A,other.B,self.A) * ccw(other.A,other.B,self.B) <= 0
'''
if self.A.ccw(self.B, other.A) * self.A.ccw(self.B, other.B) > 0:
return False
if other.A.ccw(other.B, self.A) * other.A.ccw(other.B, self.B) > 0:
return False
return True | 0.003781 |
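A self-contained sketch of the same ccw-based segment intersection test, assuming points are plain (x, y) tuples instead of the Point/Line classes used above:

def ccw(a, b, c):
    # Sign of the cross product (b - a) x (c - a):
    # > 0 counter-clockwise, < 0 clockwise, == 0 collinear.
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])

def segments_intersect(a, b, c, d):
    """True if segment a-b touches or crosses segment c-d."""
    if ccw(a, b, c) * ccw(a, b, d) > 0:
        return False  # c and d lie strictly on the same side of a-b
    if ccw(c, d, a) * ccw(c, d, b) > 0:
        return False  # a and b lie strictly on the same side of c-d
    return True

# segments_intersect((0, 0), (4, 4), (0, 4), (4, 0))  -> True
# Note: like the method above, collinear but disjoint segments also return True.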
def onMessageReceived(self, method_frame, properties, body):
"""
React to received message - deserialize it, add it to users reaction
function stored in ``self.react_fn`` and send back result.
If `Exception` is thrown during process, it is sent back instead of
message.
Note:
            In case of `Exception`, the response message doesn't have a useful `body`,
            but the following (string) parameters are stored in its headers:
- ``exception``, where the Exception's message is stored
- ``exception_type``, where ``e.__class__`` is stored
- ``exception_name``, where ``e.__class__.__name__`` is stored
- ``traceback`` where the full traceback is stored (contains line
number)
This allows you to react to unexpected cases at the other end of
the AMQP communication.
"""
# if UUID is not in headers, just ack the message and ignore it
if "UUID" not in properties.headers:
self.process_exception(
e=ValueError("No UUID provided, message ignored."),
uuid="",
routing_key=self.parseKey(method_frame),
body=body
)
return True # ack message
key = self.parseKey(method_frame)
uuid = properties.headers["UUID"]
try:
result = self.react_fn(
serializers.deserialize(body, self.globals),
self.get_sendback(uuid, key)
)
print "sending response", key
self.sendResponse(
serializers.serialize(result),
uuid,
key
)
except Exception, e:
self.process_exception(
e=e,
uuid=uuid,
routing_key=key,
body=str(e),
tb=traceback.format_exc().strip()
)
return True | 0.001511 |
def get_locale(self):
""" Retrieve the best matching locale using request headers
        .. note:: Probably one of the things to enhance quickly.
:rtype: str
"""
best_match = request.accept_languages.best_match(['de', 'fr', 'en', 'la'])
if best_match is None:
if len(request.accept_languages) > 0:
best_match = request.accept_languages[0][0][:2]
else:
return self.__default_lang__
lang = self.__default_lang__
if best_match == "de":
lang = "ger"
elif best_match == "fr":
lang = "fre"
elif best_match == "en":
lang = "eng"
elif best_match == "la":
lang = "lat"
return lang | 0.003922 |
def _add_labels(self, axes, dtype):
"""Given a 2x2 array of axes, add x and y labels
Parameters
----------
axes: numpy.ndarray, 2x2
A numpy array containing the four principal axes of an SIP plot
dtype: string
Can be either 'rho' or 'r', indicating the type of data that is
plotted: 'rho' stands for resistivities/conductivities, 'r' stands
            for impedances/conductances
Returns
-------
None
"""
for ax in axes[1, :].flat:
ax.set_xlabel('frequency [Hz]')
if dtype == 'rho':
axes[0, 0].set_ylabel(r'$|\rho| [\Omega m]$')
axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
axes[1, 0].set_ylabel(r"$\sigma' [S/m]$")
axes[1, 1].set_ylabel(r"$\sigma'' [S/m]$")
elif dtype == 'r':
axes[0, 0].set_ylabel(r'$|R| [\Omega]$')
axes[0, 1].set_ylabel(r'$-\phi [mrad]$')
axes[1, 0].set_ylabel(r"$Y' [S]$")
axes[1, 1].set_ylabel(r"$Y'' [S]$")
else:
raise Exception('dtype not known: {}'.format(dtype)) | 0.001741 |
def geocode(
self,
query,
query_type='StreetAddress',
maximum_responses=25,
is_freeform=False,
filtering=None,
exactly_one=True,
timeout=DEFAULT_SENTINEL,
):
"""
Return a location point by address.
:param str query: The query string to be geocoded.
:param str query_type: The type to provide for geocoding. It can be
`PositionOfInterest`, `StreetAddress` or `CadastralParcel`.
`StreetAddress` is the default choice if none provided.
:param int maximum_responses: The maximum number of responses
to ask to the API in the query body.
        :param str is_freeform: Whether the result should be returned as a
            free-form string rather than a more structured response.
            By default, value is False.
        :param str filtering: Provide a string that helps set the geocoder
            filter. It contains an XML string. See examples in the documentation
            and in the ignfrance.py file in the tests directory.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
query = self.format_string % query
# Check if acceptable query type
if query_type not in ['PositionOfInterest',
'StreetAddress',
'CadastralParcel']:
            raise GeocoderQueryError("""You did not provide a query_type the
            webservice can consume. It should be PositionOfInterest,
            StreetAddress or CadastralParcel""")
# Check query validity for CadastralParcel
if query_type == 'CadastralParcel' and len(query.strip()) != 14:
raise GeocoderQueryError("""You must send a string of fourteen
characters long to match the cadastre required code""")
sub_request = """
<GeocodeRequest returnFreeForm="{is_freeform}">
<Address countryCode="{query_type}">
<freeFormAddress>{query}</freeFormAddress>
{filtering}
</Address>
</GeocodeRequest>
"""
xml_request = self.xml_request.format(
method_name='LocationUtilityService',
sub_request=sub_request,
maximum_responses=maximum_responses
)
# Manage type change for xml case sensitive
if is_freeform:
is_freeform = 'true'
else:
is_freeform = 'false'
# Manage filtering value
if filtering is None:
filtering = ''
# Create query using parameters
request_string = xml_request.format(
is_freeform=is_freeform,
query=query,
query_type=query_type,
filtering=filtering
)
params = {
'xls': request_string
}
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
raw_xml = self._request_raw_content(url, timeout)
return self._parse_xml(
raw_xml,
is_freeform=is_freeform,
exactly_one=exactly_one
) | 0.001082 |
def _from_json(json_data):
"""
Creates a Earthquake from json data.
:param json_data: The raw json data to parse
:type json_data: dict
:returns: Earthquake
"""
try:
coordinates = json_data['geometry']['coordinates']
except KeyError:
raise USGSException("The geometry information was not returned from the USGS website.")
try:
properties = json_data['properties']
except KeyError:
raise USGSException("One of the earthquakes did not have any property information")
return Earthquake(Coordinate._from_json(coordinates),
_parse_float(properties.get('mag', '0'), 0.0),
properties.get('place', ''),
_parse_int(properties.get('time', '0'), 0),
properties.get('url', ''),
_parse_int(properties.get('felt', '0'), 0),
_parse_float(properties.get('cdi', '0'), 0.0),
_parse_float(properties.get('mmi', '0'), 0.0),
properties['alert'] if 'alert' in properties and properties['alert'] else '',
properties.get('status', ''),
_parse_int(properties.get('sig', '0'), 0),
json_data.get('id', ''),
_parse_float(properties.get('dmin', '0'), 0.0),
_parse_float(properties.get('rms', '0'), 0.0),
_parse_float(properties.get('gap', '0'), 0.0)) | 0.003683 |
def export(self, output_folder):
"""Export matrices as ``*.npy`` files to an output folder."""
if not os.path.exists(output_folder):
os.makedirs(output_folder)
self._interact_with_folder(output_folder, 'w') | 0.008264 |
def multiple_choice_field_data(field, **kwargs):
"""
Return random value for MultipleChoiceField
>>> CHOICES = [('YNG', 'Child'), ('MIDDLE', 'Parent') ,('OLD', 'GrandParent')]
>>> result = any_form_field(forms.MultipleChoiceField(choices=CHOICES))
>>> type(result)
<type 'str'>
"""
if field.choices:
from django_any.functions import valid_choices
l = list(valid_choices(field.choices))
random.shuffle(l)
choices = []
count = xunit.any_int(min_value=1, max_value=len(field.choices))
for i in xrange(0, count):
choices.append(l[i])
return ' '.join(choices)
return 'None' | 0.005935 |
def set_window_refresh_callback(window, cbfun):
"""
Sets the refresh callback for the specified window.
Wrapper for:
GLFWwindowrefreshfun glfwSetWindowRefreshCallback(GLFWwindow* window, GLFWwindowrefreshfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_refresh_callback_repository:
previous_callback = _window_refresh_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowrefreshfun(cbfun)
_window_refresh_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowRefreshCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] | 0.002257 |
def par_compute_residuals(i):
"""Compute components of the residual and stopping thresholds that
can be done in parallel.
Parameters
----------
i : int
Index of group to compute
"""
# Compute the residuals in parallel, need to check if the residuals
# depend on alpha
global mp_ry0
global mp_ry1
global mp_sy0
global mp_sy1
global mp_nrmAx
global mp_nrmBy
global mp_nrmu
mp_ry0[i] = np.sum((mp_DXnr[i] - mp_Y0[i])**2)
mp_ry1[i] = mp_alpha**2*np.sum((mp_Xnr[mp_grp[i]:mp_grp[i+1]]-
mp_Y1[mp_grp[i]:mp_grp[i+1]])**2)
mp_sy0[i] = np.sum((mp_Y0old[i] - mp_Y0[i])**2)
mp_sy1[i] = mp_alpha**2*np.sum((mp_Y1old[mp_grp[i]:mp_grp[i+1]]-
mp_Y1[mp_grp[i]:mp_grp[i+1]])**2)
mp_nrmAx[i] = np.sum(mp_DXnr[i]**2) + mp_alpha**2 * np.sum(
mp_Xnr[mp_grp[i]:mp_grp[i+1]]**2)
mp_nrmBy[i] = np.sum(mp_Y0[i]**2) + mp_alpha**2 * np.sum(
mp_Y1[mp_grp[i]:mp_grp[i+1]]**2)
mp_nrmu[i] = np.sum(mp_U0[i]**2) + np.sum(mp_U1[mp_grp[i]:mp_grp[i+1]]**2) | 0.002715 |
def import_project(controller, project_id, stream, location=None, name=None, keep_compute_id=False):
"""
    Import a project contained in a zip file
    You need to handle OSError exceptions
:param controller: GNS3 Controller
:param project_id: ID of the project to import
:param stream: A io.BytesIO of the zipfile
:param location: Directory for the project if None put in the default directory
:param name: Wanted project name, generate one from the .gns3 if None
:param keep_compute_id: If true do not touch the compute id
:returns: Project
"""
if location and ".gns3" in location:
raise aiohttp.web.HTTPConflict(text="The destination path should not contain .gns3")
try:
with zipfile.ZipFile(stream) as myzip:
try:
topology = json.loads(myzip.read("project.gns3").decode())
# We import the project on top of an existing project (snapshots)
if topology["project_id"] == project_id:
project_name = topology["name"]
else:
# If the project name is already used we generate a new one
if name:
project_name = controller.get_free_project_name(name)
else:
project_name = controller.get_free_project_name(topology["name"])
except KeyError:
raise aiohttp.web.HTTPConflict(text="Can't import topology the .gns3 is corrupted or missing")
if location:
path = location
else:
projects_path = controller.projects_directory()
path = os.path.join(projects_path, project_id)
try:
os.makedirs(path, exist_ok=True)
except UnicodeEncodeError as e:
raise aiohttp.web.HTTPConflict(text="The project name contain non supported or invalid characters")
myzip.extractall(path)
topology = load_topology(os.path.join(path, "project.gns3"))
topology["name"] = project_name
# To avoid unexpected behavior (project start without manual operations just after import)
topology["auto_start"] = False
topology["auto_open"] = False
topology["auto_close"] = True
# Generate a new node id
node_old_to_new = {}
for node in topology["topology"]["nodes"]:
if "node_id" in node:
node_old_to_new[node["node_id"]] = str(uuid.uuid4())
_move_node_file(path, node["node_id"], node_old_to_new[node["node_id"]])
node["node_id"] = node_old_to_new[node["node_id"]]
else:
node["node_id"] = str(uuid.uuid4())
# Update link to use new id
for link in topology["topology"]["links"]:
link["link_id"] = str(uuid.uuid4())
for node in link["nodes"]:
node["node_id"] = node_old_to_new[node["node_id"]]
# Generate new drawings id
for drawing in topology["topology"]["drawings"]:
drawing["drawing_id"] = str(uuid.uuid4())
# Modify the compute id of the node depending of compute capacity
if not keep_compute_id:
# For some VM type we move them to the GNS3 VM if possible
# unless it's a linux host without GNS3 VM
if not sys.platform.startswith("linux") or controller.has_compute("vm"):
for node in topology["topology"]["nodes"]:
if node["node_type"] in ("docker", "qemu", "iou", "nat"):
node["compute_id"] = "vm"
else:
# Round-robin through available compute resources.
compute_nodes = itertools.cycle(controller.computes)
for node in topology["topology"]["nodes"]:
node["compute_id"] = next(compute_nodes)
compute_created = set()
for node in topology["topology"]["nodes"]:
if node["compute_id"] != "local":
# Project created on the remote GNS3 VM?
if node["compute_id"] not in compute_created:
compute = controller.get_compute(node["compute_id"])
yield from compute.post("/projects", data={
"name": project_name,
"project_id": project_id,
})
compute_created.add(node["compute_id"])
yield from _move_files_to_compute(compute, project_id, path, os.path.join("project-files", node["node_type"], node["node_id"]))
# And we dump the updated.gns3
dot_gns3_path = os.path.join(path, project_name + ".gns3")
# We change the project_id to avoid erasing the project
topology["project_id"] = project_id
with open(dot_gns3_path, "w+") as f:
json.dump(topology, f, indent=4)
os.remove(os.path.join(path, "project.gns3"))
if os.path.exists(os.path.join(path, "images")):
_import_images(controller, path)
project = yield from controller.load_project(dot_gns3_path, load=False)
return project
except zipfile.BadZipFile:
raise aiohttp.web.HTTPConflict(text="Can't import topology the file is corrupted or not a GNS3 project (invalid zip)") | 0.002505 |
def calculate_backend(name_from_env, backends=None):
"""
Calculates which backend to use with the following algorithm:
- Try to read the GOLESS_BACKEND environment variable.
Usually 'gevent' or 'stackless'.
If a value is set but no backend is available or it fails to be created,
this function will error.
- Determine the default backend (gevent for PyPy, stackless for Python).
If no default can be determined or created, continue.
- Try to create all the runtimes and choose the first one to create
successfully.
- If no runtime can be created, return a NullBackend,
which will error when accessed.
The "default" backend is the less-easy backend for a runtime.
Since PyPy has stackless by default, gevent is intentional.
Since Stackless is a separate interpreter for CPython,
that is more intentional than gevent.
We feel this is a good default behavior.
"""
if backends is None:
backends = _default_backends
if name_from_env:
if name_from_env not in backends:
raise RuntimeError(
'Invalid backend %r specified. Valid backends are: %s'
% (name_from_env, _default_backends.keys()))
# Allow this to raise, since it was explicitly set from the environment
# noinspection PyCallingNonCallable
return backends[name_from_env]()
try:
return _calc_default(backends)
except SystemError:
pass
for maker in backends.values():
# noinspection PyBroadException
try:
return maker()
except Exception:
pass
return NullBackend() | 0.000599 |
def _update_yaw_and_pitch(self):
"""
Updates the camera vectors based on the current yaw and pitch
"""
front = Vector3([0.0, 0.0, 0.0])
front.x = cos(radians(self.yaw)) * cos(radians(self.pitch))
front.y = sin(radians(self.pitch))
front.z = sin(radians(self.yaw)) * cos(radians(self.pitch))
self.dir = vector.normalise(front)
self.right = vector.normalise(vector3.cross(self.dir, self._up))
self.up = vector.normalise(vector3.cross(self.right, self.dir)) | 0.003738 |
def get_interfaces(zone, permanent=True):
'''
List interfaces bound to a zone
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_interfaces zone
'''
cmd = '--zone={0} --list-interfaces'.format(zone)
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd).split() | 0.002778 |
def augment(module_name, base_class):
"""Call the augment() method for all of the derived classes in the module """
for name, cls in inspect.getmembers(sys.modules[module_name],
lambda x : inspect.isclass(x) and issubclass(x, base_class) ):
if cls == base_class:
continue
cls.augment() | 0.016484 |
def see_form(self, url):
"""
Assert the existence of a HTML form that submits to the given URL.
"""
elements = ElementSelector(
world.browser,
str('//form[@action="%s"]' % url),
filter_displayed=True,
)
if not elements:
raise AssertionError("Expected form not found.") | 0.003077 |
def read_tree(input, schema):
'''Read a tree from a string or file
Args:
``input`` (``str``): Either a tree string, a path to a tree file (plain-text or gzipped), or a DendroPy Tree object
``schema`` (``str``): The schema of ``input`` (DendroPy, Newick, NeXML, or Nexus)
Returns:
* If the input is Newick, either a ``Tree`` object if ``input`` contains a single tree, or a ``list`` of ``Tree`` objects if ``input`` contains multiple trees (one per line)
* If the input is NeXML or Nexus, a ``dict`` of trees represented by ``input``, where keys are tree names (``str``) and values are ``Tree`` objects
'''
schema_to_function = {
'dendropy': read_tree_dendropy,
'newick': read_tree_newick,
'nexml': read_tree_nexml,
'nexus': read_tree_nexus
}
if schema.lower() not in schema_to_function:
raise ValueError("Invalid schema: %s (valid options: %s)" % (schema, ', '.join(sorted(schema_to_function.keys()))))
return schema_to_function[schema.lower()](input) | 0.005666 |
def download_count_upload(job,
master_ip,
input_file,
output_file,
kmer_length,
spark_conf,
memory,
sudo):
'''
Runs k-mer counting.
1. If the input file is located in S3, the file is copied into HDFS.
2. If the input file is not in Parquet format, the file is converted into Parquet.
3. The k-mers are counted and saved as text.
4. If the output path is an S3 URL, the file is copied back to S3.
:param job: Toil job
:param input_file: URL/path to input file to count k-mers on
:param output_file: URL/path to save k-mer counts at
:param kmer_length: The length of k-mer substrings to count.
:param spark_conf: Optional Spark configuration. If set, memory should \
not be set.
:param memory: Amount of memory to provided to Spark workers. Must be set \
if spark_conf is not set.
:param sudo: Whether or not to run Spark containers with sudo.
:type job: toil.Job
:type input_file: string
:type output_file: string
:type kmer_length: int or string
:type spark_conf: list of string or None
:type memory: int or None
:type sudo: boolean
'''
if master_ip is not None:
hdfs_dir = "hdfs://{0}:{1}/".format(master_ip, HDFS_MASTER_PORT)
else:
_log.warn('Master IP is not set. If default filesystem is not set, jobs may fail.')
hdfs_dir = ""
# if the file isn't already in hdfs, copy it in
hdfs_input_file = hdfs_dir
if input_file.startswith("s3://"):
# append the s3 file name to our hdfs path
hdfs_input_file += input_file.split("/")[-1]
# run the download
_log.info("Downloading input file %s to %s.", input_file, hdfs_input_file)
call_conductor(job, master_ip, input_file, hdfs_input_file,
memory=memory, override_parameters=spark_conf)
else:
if not input_file.startswith("hdfs://"):
_log.warn("If not in S3, input file (%s) expected to be in HDFS (%s).",
input_file, hdfs_dir)
# where are we writing the output to? is it going to a location in hdfs or not?
run_upload = True
hdfs_output_file = hdfs_dir + "kmer_output.txt"
if output_file.startswith(hdfs_dir):
run_upload = False
hdfs_output_file = output_file
# do we need to convert to adam?
if (hdfs_input_file.endswith('.bam') or
hdfs_input_file.endswith('.sam') or
hdfs_input_file.endswith('.fq') or
hdfs_input_file.endswith('.fastq')):
hdfs_tmp_file = hdfs_input_file
# change the file extension to adam
        hdfs_input_file = '.'.join(hdfs_input_file.split('.')[:-1] + ['adam'])
# convert the file
_log.info('Converting %s into ADAM format at %s.', hdfs_tmp_file, hdfs_input_file)
call_adam(job, master_ip,
['transform',
hdfs_tmp_file, hdfs_input_file],
memory=memory, override_parameters=spark_conf)
# run k-mer counting
_log.info('Counting %d-mers in %s, and saving to %s.',
kmer_length, hdfs_input_file, hdfs_output_file)
call_adam(job, master_ip,
['count_kmers',
hdfs_input_file, hdfs_output_file,
str(kmer_length)],
memory=memory, override_parameters=spark_conf)
# do we need to upload the file back? if so, run upload
if run_upload:
_log.info("Uploading output file %s to %s.", hdfs_output_file, output_file)
call_conductor(job, master_ip, hdfs_output_file, output_file,
memory=memory, override_parameters=spark_conf) | 0.003392 |
def any_contains_any(strings, candidates):
"""Whether any of the strings contains any of the candidates."""
for string in strings:
for c in candidates:
if c in string:
                return True
    return False | 0.004386
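A quick usage sketch with hypothetical inputs:

any_contains_any(['report.txt', 'error.log'], ['.log', '.tmp'])  # True: '.log' is in 'error.log'
any_contains_any(['report.txt'], ['.log', '.tmp'])               # falsy: no candidate matches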
def GetMemZippedMB(self):
'''Undocumented.'''
counter = c_uint()
ret = vmGuestLib.VMGuestLib_GetMemZippedMB(self.handle.value, byref(counter))
if ret != VMGUESTLIB_ERROR_SUCCESS: raise VMGuestLibException(ret)
return counter.value | 0.014815 |
def clear_data(self):
"""Removes the content data.
raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# [email protected], Jan 9, 2015
# Removes the item from AWS S3 and resets URL to ''
odl_repo, url = get_aws_s3_handle(self._config_map)
existing_url = self._payload.get_url_metadata().get_existing_string_values()[0]
# try to clear from payload first, in case that fails we won't mess with AWS
self._payload.clear_url()
key_path = existing_url.replace(url, '')
# for boto3, remove leading /
if key_path[0] == '/':
key_path = key_path[1::]
odl_repo.delete_object(
Bucket=self._config_map['s3_bucket'],
Key=key_path
) | 0.004444 |
def route(rule=None, blueprint=None, defaults=None, endpoint=None,
is_member=False, methods=None, only_if=None, **rule_options):
"""
Decorator to set default route rules for a view function. The arguments this
function accepts are very similar to Flask's :meth:`~flask.Flask.route`,
however, the ``is_member`` perhaps deserves an example::
class UserResource(ModelResource):
class Meta:
model = User
member_param = '<int:id>'
include_methods = ['list', 'get']
@route(is_member=True, methods=['POST'])
def set_profile_pic(user):
# do stuff
# registered like so in your ``app_bundle/routes.py``:
routes = lambda: [
resource(UserResource),
]
# results in the following routes:
# UserResource.list => GET /users
# UserResource.get => GET /users/<int:id>
# UserResource.set_profile_pic => POST /users/<int:id>/set-profile-pic
:param rule: The URL rule.
:param defaults: Any default values for parameters in the URL rule.
:param endpoint: The endpoint name of this view. Determined automatically if left
unspecified.
:param is_member: Whether or not this view is for a
:class:`~flask_unchained.bundles.resource.resource.Resource`
member method.
:param methods: A list of HTTP methods supported by this view. Defaults to
``['GET']``.
:param only_if: A boolean or callable to dynamically determine whether or not to
register this route with the app.
:param rule_options: Other kwargs passed on to :class:`~werkzeug.routing.Rule`.
"""
def wrapper(fn):
fn_routes = getattr(fn, FN_ROUTES_ATTR, [])
route = Route(rule, fn, blueprint=blueprint, defaults=defaults,
endpoint=endpoint, is_member=is_member, methods=methods,
only_if=only_if, **rule_options)
setattr(fn, FN_ROUTES_ATTR, fn_routes + [route])
return fn
if callable(rule):
fn = rule
rule = None
return wrapper(fn)
return wrapper | 0.002213 |
def media_url(self, with_ssl=False):
"""
Used to return a base media URL. Depending on whether we're serving
media remotely or locally, this either hands the decision off to the
backend, or just uses the value in settings.STATIC_URL.
args:
with_ssl: (bool) If True, return an HTTPS url (depending on how
the backend handles it).
"""
if self.serve_remote:
# Hand this off to whichever backend is being used.
url = self.remote_media_url(with_ssl)
else:
# Serving locally, just use the value in settings.py.
url = self.local_media_url
return url.rstrip('/') | 0.004167 |
def request(self, api_commands, *, timeout=None):
"""Make a request. Timeout is in seconds."""
if not isinstance(api_commands, list):
return self._execute(api_commands, timeout=timeout)
command_results = []
for api_command in api_commands:
result = self._execute(api_command, timeout=timeout)
command_results.append(result)
return command_results | 0.004706 |
def create_child_ref(self, transform_path, child_id=None):
"""
Creates a new child TransformRef with transform_path specified.
:param transform_path:
:param child_id:
:return: TransformRef child
"""
transform_ref = TransformRef(transform_path, child_id)
self.add_child_ref(transform_ref)
return transform_ref | 0.005263 |
def get_tail(self):
"""Gets tail
:return: Tail of linked list
"""
node = self.head
last_node = self.head
while node is not None:
last_node = node
node = node.next_node
return last_node | 0.007491 |
def coerce(self, value):
"""Coerce a single value according to this parameter's settings.
@param value: A L{str}, or L{None}. If L{None} is passed - meaning no
value is avalable at all, not even the empty string - and this
parameter is optional, L{self.default} will be returned.
"""
if value is None:
if self.optional:
return self.default
else:
value = ""
if value == "":
if not self.allow_none:
raise MissingParameterError(self.name, kind=self.kind)
return self.default
try:
self._check_range(value)
parsed = self.parse(value)
if self.validator and not self.validator(parsed):
raise ValueError(value)
return parsed
except ValueError:
try:
value = value.decode("utf-8")
message = "Invalid %s value %s" % (self.kind, value)
except UnicodeDecodeError:
message = "Invalid %s value" % self.kind
raise InvalidParameterValueError(message) | 0.001727 |
def get_user(login, hashes=False):
'''
Get user account details
login : string
login name
hashes : boolean
include NTHASH and LMHASH in verbose output
CLI Example:
.. code-block:: bash
salt '*' pdbedit.get kaylee
'''
users = list_users(verbose=True, hashes=hashes)
return users[login] if login in users else {} | 0.002674 |
def hoist_mutation(self, random_state):
"""Perform the hoist mutation operation on the program.
Hoist mutation selects a random subtree from the embedded program to
be replaced. A random subtree of that subtree is then selected and this
is 'hoisted' into the original subtrees location to form an offspring.
This method helps to control bloat.
Parameters
----------
random_state : RandomState instance
The random number generator.
Returns
-------
program : list
The flattened tree representation of the program.
"""
# Get a subtree to replace
start, end = self.get_subtree(random_state)
subtree = self.program[start:end]
# Get a subtree of the subtree to hoist
sub_start, sub_end = self.get_subtree(random_state, subtree)
hoist = subtree[sub_start:sub_end]
# Determine which nodes were removed for plotting
removed = list(set(range(start, end)) -
set(range(start + sub_start, start + sub_end)))
return self.program[:start] + hoist + self.program[end:], removed | 0.001696 |
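A worked sketch of the hoist step on a flat prefix-encoded program, assuming the two subtree spans are already known (the real method picks them via get_subtree and a RandomState):

# Flat program for add(mul(x, y), z) in prefix order.
program = ['add', 'mul', 'x', 'y', 'z']

start, end = 1, 4            # selected subtree: mul(x, y)
sub_start, sub_end = 1, 2    # sub-subtree to hoist: just 'x'
hoist = program[start:end][sub_start:sub_end]                # ['x']

offspring = program[:start] + hoist + program[end:]          # ['add', 'x', 'z']
removed = sorted(set(range(start, end)) -
                 set(range(start + sub_start, start + sub_end)))  # [1, 3]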
def _get_parent_id_list(self, qualifier_id, hierarchy_id):
"""Returns list of parent id strings for qualifier_id in hierarchy.
Uses memcache if caching is enabled.
"""
if self._caching_enabled():
key = 'parent_id_list_{0}'.format(str(qualifier_id))
# If configured to use memcache as the caching engine, use it.
# Otherwise default to diskcache
caching_engine = 'diskcache'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingEngine@json')
caching_engine = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
if caching_engine == 'memcache':
import memcache
caching_host = '127.0.0.1:11211'
try:
config = self._runtime.get_configuration()
parameter_id = Id('parameter:cachingHostURI@json')
caching_host = config.get_value_by_parameter(parameter_id).get_string_value()
except (AttributeError, KeyError, errors.NotFound):
pass
mc = memcache.Client([caching_host], debug=0)
parent_id_list = mc.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
mc.set(key, parent_id_list)
elif caching_engine == 'diskcache':
import diskcache
with diskcache.Cache('/tmp/dlkit_cache') as cache:
# A little bit non-DRY, since it's almost the same as for memcache above.
# However, for diskcache.Cache, we have to call ".close()" or use a
# ``with`` statement to safeguard calling ".close()", so we keep this
# separate from the memcache implementation.
parent_id_list = cache.get(key)
if parent_id_list is None:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
cache.set(key, parent_id_list)
else:
raise errors.NotFound('The {0} caching engine was not found.'.format(caching_engine))
else:
parent_ids = self._get_hierarchy_session(hierarchy_id).get_parents(qualifier_id)
parent_id_list = [str(parent_id) for parent_id in parent_ids]
return parent_id_list | 0.004643 |
def random_array(shape, mean=128., std=20.):
"""Creates a uniformly distributed random array with the given `mean` and `std`.
Args:
shape: The desired shape
mean: The desired mean (Default value = 128)
std: The desired std (Default value = 20)
Returns: Random numpy array of given `shape` uniformly distributed with desired `mean` and `std`.
"""
x = np.random.random(shape)
# normalize around mean=0, std=1
x = (x - np.mean(x)) / (np.std(x) + K.epsilon())
# and then around the desired mean/std
x = (x * std) + mean
return x | 0.005085 |
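A quick numpy-only sketch of the same rescaling, assuming a small constant in place of K.epsilon():

import numpy as np

eps = 1e-7                                 # stand-in for K.epsilon()
x = np.random.random((64, 64))
x = (x - np.mean(x)) / (np.std(x) + eps)   # normalise to mean 0, std 1
x = (x * 20.) + 128.                       # then rescale to std 20, mean 128
print(round(x.mean(), 1), round(x.std(), 1))  # roughly 128.0 and 20.0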
def py_lnotab(self):
"""The encoded lnotab that python uses to compute when lines start.
Note
----
See Objects/lnotab_notes.txt in the cpython source for more details.
"""
reverse_lnotab = reverse_dict(self.lnotab)
py_lnotab = []
prev_instr = 0
prev_lno = self.firstlineno
for addr, instr in enumerate(_sparse_args(self.instrs)):
lno = reverse_lnotab.get(instr)
if lno is None:
continue
delta = lno - prev_lno
py_lnotab.append(addr - prev_instr)
py_lnotab.append(min(delta, max_lnotab_increment))
delta -= max_lnotab_increment
while delta > 0:
py_lnotab.append(0)
py_lnotab.append(min(delta, max_lnotab_increment))
delta -= max_lnotab_increment
prev_lno = lno
prev_instr = addr
return bytes(py_lnotab) | 0.002073 |
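A small self-contained sketch of the encoding loop above for (bytecode offset, line number) pairs; the per-entry cap (max_lnotab_increment in the snippet) is assumed to be 127 here, and offsets/lines are assumed to be increasing with offset steps that fit in one byte:

MAX_LNOTAB_INCREMENT = 127   # assumed per-entry cap on the line delta

def encode_lnotab(offsets_and_lines, firstlineno):
    out = []
    prev_offset, prev_line = 0, firstlineno
    for offset, line in offsets_and_lines:
        delta = line - prev_line
        out.append(offset - prev_offset)
        out.append(min(delta, MAX_LNOTAB_INCREMENT))
        delta -= MAX_LNOTAB_INCREMENT
        while delta > 0:                   # spill large line jumps over
            out.append(0)                  # zero-width extra entries
            out.append(min(delta, MAX_LNOTAB_INCREMENT))
            delta -= MAX_LNOTAB_INCREMENT
        prev_offset, prev_line = offset, line
    return bytes(out)

# encode_lnotab([(0, 2), (6, 3), (12, 300)], firstlineno=1)
# -> b'\x00\x01\x06\x01\x06\x7f\x00\x7f\x00+'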
def window(iterable, size=2):
    ''' yields windows of a given size '''
iterable = iter(iterable)
d = deque(islice(iterable, size-1), maxlen=size)
for _ in map(d.append, iterable):
yield tuple(d) | 0.00463 |
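Usage sketch (assuming the deque and islice imports the function relies on are in place):

list(window([1, 2, 3, 4, 5], size=3))
# -> [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
list(window('abc'))   # default size=2
# -> [('a', 'b'), ('b', 'c')]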
def load_map_projection(filename,
center=None, center_right=None, radius=None, method='orthographic',
registration='native', chirality=None, sphere_radius=None,
pre_affine=None, post_affine=None, meta_data=None):
'''
load_map_projection(filename) yields the map projection indicated by the given file name. Map
projections define the parameters of a projection to the 2D cortical surface via a
    registration name and projection parameters.
This function is primarily a wrapper around the MapProjection.load() function; for information
about options, see MapProjection.load.
'''
return MapProjection.load(filename,
center=center, center_right=center_right, radius=radius,
method=method, registration=registration, chirality=chirality,
sphere_radius=sphere_radius, pre_affine=pre_affine,
post_affine=post_affine) | 0.008671 |
def get_environmental_configuration(self, id_or_uri):
"""
Returns a description of the environmental configuration (supported feature set, calibrated minimum & maximum
power, location & dimensions, ...) of the resource.
Args:
id_or_uri:
Can be either the Unmanaged Device id or the uri
Returns:
dict:
EnvironmentalConfiguration
"""
uri = self._client.build_uri(id_or_uri) + "/environmentalConfiguration"
return self._client.get(uri) | 0.005415 |
def counter(self, key, value, timestamp=None):
"""Set a counter value
If the inner key does not exist is is created
:param key: counter to update
:type key: str
:param value: counter value
:type value: float
:return: An alignak_stat brok if broks are enabled else None
"""
_min, _max, count, _sum = self.stats.get(key, (None, None, 0, 0))
count += 1
_sum += value
if _min is None or value < _min:
_min = value
if _max is None or value > _max:
_max = value
self.stats[key] = (_min, _max, count, _sum)
# Manage local statsd part
if self.statsd_enabled and self.statsd_sock:
            # send a statsd counter packet: <prefix>.<name>.<key>:<value>|c
packet = '%s.%s.%s:%d|c' % (self.statsd_prefix, self.name, key, value)
packet = packet.encode('utf-8')
try:
self.statsd_sock.sendto(packet, self.statsd_addr)
except (socket.error, socket.gaierror):
pass
# cannot send? ok not a huge problem here and we cannot
# log because it will be far too verbose :p
# Manage Graphite part
if self.statsd_enabled and self.carbon:
self.send_to_graphite(key, value, timestamp=timestamp)
# Manage file part
if self.statsd_enabled and self.file_d:
if timestamp is None:
timestamp = int(time.time())
packet = self.line_fmt
if not self.date_fmt:
date = "%s" % timestamp
else:
date = datetime.datetime.fromtimestamp(timestamp).strftime(self.date_fmt)
packet = packet.replace("#date#", date)
packet = packet.replace("#counter#", '%s.%s.%s' % (self.statsd_prefix, self.name, key))
packet = packet.replace("#value#", '%d' % value)
packet = packet.replace("#uom#", 'c')
try:
self.file_d.write(packet)
except IOError:
logger.warning("Could not write to the file: %s", packet)
if self.broks_enabled:
logger.debug("alignak stat brok: %s = %s", key, value)
if timestamp is None:
timestamp = int(time.time())
return Brok({'type': 'alignak_stat',
'data': {
'ts': timestamp,
'type': 'counter',
'metric': '%s.%s.%s' % (self.statsd_prefix, self.name, key),
'value': value,
'uom': 'c'
}})
return None | 0.002195 |
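The running (min, max, count, sum) aggregate used above can be exercised on its own; a small hypothetical sketch:

stats = {}

def record(key, value):
    _min, _max, count, _sum = stats.get(key, (None, None, 0, 0))
    count += 1
    _sum += value
    if _min is None or value < _min:
        _min = value
    if _max is None or value > _max:
        _max = value
    stats[key] = (_min, _max, count, _sum)

for v in (3, 7, 5):
    record('backend.requests', v)
print(stats['backend.requests'])   # (3, 7, 3, 15)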
def closing_address(self, block_identifier: BlockSpecification) -> Optional[Address]:
""" Returns the address of the closer of the channel. """
return self.token_network.closing_address(
participant1=self.participant1,
participant2=self.participant2,
block_identifier=block_identifier,
channel_identifier=self.channel_identifier,
) | 0.007444 |
def line(self, x1, y1, x2, y2, draw=True):
'''Draws a line from (x1,y1) to (x2,y2)'''
p = self._path
self.newpath()
self.moveto(x1,y1)
self.lineto(x2,y2)
self.endpath(draw=draw)
self._path = p
return p | 0.015094 |
def lazy_parallelize(func, result, processes=None, partition_size=None):
"""
Lazily computes an iterable in parallel, and returns them in pool chunks
:param func: Function to apply
:param result: Data to apply to
:param processes: Number of processes to use in parallel
:param partition_size: Size of partitions for each parallel process
:return: Iterable of chunks where each chunk as func applied to it
"""
if processes is None or processes < 1:
processes = CPU_COUNT
else:
processes = min(processes, CPU_COUNT)
partition_size = partition_size or compute_partition_size(result, processes)
pool = Pool(processes=processes)
partitions = split_every(partition_size, iter(result))
packed_partitions = (pack(func, (partition, )) for partition in partitions)
for pool_result in pool.imap(unpack, packed_partitions):
yield pool_result
pool.terminate() | 0.002141 |
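A self-contained sketch of the same chunked parallel map using only the standard library; split_every, pack/unpack and compute_partition_size from the snippet are replaced with inline equivalents, and the per-chunk function must be picklable (defined at module level):

from itertools import islice
from multiprocessing import Pool

def chunks(iterable, size):
    it = iter(iterable)
    while True:
        chunk = list(islice(it, size))
        if not chunk:
            return
        yield chunk

def square_chunk(chunk):
    return [x * x for x in chunk]

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        # Each worker processes one chunk; results come back chunk by chunk.
        for result in pool.imap(square_chunk, chunks(range(10), 3)):
            print(result)   # [0, 1, 4], then [9, 16, 25], ...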
def get_gpg_home( appname, config_dir=None ):
"""
Get the GPG keyring directory for a particular application.
Return the path.
"""
assert is_valid_appname(appname)
config_dir = get_config_dir( config_dir )
path = os.path.join( config_dir, "gpgkeys", appname )
return path | 0.023102 |