text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def get_instance(
model, method="file",
img_dir=None, data_dir=None,
bucket=None
):
"""Return an instance of ConsumeStore."""
global _instances
if not isinstance(model, ConsumeModel):
raise TypeError(
"get_instance() expects a parker.ConsumeModel derivative."
)
if method == "file":
my_store = store.get_filestore_instance(
img_dir=img_dir,
data_dir=data_dir
)
elif method == "s3":
my_store = store.get_s3store_instance(
bucket=bucket
)
else:
raise ValueError("Unexpected method value, '%s'." % method)
key = "%s:%s" % (repr(model), repr(my_store))
try:
instance = _instances[key]
except KeyError:
instance = ConsumeStore(model, my_store)
_instances[key] = instance
return instance | 0.001156 |
def find_articulation_vertices(graph):
"""Finds all of the articulation vertices within a graph.
Returns a list of all articulation vertices within the graph.
Returns an empty list for an empty graph.
"""
articulation_vertices = []
all_nodes = graph.get_all_node_ids()
if len(all_nodes) == 0:
return articulation_vertices
# Run the algorithm on each of the connected components of the graph
components = get_connected_components_as_subgraphs(graph)
for component in components:
# --Call the internal articulation vertices function to find
# --the node list for this particular connected component
vertex_list = _internal_get_cut_vertex_list(component)
articulation_vertices.extend(vertex_list)
return articulation_vertices | 0.001235 |
def compute(self,
text, # text for which to find the most similar event
lang = "eng"): # language in which the text is written
"""
compute the list of most similar events for the given text
"""
params = { "lang": lang, "text": text, "topClustersCount": self._nrOfEventsToReturn }
res = self._er.jsonRequest("/json/getEventForText/enqueueRequest", params)
requestId = res["requestId"]
for i in range(10):
time.sleep(1) # sleep for 1 second to wait for the clustering to perform computation
res = self._er.jsonRequest("/json/getEventForText/testRequest", { "requestId": requestId })
if isinstance(res, list) and len(res) > 0:
return res
return None | 0.017327 |
def formula_1980(household, period, parameters):
'''
To compute this allowance, the 'rent' value must be provided for the same month, but 'housing_occupancy_status' is not necessary.
'''
return household('rent', period) * parameters(period).benefits.housing_allowance | 0.013378 |
def open(self, url, method='get', **kwargs):
"""Open a URL.
:param str url: URL to open
:param str method: Optional method; defaults to `'get'`
:param kwargs: Keyword arguments to `Session::request`
"""
response = self.session.request(method, url, **self._build_send_args(**kwargs))
self._update_state(response) | 0.00813 |
def _TypecheckDecorator(subject=None, **kwargs):
"""Dispatches type checks based on what the subject is.
Functions or methods are annotated directly. If this method is called
with keyword arguments only, return a decorator.
"""
if subject is None:
return _TypecheckDecoratorFactory(kwargs)
elif inspect.isfunction(subject) or inspect.ismethod(subject):
return _TypecheckFunction(subject, {}, 2, None)
else:
    raise TypeError('subject must be a function or a method') | 0.011086 |
def get_fields(self):
"""
Return all field objects
:rtype: a list of :class:`EncodedField` objects
"""
        fields = []
        for class_def in self.classes.class_def:
            for field in class_def.get_fields():
                fields.append(field)
        return fields | 0.01083 |
def generate_map_chart_file(qtl_matrix, lod_threshold,
map_chart_file='MapChart.map'):
""" This function converts our QTL matrix file into a MapChart input
file.
:arg qtl_matrix: the path to the QTL matrix file generated by
the plugin.
:arg lod_threshold: threshold used to determine if a given LOD value
        is reflective of the presence of a QTL.
:kwarg map_chart_file: name of the output file containing the
MapChart information.
"""
qtl_matrix = read_input_file(qtl_matrix, sep=',')
tmp_dic = {}
cnt = 1
tmp = {}
block = {}
for row in qtl_matrix[1:]:
linkgrp = qtl_matrix[cnt - 1][1]
if cnt == 1:
linkgrp = qtl_matrix[cnt][1]
if not linkgrp in tmp_dic:
tmp_dic[linkgrp] = [[], []]
infos = row[0:3]
if qtl_matrix[cnt][1] != linkgrp:
if tmp:
qtls = _extrac_qtl(tmp, block, qtl_matrix[0])
tmp_dic[linkgrp][1] = qtls
linkgrp = qtl_matrix[cnt][1]
tmp_dic[linkgrp] = [[], []]
tmp = {}
block = {}
tmp_dic[linkgrp][0].append([row[0], row[2]])
colcnt = 3
for cel in row[3:-1]:
blockrow = infos[:]
blockrow.extend([qtl_matrix[0][colcnt], cel])
if colcnt in block:
block[colcnt].append(blockrow)
else:
block[colcnt] = [blockrow]
if cel.strip() != '' and float(cel) >= float(lod_threshold):
temp = infos[:]
if not tmp\
or (qtl_matrix[0][colcnt] in tmp
and float(cel) >= float(
tmp[qtl_matrix[0][colcnt]][-1])
) \
or qtl_matrix[0][colcnt] not in tmp:
temp.extend([qtl_matrix[0][colcnt], cel])
tmp[qtl_matrix[0][colcnt]] = temp
colcnt = colcnt + 1
cnt = cnt + 1
    qtl_info = {}
    stream = None
    try:
        stream = open(map_chart_file, 'w')
keys = list(tmp_dic.keys())
## Remove unknown group, reason:
# The unlinked markers, if present, are always put in group U by
# MapQTL. If you don't omit them and there are many (often), then
# their names take so much space that it is difficult to fit them
# on the page.
if 'U' in keys:
keys.remove('U')
# Try to convert all the groups to int, which would result in
# a better sorting. If that fails, fail silently.
try:
keys = [int(key) for key in keys]
except ValueError:
pass
keys.sort()
for key in keys:
key = str(key) # Needed since we might have converted them to int
if tmp_dic[key]:
if key == 'U': # pragma: no cover
# We removed the key before, we should not be here
continue
stream.write('group %s\n' % key)
for entry in _order_linkage_group(tmp_dic[key][0]):
stream.write(' '.join(entry) + '\n')
if tmp_dic[key][1]:
stream.write('\n')
stream.write('qtls\n')
for qtl in tmp_dic[key][1]:
qtl_info[qtl.peak_mk] = qtl.get_flanking_markers()
stream.write('%s \n' % qtl.to_string())
stream.write('\n')
stream.write('\n')
    except IOError as err:  # pragma: no cover
        LOG.info('An error occurred while writing the map chart map '
                 'to the file %s' % map_chart_file)
        LOG.debug("Error: %s" % err)
    finally:
        if stream is not None:
            stream.close()
LOG.info('Wrote MapChart map in file %s' % map_chart_file)
return qtl_info | 0.000768 |
def _convert_size(self, size_str):
"""Convert units to GB"""
suffix = size_str[-1]
if suffix == 'K':
multiplier = 1.0 / (1024.0 * 1024.0)
elif suffix == 'M':
multiplier = 1.0 / 1024.0
elif suffix == 'T':
multiplier = 1024.0
else:
multiplier = 1
try:
val = float(size_str.split(' ')[0])
return val * multiplier
except ValueError:
return 0.0 | 0.004098 |
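A minimal usage sketch for `_convert_size` above: since `self` is never referenced in the body, the logic can be exercised directly by passing None for it (the inputs below are made up):
print(_convert_size(None, '512 M'))   # 0.5   (512 MB expressed in GB)
print(_convert_size(None, '2 T'))     # 2048.0
print(_convert_size(None, 'oops'))    # 0.0   (unparseable value falls back to 0.0)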
def _ensure_format(rule, attribute, res_dict):
"""Verifies that attribute in res_dict is properly formatted.
Since, in the .ini-files, lists are specified as ':' separated text and
UUID values can be plain integers we need to transform any such values
into proper format. Empty strings are converted to None if validator
specifies that None value is accepted.
"""
if rule == 'type:uuid' or (rule == 'type:uuid_or_none' and
res_dict[attribute]):
res_dict[attribute] = uuidify(res_dict[attribute])
elif rule == 'type:uuid_list':
if not res_dict[attribute]:
res_dict[attribute] = []
else:
temp_list = res_dict[attribute].split(':')
res_dict[attribute] = []
for item in temp_list:
                res_dict[attribute].append(uuidify(item))
elif rule == 'type:string_or_none' and res_dict[attribute] == "":
res_dict[attribute] = None | 0.001025 |
def get_elasticache_clusters_by_region(self, region):
        ''' Makes an AWS API call to get the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
        # CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region) | 0.001817 |
def remove(name=None, pkgs=None, purge=False, **kwargs): # pylint: disable=unused-argument
'''
Remove packages using ``apk del``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
old = list_pkgs()
pkg_to_remove = []
if name:
if ',' in name:
pkg_to_remove = name.split(',')
else:
pkg_to_remove = [name]
if pkgs:
pkg_to_remove.extend(pkgs)
if not pkg_to_remove:
return {}
if purge:
cmd = ['apk', 'del', '--purge']
else:
cmd = ['apk', 'del']
cmd.extend(pkg_to_remove)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False
)
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret | 0.001288 |
def _AssAttr(self, t):
""" Handle assigning an attribute of an object
"""
self._dispatch(t.expr)
self._write('.'+t.attrname) | 0.012821 |
def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):
"""Add a field in the index of the model.
Args:
            fieldname (Text): Name of the new field to register in the specified model.
            fieldspec (Name, optional): Whoosh field type for the new field (defaults to TEXT).
        Returns:
            TYPE: The updated schema after the field has been added.
"""
self._whoosh.add_field(fieldname, fieldspec)
return self._whoosh.schema | 0.006508 |
def longest_non_repeat_v1(string):
"""
Find the length of the longest substring
without repeating characters.
"""
if string is None:
return 0
    last_seen = {}
    max_length = 0
    j = 0
    for i in range(len(string)):
        if string[i] in last_seen:
            j = max(last_seen[string[i]], j)
        last_seen[string[i]] = i + 1
        max_length = max(max_length, i - j + 1)
return max_length | 0.002398 |
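A few quick checks of the sliding-window function above (the expected values follow the standard longest-substring-without-repeats problem):
assert longest_non_repeat_v1("abcabcbb") == 3   # "abc"
assert longest_non_repeat_v1("bbbbb") == 1      # "b"
assert longest_non_repeat_v1("pwwkew") == 3     # "wke"
assert longest_non_repeat_v1(None) == 0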
def start_blocking(self):
""" Start the advertiser in the background, but wait until it is ready """
self._cav_started.clear()
self.start()
self._cav_started.wait() | 0.015228 |
def fullcompare(self, other):
"""Compare two names, returning a 3-tuple (relation, order, nlabels).
        I{relation} describes the relationship between the names,
and is one of: dns.name.NAMERELN_NONE,
dns.name.NAMERELN_SUPERDOMAIN, dns.name.NAMERELN_SUBDOMAIN,
dns.name.NAMERELN_EQUAL, or dns.name.NAMERELN_COMMONANCESTOR
I{order} is < 0 if self < other, > 0 if self > other, and ==
0 if self == other. A relative name is always less than an
absolute name. If both names have the same relativity, then
the DNSSEC order relation is used to order them.
I{nlabels} is the number of significant labels that the two names
have in common.
"""
sabs = self.is_absolute()
oabs = other.is_absolute()
if sabs != oabs:
if sabs:
return (NAMERELN_NONE, 1, 0)
else:
return (NAMERELN_NONE, -1, 0)
l1 = len(self.labels)
l2 = len(other.labels)
ldiff = l1 - l2
if ldiff < 0:
l = l1
else:
l = l2
order = 0
nlabels = 0
namereln = NAMERELN_NONE
while l > 0:
l -= 1
l1 -= 1
l2 -= 1
label1 = self.labels[l1].lower()
label2 = other.labels[l2].lower()
if label1 < label2:
order = -1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
elif label1 > label2:
order = 1
if nlabels > 0:
namereln = NAMERELN_COMMONANCESTOR
return (namereln, order, nlabels)
nlabels += 1
order = ldiff
if ldiff < 0:
namereln = NAMERELN_SUPERDOMAIN
elif ldiff > 0:
namereln = NAMERELN_SUBDOMAIN
else:
namereln = NAMERELN_EQUAL
return (namereln, order, nlabels) | 0.002468 |
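If this is dnspython's `dns.name.Name.fullcompare` (the NAMERELN_* constants suggest so), a hedged usage sketch looks like this; the expected tuple is what the algorithm above yields for these two names:
import dns.name

a = dns.name.from_text('www.example.com')
b = dns.name.from_text('example.com')
relation, order, nlabels = a.fullcompare(b)
# a is a subdomain of b, so: relation == NAMERELN_SUBDOMAIN, order > 0,
# and nlabels == 3 (the shared 'example', 'com' and root labels)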
def hot_plug_cpu(self, cpu):
"""Plugs a CPU into the machine.
in cpu of type int
The CPU id to insert.
"""
if not isinstance(cpu, baseinteger):
raise TypeError("cpu can only be an instance of type baseinteger")
self._call("hotPlugCPU",
in_p=[cpu]) | 0.008982 |
def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
            new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val | 0.008523 |
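A minimal sketch of the exponential-moving-average behaviour implemented above; `RunningAvg` is an assumed host class (the real class presumably sets `_gamma` and `_value` in its constructor):
class RunningAvg:
    def __init__(self, gamma):
        self._gamma = gamma    # smoothing factor in [0, 1]
        self._value = None     # no observations yet
    update = update            # bind the function defined above as a method

est = RunningAvg(gamma=0.9)
est.update(10.0)               # first observation is stored as-is -> 10.0
est.update(20.0)               # 0.9 * 10.0 + 0.1 * 20.0 -> 11.0
print(est._value)              # 11.0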
def award_group_principal_award_recipient(tag):
"""
Find the award group principal award recipient, one for each
item found in the get_funding_group section
"""
award_group_principal_award_recipient = []
principal_award_recipients = extract_nodes(tag, "principal-award-recipient")
for t in principal_award_recipients:
principal_award_recipient_text = ""
institution = node_text(first(extract_nodes(t, "institution")))
surname = node_text(first(extract_nodes(t, "surname")))
given_names = node_text(first(extract_nodes(t, "given-names")))
string_name = node_text(first(raw_parser.string_name(t)))
# Concatenate name and institution values if found
# while filtering out excess whitespace
if(given_names):
principal_award_recipient_text += given_names
if(principal_award_recipient_text != ""):
principal_award_recipient_text += " "
if(surname):
principal_award_recipient_text += surname
if(institution):
principal_award_recipient_text += institution
if(string_name):
principal_award_recipient_text += string_name
award_group_principal_award_recipient.append(principal_award_recipient_text)
return award_group_principal_award_recipient | 0.002252 |
def declallvars(self):
"""generator on all declaration of variable"""
for f in self.body:
if (hasattr(f, '_ctype')
and not isinstance(f._ctype, FuncType)):
yield f | 0.008811 |
def perform_permissions_check(self, user, obj, perms):
""" Performs the permissions check. """
return self.request.forum_permission_handler.can_move_topics(obj, user) | 0.010989 |
def replace_entities(self, html):
"""
Replace htmlentities with unicode characters
@Params
html - html source to replace entities in
@Returns
String html with entities replaced
"""
def fixup(text):
"""replace the htmlentities in some text"""
text = text.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return chr(int(text[3:-1], 16))
else:
return chr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = chr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub(r"&#?\w+;", fixup, html) | 0.00207 |
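Since `self` is unused in `replace_entities`, the substitution can be exercised directly; on Python 3 the `htmlentitydefs` module referenced above lives at `html.entities` (assumed to be imported under that alias):
import re
from html import entities as htmlentitydefs  # Python 3 home of name2codepoint

print(replace_entities(None, 'Fish &amp; Chips'))   # Fish & Chips
print(replace_entities(None, 'caf&#233;'))          # café
print(replace_entities(None, 'hex: &#x41;'))        # hex: A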
def get_passphrase(self, prompt='Passphrase:'):
"""Ask the user for passphrase."""
passphrase = None
if self.cached_passphrase_ack:
passphrase = self.cached_passphrase_ack.get()
if passphrase is None:
passphrase = interact(
title='{} passphrase'.format(self.device_name),
prompt=prompt,
description=None,
binary=self.passphrase_entry_binary,
options=self.options_getter())
if self.cached_passphrase_ack:
self.cached_passphrase_ack.set(passphrase)
return passphrase | 0.003185 |
def fetchone(self):
""" Fetch next row """
self._check_executed()
row = yield self.read_next()
if row is None:
raise gen.Return(None)
self.rownumber += 1
raise gen.Return(row) | 0.008511 |
def publish(self, message, properties=None, mandatory=False):
"""
Publish a message to an AMQP exchange.
Parameters
----------
message: string
Message to publish.
properties: dict
Properties to set on message. This parameter is optional, but if set, at least the following options must be set:
content_type: string - what content_type to specify, default is 'text/plain'.
            delivery_mode: int - what delivery_mode to use. By default messages are not persistent, but this can be
            set by specifying PERSISTENT_MESSAGE.
The following options are also available:
routing_key: string - what routing_key to use. MUST be set if this was not set during __init__.
exchange: string - what exchange to use. MUST be set if this was not set during __init__.
mandatory: boolean
If set to True, the mandatory bit will be set on the published message.
Returns
-------
Depending on the mode of the Channel, the return value can signify different things:
basic_Confirm is active:
True means that the message has been delivered to a queue, False means it hasn't.
mandatory bit was set on message:
True means that the message has been delivered to a consumer, False means that it has been returned.
No special bit or mode has been set:
None is returned.
"""
return publish_message(self.channel, self.exchange_name, self.default_routing_key, message, properties, mandatory) | 0.026685 |
def guard(func):
""" Prevents the decorated function from parallel execution.
Internally, this decorator creates a Lock object and transparently
obtains/releases it when calling the function.
"""
semaphore = threading.Lock()
@functools.wraps(func)
def wrapper(*args, **kwargs):
semaphore.acquire()
try:
return func(*args, **kwargs)
finally:
semaphore.release()
return wrapper | 0.002169 |
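A small usage sketch for the `guard` decorator above; the worker function and the sleep are illustrative only, chosen to make the race window obvious:
import threading
import time

@guard
def append_safely(bucket, item):
    snapshot = list(bucket)        # read
    time.sleep(0.001)              # widen the race window
    bucket[:] = snapshot + [item]  # write back

bucket = []
threads = [threading.Thread(target=append_safely, args=(bucket, i)) for i in range(20)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(len(bucket))                 # 20 -- every read-modify-write ran under the lock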
def _dispatch(self, func, args=None):
"""Send message to parent process
Arguments:
func (str): Name of function for parent to call
args (list, optional): Arguments passed to function when called
"""
data = json.dumps(
{
"header": "pyblish-qml:popen.request",
"payload": {
"name": func,
"args": args or list(),
}
}
)
# This should never happen. Each request is immediately
# responded to, always. If it isn't the next line will block.
# If multiple responses were made, then this will fail.
# Both scenarios are bugs.
assert self.channels["response"].empty(), (
"There were pending messages in the response channel")
sys.stdout.write(data + "\n")
sys.stdout.flush()
try:
message = self.channels["response"].get()
if six.PY3:
response = json.loads(message)
else:
response = _byteify(json.loads(message, object_hook=_byteify))
except TypeError as e:
raise e
else:
assert response["header"] == "pyblish-qml:popen.response", response
return response["payload"] | 0.001497 |
def list(self, status=values.unset, phone_number=values.unset,
incoming_phone_number_sid=values.unset, friendly_name=values.unset,
unique_name=values.unset, limit=None, page_size=None):
"""
Lists DependentHostedNumberOrderInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param DependentHostedNumberOrderInstance.Status status: The Status of this HostedNumberOrder.
:param unicode phone_number: An E164 formatted phone number.
:param unicode incoming_phone_number_sid: IncomingPhoneNumber sid.
:param unicode friendly_name: A human readable description of this resource.
:param unicode unique_name: A unique, developer assigned name of this HostedNumberOrder.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.hosted_numbers.authorization_document.dependent_hosted_number_order.DependentHostedNumberOrderInstance]
"""
return list(self.stream(
status=status,
phone_number=phone_number,
incoming_phone_number_sid=incoming_phone_number_sid,
friendly_name=friendly_name,
unique_name=unique_name,
limit=limit,
page_size=page_size,
)) | 0.008338 |
def getColumnName(self, attribute):
"""
        Retrieve the fully qualified column name for a particular
attribute in this store. The attribute must be bound to an
Item subclass (its type must be valid). If the underlying
table does not exist in the database, it will be created as a
side-effect.
        @param attribute: an attribute of an Item subclass
@return: a string
"""
if attribute not in self.attrToColumnNameCache:
self.attrToColumnNameCache[attribute] = '.'.join(
(self.getTableName(attribute.type),
self.getShortColumnName(attribute)))
return self.attrToColumnNameCache[attribute] | 0.002861 |
def get_object(pid_type, pid_value):
"""Get an object behind persistent identifier."""
from .models import PersistentIdentifier
obj = PersistentIdentifier.get(pid_type, pid_value)
if obj.has_object():
click.echo('{0.object_type} {0.object_uuid} {0.status}'.format(obj)) | 0.003401 |
def choicerank(
digraph, traffic_in, traffic_out, weight=None,
initial_params=None, alpha=1.0, max_iter=10000, tol=1e-8):
"""Compute the MAP estimate of a network choice model's parameters.
This function computes the maximum-a-posteriori (MAP) estimate of model
parameters given a network structure and node-level traffic data (see
:ref:`data-network`), using the ChoiceRank algorithm [MG17]_, [KTVV15]_.
The nodes are assumed to be labeled using consecutive integers starting
from 0.
Parameters
----------
digraph : networkx.DiGraph
Directed graph representing the network.
traffic_in : array_like
Number of arrivals at each node.
traffic_out : array_like
Number of departures at each node.
weight : str, optional
The edge attribute that holds the numerical value used for the edge
weight. If None (default) then all edge weights are 1.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
alpha : float, optional
Regularization parameter.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The MAP estimate of model parameters.
Raises
------
ImportError
If the NetworkX library cannot be imported.
"""
import networkx as nx
# Compute the (sparse) adjacency matrix.
n_items = len(digraph)
nodes = np.arange(n_items)
adj = nx.to_scipy_sparse_matrix(digraph, nodelist=nodes, weight=weight)
adj_t = adj.T.tocsr()
# Process the data into a standard form.
traffic_in = np.asarray(traffic_in)
traffic_out = np.asarray(traffic_out)
data = (adj, adj_t, traffic_in, traffic_out)
return _mm(
n_items, data, initial_params, alpha, max_iter, tol, _choicerank) | 0.000497 |
def _serialize_function(obj):
"""
    This still needs a lot of try/except handling; we should find a way to get rid of it.
:param obj:
:return:
"""
try:
obj = inspect.getsource(obj)
except (TypeError, IOError):
try:
obj = marshal.dumps(obj)
except ValueError:
if hasattr(obj, '__dict__'):
obj = _serialize_dict(obj.__dict__)
return obj | 0.004706 |
def min_max_temp(temp: str, unit: str = 'C') -> str:
"""
    Format the Min and Max temp elements into a readable string
Ex: Maximum temperature of 23°C (73°F) at 18-15:00Z
"""
if not temp or len(temp) < 7:
return ''
if temp[:2] == 'TX':
temp_type = 'Maximum'
elif temp[:2] == 'TN':
temp_type = 'Minimum'
else:
return ''
temp = temp[2:].replace('M', '-').replace('Z', '').split('/') # type: ignore
if len(temp[1]) > 2:
temp[1] = temp[1][:2] + '-' + temp[1][2:] # type: ignore
temp_value = temperature(core.make_number(temp[0]), unit)
return f'{temp_type} temperature of {temp_value} at {temp[1]}:00Z' | 0.002915 |
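A hedged usage sketch for `min_max_temp`; the exact value rendering comes from the module's `temperature` and `core.make_number` helpers, so the outputs below only indicate the expected shape:
print(min_max_temp('TX23/1818Z'))
# e.g. 'Maximum temperature of 23°C (73°F) at 18-18:00Z'
print(min_max_temp('TNM02/0600Z'))
# e.g. 'Minimum temperature of -2°C (28°F) at 06-00:00Z'  ('M' is read as a minus sign)
print(min_max_temp('bad'))   # '' -- too short to be a TX/TN group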
def point_sets(script, neighbors=10, smooth_iteration=0, flip=False,
viewpoint_pos=(0.0, 0.0, 0.0)):
""" Compute the normals of the vertices of a mesh without exploiting the
triangle connectivity, useful for dataset with no faces.
Args:
script: the FilterScript object or script filename to write
the filter to.
neighbors (int): The number of neighbors used to estimate normals.
        smooth_iteration (int): The number of smoothing iterations done on the
            points used to estimate and propagate normals.
flip (bool): Flip normals w.r.t. viewpoint. If the 'viewpoint' (i.e.
scanner position) is known, it can be used to disambiguate normals
orientation, so that all the normals will be oriented in the same
direction.
viewpoint_pos (single xyz point, tuple or list): Set the x, y, z
coordinates of the viewpoint position.
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
filter_xml = ''.join([
' <filter name="Compute normals for point sets">\n',
' <Param name="K" ',
'value="{:d}" '.format(neighbors),
'description="Neighbour num" ',
'type="RichInt" ',
'/>\n',
' <Param name="smoothIter" ',
'value="{:d}" '.format(smooth_iteration),
'description="Smooth Iteration" ',
'type="RichInt" ',
'/>\n',
' <Param name="flipFlag" ',
'value="{}" '.format(str(flip).lower()),
'description="Flip normals w.r.t. viewpoint" ',
'type="RichBool" ',
'/>\n',
' <Param name="viewPos" ',
'x="{}" y="{}" z="{}" '.format(viewpoint_pos[0], viewpoint_pos[1],
viewpoint_pos[2],),
'description="Viewpoint Pos." ',
'type="RichPoint3f" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | 0.000498 |
def recv(self, timeout=None):
"""Receive a handover select message from the remote server."""
message = self._recv(timeout)
if message and message.type == "urn:nfc:wkt:Hs":
log.debug("received '{0}' message".format(message.type))
return nfc.ndef.HandoverSelectMessage(message)
else:
log.error("received invalid message type {0}".format(message.type))
return None | 0.004525 |
def Process(self, parser_mediator, root_item=None, **kwargs):
"""Parses a document summary information OLECF item.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
root_item (Optional[pyolecf.item]): root item of the OLECF file.
Raises:
ValueError: If the root item is not set.
"""
# This will raise if unhandled keyword arguments are passed.
super(DocumentSummaryInformationOLECFPlugin, self).Process(
parser_mediator, **kwargs)
if not root_item:
raise ValueError('Root item not set.')
root_creation_time, root_modification_time = self._GetTimestamps(root_item)
for item_name in self.REQUIRED_ITEMS:
item = root_item.get_sub_item_by_name(item_name)
if not item:
continue
summary_information = OLECFDocumentSummaryInformation(item)
event_data = summary_information.GetEventData(
data_type='olecf:document_summary_info')
event_data.name = 'Document Summary Information'
if root_creation_time:
date_time = dfdatetime_filetime.Filetime(
timestamp=root_creation_time)
event = OLECFDocumentSummaryInformationEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if root_modification_time:
date_time = dfdatetime_filetime.Filetime(
timestamp=root_modification_time)
event = OLECFDocumentSummaryInformationEvent(
date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data) | 0.005282 |
def check(cls, status):
"""Checks if a status enum matches the trigger originally set, and
if so, raises the appropriate error.
Args:
status (int, enum): A protobuf enum response status to check.
Raises:
AssertionError: If trigger or error were not set.
_ApiError: If the statuses don't match. Do not catch. Will be
caught automatically and sent back to the client.
"""
assert cls.trigger is not None, 'Invalid ErrorTrap, trigger not set'
assert cls.error is not None, 'Invalid ErrorTrap, error not set'
if status == cls.trigger:
# pylint: disable=not-callable
# cls.error will be callable at runtime
raise cls.error() | 0.002587 |
def tell(self):
"""Returns the current position of read head.
"""
pos = ctypes.c_size_t()
check_call(_LIB.MXRecordIOReaderTell(self.handle, ctypes.byref(pos)))
return pos.value | 0.009259 |
def get_avatar_url(self, size=2):
"""Get URL to avatar picture
:param size: possible values are ``0``, ``1``, or ``2`` corresponding to small, medium, large
:type size: :class:`int`
:return: url to avatar
:rtype: :class:`str`
"""
hashbytes = self.get_ps('avatar_hash')
if hashbytes != "\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000":
ahash = hexlify(hashbytes).decode('ascii')
else:
ahash = 'fef49e7fa7e1997310d705b2a6158ff8dc1cdfeb'
sizes = {
0: '',
1: '_medium',
2: '_full',
}
url = "http://cdn.akamai.steamstatic.com/steamcommunity/public/images/avatars/%s/%s%s.jpg"
return url % (ahash[:2], ahash, sizes[size]) | 0.00612 |
def echo(text, **kwargs):
""" Print results to the console
:param text: the text string to print
:type text: str
:return: a string
:rtype: str
"""
if shakedown.cli.quiet:
return
    if 'n' not in kwargs:
kwargs['n'] = True
if 'd' in kwargs:
text = decorate(text, kwargs['d'])
if 'TERM' in os.environ and os.environ['TERM'] == 'velocity':
if text:
print(text, end="", flush=True)
if kwargs.get('n'):
print()
else:
click.echo(text, nl=kwargs.get('n')) | 0.003431 |
def plot(self, plot_cmd=None, tf=lambda y: y):
"""plot the data we have, return ``self``"""
if not plot_cmd:
plot_cmd = self.plot_cmd
colors = 'bgrcmyk'
pyplot.hold(False)
res = self.res
flatx, flatf = self.flattened()
minf = np.inf
for i in flatf:
minf = min((minf, min(flatf[i])))
addf = 1e-9 - minf if minf <= 1e-9 else 0
for i in sorted(res.keys()): # we plot not all values here
if isinstance(i, int):
color = colors[i % len(colors)]
arx = sorted(res[i].keys())
plot_cmd(arx, [tf(np.median(res[i][x]) + addf) for x in arx], color + '-')
pyplot.text(arx[-1], tf(np.median(res[i][arx[-1]])), i)
pyplot.hold(True)
plot_cmd(flatx[i], tf(np.array(flatf[i]) + addf), color + 'o')
pyplot.ylabel('f + ' + str(addf))
pyplot.draw()
pyplot.ion()
pyplot.show()
# raw_input('press return')
return self | 0.002844 |
def check_port(port, host, timeout=10):
"""
connect to port on host and return True on success
:param port: int, port to check
:param host: string, host address
:param timeout: int, number of seconds spent trying
:return: bool
"""
logger.info("trying to open connection to %s:%s", host, port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(timeout)
result = sock.connect_ex((host, port))
logger.info("was connection successful? errno: %s", result)
if result == 0:
logger.debug('port is opened: %s:%s' % (host, port))
return True
else:
logger.debug('port is closed: %s:%s' % (host, port))
return False
finally:
sock.close() | 0.001259 |
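A usage sketch for `check_port`; the host and port are illustrative, and a module-level `logger` like the one below is assumed by the function:
import logging
import socket

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

if check_port(443, 'example.com', timeout=3):
    print('example.com accepts connections on 443')
else:
    print('port closed or unreachable')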
def iter(self, start=0, stop=-1, withscores=False, reverse=None):
""" Return a range of values from sorted set name between
@start and @end sorted in ascending order unless @reverse or
:prop:reversed.
@start and @end: #int, can be negative, indicating the end of
the range.
@withscores: #bool indicates to return the scores along with the
members, as a list of |(member, score)| pairs
@reverse: #bool indicating whether to sort the results descendingly
-> yields members or |(member, score)| #tuple pairs
"""
reverse = reverse if reverse is not None else self.reversed
_loads = self._loads
for member in self._client.zrange(
self.key_prefix, start=start, end=stop, withscores=withscores,
desc=reverse, score_cast_func=self.cast):
if withscores:
yield (_loads(member[0]), self.cast(member[1]))
else:
yield _loads(member) | 0.001918 |
def from_any(cls, params, **ctor_opts):
"""
Creates a new Query object from input.
:param params: Parameter to convert to query
:type params: dict, string, or :class:`Query`
If ``params`` is a :class:`Query` object already, a deep copy is made
and a new :class:`Query` object is returned.
        If ``params`` is a string, then a :class:`Query` object is constructed
from it. The string itself is not parsed, but rather prepended to
any additional parameters (defined via the object's methods)
        with an additional ``&`` character.
If ``params`` is a dictionary, it is passed to the :class:`Query`
constructor.
:return: a new :class:`Query` object
:raise: :exc:`ArgumentError` if the input is none of the acceptable
types mentioned above. Also raises any exceptions possibly thrown
by the constructor.
"""
if isinstance(params, cls):
return deepcopy(params)
elif isinstance(params, dict):
ctor_opts.update(**params)
if cls is QueryBase:
if ('bbox' in params or 'start_range' in params or
'end_range' in params):
return SpatialQuery(**ctor_opts)
else:
return ViewQuery(**ctor_opts)
elif isinstance(params, basestring):
ret = cls()
ret._base_str = params
return ret
else:
raise ArgumentError.pyexc("Params must be Query, dict, or string") | 0.001881 |
def is_good_age_ratios(self):
"""Method to check the sum of age ratio is 1.
:returns: True if the sum is 1 or the sum less than 1 but there is
None.
:rtype: bool
"""
ratios = self.age_ratios()
if None in ratios:
# If there is None, just check to not exceeding 1
clean_ratios = [x for x in ratios if x is not None]
ratios.remove(None)
if sum(clean_ratios) > 1:
return False
else:
if sum(ratios) != 1:
return False
return True | 0.003361 |
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name) | 0.000876 |
def _GetNormalizedTimestamp(self):
"""Retrieves the normalized timestamp.
Returns:
decimal.Decimal: normalized timestamp, which contains the number of
seconds since January 1, 1970 00:00:00 and a fraction of second used
for increased precision, or None if the normalized timestamp cannot be
determined.
"""
if self._normalized_timestamp is None:
if self._timestamp is not None:
self._normalized_timestamp = (
decimal.Decimal(self._timestamp) /
definitions.MICROSECONDS_PER_SECOND)
return self._normalized_timestamp | 0.004918 |
def _plot_prepare(self, components, units):
"""
Prepare the ``PhaseSpacePosition`` or subclass for passing to a plotting
routine to plot all projections of the object.
"""
# components to plot
if components is None:
components = self.pos.components
n_comps = len(components)
# if units not specified, get units from the components
if units is not None:
if isinstance(units, u.UnitBase):
units = [units]*n_comps # global unit
elif len(units) != n_comps:
raise ValueError('You must specify a unit for each axis, or a '
'single unit for all axes.')
labels = []
x = []
for i,name in enumerate(components):
val = getattr(self, name)
if units is not None:
val = val.to(units[i])
unit = units[i]
else:
unit = val.unit
if val.unit != u.one:
uu = unit.to_string(format='latex_inline')
unit_str = ' [{}]'.format(uu)
else:
unit_str = ''
# Figure out how to fancy display the component name
if name.startswith('d_'):
dot = True
name = name[2:]
else:
dot = False
if name in _greek_letters:
name = r"\{}".format(name)
if dot:
name = "\dot{{{}}}".format(name)
labels.append('${}$'.format(name) + unit_str)
x.append(val.value)
return x, labels | 0.003608 |
def spoolable(*, pre_condition=True, body_params=()):
"""
Decorates a function to make it spoolable using uWSGI, but if no spooling mechanism is available,
the function is called synchronously. All decorated function arguments must be picklable and
the first annotated with `Context` will receive an object that defines the current execution state.
Return values are always ignored and all exceptions are caught in spooled mode.
:param pre_condition: additional condition needed to use spooler
:param body_params: parameter names that can have large values and should use spooler body
"""
def decorator(func):
context_name = None
keyword_kinds = {inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
invalid_body_params = set(body_params)
for name, parameter in inspect.signature(func).parameters.items():
if parameter.kind not in keyword_kinds:
continue
if not context_name and parameter.annotation is Context:
context_name = name
elif name in invalid_body_params:
invalid_body_params.remove(name)
if invalid_body_params:
raise TypeError('Spoolable task body_params must be keyword arguments')
task = Task(func, context_name=context_name, pre_condition=pre_condition, body_params=body_params)
spooler.register(task)
return task
return decorator | 0.006131 |
def is_predecessor_of_other(self, predecessor, others):
"""Returns whether the predecessor is a predecessor or a predecessor
of a predecessor...of any of the others.
Args:
predecessor (str): The txn id of the predecessor.
others (list(str)): The txn id of the successor.
Returns:
(bool)
"""
return any(predecessor in self._predecessors_by_id[o] for o in others) | 0.004454 |
def hist(self, by=None, bins=10, **kwds):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwds
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we draw a dice 6000 times, we expect to get each value around 1000
times. But when we draw two dices and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind='hist', by=by, bins=bins, **kwds) | 0.001213 |
def grid_track(lat,lon,sla,remove_edges=None,backbone=None,interp_over_continents=True):
"""
# GRID_TRACK
    # @summary: This function allows detecting gaps in a set of altimetry data and rebins the data regularly, with information on the gaps.
# @param lat {type:numeric} : latitude
# @param lon {type:numeric} : longitude
# @param sla {type:numeric} : data
# @return:
# outdst : resampled distance
# outlon : resampled longitude
# outlat : resampled latitude
# outsla : resampled data
# gaplen : length of the longest gap in data
# dx : average spatial sampling
# interpolated : True when data was interpolated (empty bin)
#
# @author: Renaud DUSSURGET (RD) - LER/PAC, Ifremer
# @change: Created by RD, July 2012
# 29/08/2012 : Major change -> number of output variables changes (added INTERPOLATED), and rebinning modified
# 06/11/2012 : Included in alti_tools lib
# 19/12/2012 : Added backbone option (reproject along the backbone grid)
"""
#Find gaps in data
if backbone is not None :
backlon=backbone[0]
backlat=backbone[1]
ascending=track_orient(lon,lat)
dst=calcul_distance(backlat[0],backlon[0],lat,lon)
if ascending : dst[lat < backlat[0]]*=-1
if not ascending : dst[lat > backlat[0]]*=-1
dstback=calcul_distance(backlat,backlon)
dx = dstback[1:] - dstback[:-1]
mn_dx = np.median(dx)
bins = np.round(dstback.max() / mn_dx)+1
range=(0/2.,mn_dx * bins) - mn_dx/2
# dfback=list(set(dstback).difference(set(dst)))
bhist,bbin_edges=np.histogram(dstback, bins=bins, range=range)
continent=np.where(bhist==0)[0]
if remove_edges is None : remove_edges=False
else :
dst=calcul_distance(lat,lon)
#Find gaps in data
dx = dst[1:] - dst[:-1]
mn_dx = np.median(dx)
bins = np.ceil(dst.max() / mn_dx) + 1
range=(0/2.,mn_dx * bins) - mn_dx/2
if remove_edges is None : remove_edges=True
hist,bin_edges=np.histogram(dst, bins=bins, range=range) #We have binned the data along a regular grid of size (bins) in the range (range)
#Missing data is thus represented by no data in a given bin
#Remove leading and trailing edges (and synchronise bin_edges)
if remove_edges == True :
while hist[0] == 0 :
hist=np.delete(hist,[0])
bin_edges=np.delete(bin_edges,[0])
while hist[-1] == 0 :
hist=np.delete(hist,[len(hist)-1])
bin_edges=np.delete(bin_edges,[len(bin_edges)-1])
nH =len(hist)
#Get filled bins indices
ok = np.arange(len(hist)).compress(np.logical_and(hist,True or False))
empty = np.arange(len(hist)).compress(~np.logical_and(hist,True or False))
if isinstance(sla,np.ma.masked_array) : outsla = np.ma.masked_array(np.repeat(sla.fill_value,nH),mask=np.ones(nH,dtype=bool),dtype=sla.dtype)
else : outsla = np.ma.masked_array(np.repeat(np.ma.default_fill_value(1.0),nH),mask=np.ones(nH,dtype=bool),dtype=np.float32)
if isinstance(sla,np.ma.masked_array) : outlon = np.ma.masked_array(np.repeat(lon.fill_value,nH),mask=np.ones(nH,dtype=bool),dtype=lon.dtype)
else : outlon = np.ma.masked_array(np.repeat(np.ma.default_fill_value(1.0),nH),mask=np.ones(nH,dtype=bool),dtype=np.float32)
if isinstance(sla,np.ma.masked_array) : outlat = np.ma.masked_array(np.repeat(lat.fill_value,nH),mask=np.ones(nH,dtype=bool),dtype=lat.dtype)
else : outlat = np.ma.masked_array(np.repeat(np.ma.default_fill_value(1.0),nH),mask=np.ones(nH,dtype=bool),dtype=np.float32)
outdst = bin_edges [:-1]+ mn_dx/2 #distances is taken at bins centers
outsla[ok] = sla
outlon[ok] = lon
outlat[ok] = lat
#Remove land mass point if asked
if not interp_over_continents :
sempty=np.sort(np.array(list(set(empty).difference(set(continent)))))
else : sempty=empty.copy()
#Fill the gaps if there are some
if len(empty) > 0 :
#Interpolate lon,lat @ empty positions
outlon[empty] = interp1d(ok, outlon[ok], empty, kind='cubic', fill_value=lon.fill_value)
outlat[empty] = interp1d(ok, outlat[ok], empty, kind='cubic', fill_value=lat.fill_value)
if len(sempty) > 0 :outsla[sempty] = interp1d(ok, outsla[ok], empty, kind=0, fill_value=sla.fill_value) #0-th order Spline interpolation
outlon.mask[outlon.data == outlon.fill_value] = outlon.fill_value
outlat.mask[outlat.data == outlat.fill_value] = outlat.fill_value
outsla.mask[outsla.data == outsla.fill_value] = outsla.fill_value
#Get gap properties
ind=np.arange(len(hist))
dhist=(hist[1:] - hist[:-1])
#Remove edge gaps
if (dhist!=0).sum()> 0 :
if (dhist[dhist!=0])[0] == 1 : dhist[(np.arange(nH)[dhist!=0])[0]]=0 #This do not count start and end of track as gaps
if (dhist!=0).sum()> 0 :
if (dhist[dhist!=0])[-1] == -1 : dhist[(np.arange(nH)[dhist!=0])[-1]]=0
st=ind[dhist==-1]+1
en=ind[dhist==1]
gaplen=(en-st) + 1
ngaps=len(st)
gapedges=np.array([st,en])
#Get empty bin flag
interpolated=~hist.astype('bool')
return outdst, outlon, outlat, outsla, gaplen, ngaps, gapedges, interpolated | 0.035405 |
def session_demo_alert_callback(n_clicks, session_state=None, **kwargs):
'Output text based on both app state and session state'
if session_state is None:
raise NotImplementedError("Cannot handle a missing session state")
csf = session_state.get('bootstrap_demo_state', None)
if not csf:
csf = dict(clicks=0)
session_state['bootstrap_demo_state'] = csf
else:
csf['clicks'] = n_clicks
return "Button has been clicked %s times since the page was rendered" %n_clicks | 0.00578 |
def get_current_state_m(self):
"""Returns the state model of the currently open tab"""
page_id = self.view.notebook.get_current_page()
if page_id == -1:
return None
page = self.view.notebook.get_nth_page(page_id)
state_identifier = self.get_state_identifier_for_page(page)
return self.tabs[state_identifier]['state_m'] | 0.005291 |
def boolean(meshes, operation='difference'):
"""
Run an operation on a set of meshes
"""
script = operation + '(){'
for i in range(len(meshes)):
script += 'import(\"$mesh_' + str(i) + '\");'
script += '}'
return interface_scad(meshes, script) | 0.003597 |
def write(self, data: bytes) -> int:
"""Returns the number of (encrypted) bytes sent.
"""
if self._sock is None:
raise IOError('Internal socket set to None; cannot perform handshake.')
if not self._is_handshake_completed:
raise IOError('SSL Handshake was not completed; cannot send data.')
# Pass the cleartext data to the SSL engine
self._ssl.write(data)
# Recover the corresponding encrypted data
final_length = self._flush_ssl_engine()
return final_length | 0.005386 |
def winning_abbr(self):
"""
Returns a ``string`` of the winning team's abbreviation, such as 'HOU'
for the Houston Astros.
"""
if self.winner == HOME:
return utils._parse_abbreviation(self._home_name)
return utils._parse_abbreviation(self._away_name) | 0.006452 |
def multiifo_noise_coinc_rate(rates, slop):
"""
Calculate the expected rate of noise coincidences for multiple detectors
Parameters
----------
rates: dict
Dictionary keyed on ifo string
Value is a sequence of single-detector trigger rates, units assumed
to be Hz
slop: float
time added to maximum time-of-flight between detectors to account
for timing error
Returns
-------
expected_coinc_rates: dict
Dictionary keyed on the ifo combination string
Value is expected coincidence rate in the combination, units Hz
"""
ifos = numpy.array(sorted(rates.keys()))
rates_raw = list(rates[ifo] for ifo in ifos)
expected_coinc_rates = {}
# Calculate coincidence for all-ifo combination
# multiply product of trigger rates by the overlap time
allowed_area = multiifo_noise_coincident_area(ifos, slop)
rateprod = [numpy.prod(rs) for rs in zip(*rates_raw)]
ifostring = ' '.join(ifos)
expected_coinc_rates[ifostring] = allowed_area * numpy.array(rateprod)
# if more than one possible coincidence type exists,
# calculate coincidence for subsets through recursion
if len(ifos) > 2:
# Calculate rate for each 'miss-one-out' detector combination
subsets = itertools.combinations(ifos, len(ifos) - 1)
for subset in subsets:
rates_subset = {}
for ifo in subset:
rates_subset[ifo] = rates[ifo]
sub_coinc_rates = multiifo_noise_coinc_rate(rates_subset, slop)
# add these sub-coincidences to the overall dictionary
for sub_coinc in sub_coinc_rates:
expected_coinc_rates[sub_coinc] = sub_coinc_rates[sub_coinc]
return expected_coinc_rates | 0.000561 |
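A hedged usage sketch for `multiifo_noise_coinc_rate`; the trigger rates are invented, and `multiifo_noise_coincident_area` must be importable from the same module for the call to work:
import numpy

rates = {
    'H1': numpy.array([1.0e-2, 2.0e-2]),   # single-detector trigger rates in Hz (made up)
    'L1': numpy.array([1.5e-2, 1.0e-2]),
}
coinc = multiifo_noise_coinc_rate(rates, slop=0.005)
print(coinc['H1 L1'])   # expected H1-L1 noise coincidence rate per rate sample, in Hz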
def autoload_static(model, resources, script_path):
''' Return JavaScript code and a script tag that can be used to embed
Bokeh Plots.
The data for the plot is stored directly in the returned JavaScript code.
Args:
model (Model or Document) :
resources (Resources) :
script_path (str) :
Returns:
(js, tag) :
JavaScript code to be saved at ``script_path`` and a ``<script>``
tag to load it
Raises:
ValueError
'''
# TODO: maybe warn that it's not exactly useful, but technically possible
# if resources.mode == 'inline':
# raise ValueError("autoload_static() requires non-inline resources")
if isinstance(model, Model):
models = [model]
elif isinstance (model, Document):
models = model.roots
else:
raise ValueError("autoload_static expects a single Model or Document")
with OutputDocumentFor(models):
(docs_json, [render_item]) = standalone_docs_json_and_render_items([model])
bundle = bundle_all_models() or ""
script = script_for_render_items(docs_json, [render_item])
(modelid, elementid) = list(render_item.roots.to_json().items())[0]
js = wrap_in_onload(AUTOLOAD_JS.render(
js_urls = resources.js_files,
css_urls = resources.css_files,
js_raw = resources.js_raw + [bundle, script],
css_raw = resources.css_raw_str,
elementid = elementid,
))
tag = AUTOLOAD_TAG.render(
src_path = script_path,
elementid = elementid,
)
return encode_utf8(js), encode_utf8(tag) | 0.0105 |
def exists(name=None, region=None, key=None, keyid=None, profile=None,
vpc_id=None, vpc_name=None, group_id=None):
'''
Check to see if a security group exists.
CLI example::
salt myminion boto_secgroup.exists mysecgroup
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
group = _get_group(conn, name=name, vpc_id=vpc_id, vpc_name=vpc_name,
group_id=group_id, region=region, key=key, keyid=keyid,
profile=profile)
if group:
return True
else:
return False | 0.001684 |
def _get_national_number_groups(numobj, formatting_pattern):
"""Helper method to get the national-number part of a number, formatted without any national
prefix, and return it as a set of digit blocks that should be formatted together according to
the formatting pattern passed in."""
# If a format is provided, we format the NSN only, and split that according to the separator.
nsn = national_significant_number(numobj)
return _format_nsn_using_pattern(nsn, formatting_pattern,
PhoneNumberFormat.RFC3966).split(U_DASH) | 0.006897 |
def length(self):
"""Gets the length of the Vector"""
return math.sqrt((self.x * self.x) + (self.y * self.y) + (self.z * self.z) + (self.w * self.w)) | 0.018182 |
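A small numeric check of the 4-component length above; `Vector4` is a hypothetical host class exposing x, y, z and w:
import math

class Vector4:
    def __init__(self, x, y, z, w):
        self.x, self.y, self.z, self.w = x, y, z, w
    length = length   # bind the function defined above as a method

print(Vector4(2.0, 3.0, 6.0, 0.0).length())   # 7.0, since 4 + 9 + 36 + 0 = 49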
def mesh2fc(script, all_visible_layers=False):
"""Transfer mesh colors to face colors
Args:
script: the FilterScript object or script filename to write
the filter to.
all_visible_layers (bool): If true the color mapping is applied to all the meshes
"""
filter_xml = ''.join([
' <filter name="Transfer Color: Mesh to Face">\n',
' <Param name="allVisibleMesh" ',
'value="%s" ' % str(all_visible_layers).lower(),
'description="Apply to all Meshes" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | 0.003049 |
def _changes(plays):
'''
Find changes in ansible return data
'''
changes = {}
for play in plays['plays']:
task_changes = {}
for task in play['tasks']:
host_changes = {}
for host, data in six.iteritems(task['hosts']):
if data['changed'] is True:
host_changes[host] = data.get('diff', data.get('changes', {}))
if host_changes:
task_changes[task['task']['name']] = host_changes
if task_changes:
changes[play['play']['name']] = task_changes
return changes | 0.003339 |
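A sketch of the input shape `_changes` expects, inferred from the code above; the play, task, and host names are invented:
plays = {
    'plays': [{
        'play': {'name': 'deploy web'},
        'tasks': [{
            'task': {'name': 'install nginx'},
            'hosts': {
                'web01': {'changed': True, 'diff': {'pkg': 'nginx installed'}},
                'web02': {'changed': False},
            },
        }],
    }],
}
print(_changes(plays))
# {'deploy web': {'install nginx': {'web01': {'pkg': 'nginx installed'}}}}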
def load_imdb_dataset(
path='data', nb_words=None, skip_top=0, maxlen=None, test_split=0.2, seed=113, start_char=1, oov_char=2,
index_from=3
):
"""Load IMDB dataset.
Parameters
----------
path : str
The path that the data is downloaded to, defaults is ``data/imdb/``.
nb_words : int
Number of words to get.
skip_top : int
Top most frequent words to ignore (they will appear as oov_char value in the sequence data).
maxlen : int
Maximum sequence length. Any longer sequence will be truncated.
seed : int
Seed for reproducible data shuffling.
start_char : int
The start of a sequence will be marked with this character. Set to 1 because 0 is usually the padding character.
oov_char : int
Words that were cut out because of the num_words or skip_top limit will be replaced with this character.
index_from : int
Index actual words with this index and higher.
Examples
--------
>>> X_train, y_train, X_test, y_test = tl.files.load_imdb_dataset(
... nb_words=20000, test_split=0.2)
>>> print('X_train.shape', X_train.shape)
(20000,) [[1, 62, 74, ... 1033, 507, 27],[1, 60, 33, ... 13, 1053, 7]..]
>>> print('y_train.shape', y_train.shape)
(20000,) [1 0 0 ..., 1 0 1]
References
-----------
- `Modified from keras. <https://github.com/fchollet/keras/blob/master/keras/datasets/imdb.py>`__
"""
path = os.path.join(path, 'imdb')
filename = "imdb.pkl"
url = 'https://s3.amazonaws.com/text-datasets/'
maybe_download_and_extract(filename, path, url)
if filename.endswith(".gz"):
f = gzip.open(os.path.join(path, filename), 'rb')
else:
f = open(os.path.join(path, filename), 'rb')
X, labels = cPickle.load(f)
f.close()
np.random.seed(seed)
np.random.shuffle(X)
np.random.seed(seed)
np.random.shuffle(labels)
if start_char is not None:
X = [[start_char] + [w + index_from for w in x] for x in X]
elif index_from:
X = [[w + index_from for w in x] for x in X]
if maxlen:
new_X = []
new_labels = []
for x, y in zip(X, labels):
if len(x) < maxlen:
new_X.append(x)
new_labels.append(y)
X = new_X
labels = new_labels
if not X:
raise Exception(
'After filtering for sequences shorter than maxlen=' + str(maxlen) + ', no sequence was kept. '
'Increase maxlen.'
)
if not nb_words:
nb_words = max([max(x) for x in X])
# by convention, use 2 as OOV word
# reserve 'index_from' (=3 by default) characters: 0 (padding), 1 (start), 2 (OOV)
if oov_char is not None:
X = [[oov_char if (w >= nb_words or w < skip_top) else w for w in x] for x in X]
else:
nX = []
for x in X:
nx = []
for w in x:
if (w >= nb_words or w < skip_top):
nx.append(w)
nX.append(nx)
X = nX
X_train = np.array(X[:int(len(X) * (1 - test_split))])
y_train = np.array(labels[:int(len(X) * (1 - test_split))])
X_test = np.array(X[int(len(X) * (1 - test_split)):])
y_test = np.array(labels[int(len(X) * (1 - test_split)):])
return X_train, y_train, X_test, y_test | 0.002653 |
def u64(self, name, value=None, align=None):
"""Add an unsigned 8 byte integer field to template.
        This is a convenience method that simply calls the `Uint` keyword with a predefined length."""
self.uint(8, name, value, align) | 0.012245 |
def _highlight_block(self, block):
"""
Highlights the current fold scope.
:param block: Block that starts the current fold scope.
"""
scope = FoldScope(block)
if (self._current_scope is None or
self._current_scope.get_range() != scope.get_range()):
self._current_scope = scope
self._clear_scope_decos()
# highlight current scope with darker or lighter color
start, end = scope.get_range()
if not TextBlockHelper.is_collapsed(block):
self._decorate_block(start, end) | 0.0033 |
async def connect(self):
""" Connect to target. """
self.tls_context = None
if self.tls:
self.tls_context = self.create_tls_context()
(self.reader, self.writer) = await asyncio.open_connection(
host=self.hostname,
port=self.port,
local_addr=self.source_address,
ssl=self.tls_context,
loop=self.eventloop
) | 0.004773 |
def remove_tweet(self, id):
"""
Delete a tweet.
:param id: ID of the tweet in question
:return: True if success, False otherwise
"""
try:
self._client.destroy_status(id=id)
return True
except TweepError as e:
if e.api_code in [TWITTER_PAGE_DOES_NOT_EXISTS_ERROR, TWITTER_DELETE_OTHER_USER_TWEET]:
return False
raise | 0.006881 |
def divide_1Darray_equally(ind, nsub):
"""Divide an array into equal chunks to be used for instance in OSEM.
Parameters
----------
ind : ndarray
input array
    nsub : int
number of subsets to be divided into
Returns
-------
sub2ind : list
list of indices for each subset
ind2sub : list
list of subsets for each index
"""
n_ind = len(ind)
sub2ind = partition_equally_1d(ind, nsub, order='interlaced')
ind2sub = []
for i in range(n_ind):
ind2sub.append([])
for i in range(nsub):
for j in sub2ind[i]:
ind2sub[j].append(i)
return (sub2ind, ind2sub) | 0.001481 |
def _safe_output(line):
'''
Looks for rabbitmqctl warning, or general formatting, strings that aren't
intended to be parsed as output.
Returns a boolean whether the line can be parsed as rabbitmqctl output.
'''
return not any([
line.startswith('Listing') and line.endswith('...'),
line.startswith('Listing') and '\t' not in line,
'...done' in line,
line.startswith('WARNING:')
]) | 0.002278 |
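A few illustrative calls to `_safe_output`, showing which rabbitmqctl lines it treats as parseable output:
print(_safe_output('Listing queues ...'))       # False -- progress banner
print(_safe_output('...done.'))                 # False -- completion marker
print(_safe_output('WARNING: node is down'))    # False -- warning text
print(_safe_output('myqueue\t42'))              # True  -- real tab-separated data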
def export_users(path_prefix='/', region=None, key=None, keyid=None,
profile=None):
'''
Get all IAM user details. Produces results that can be used to create an
sls file.
.. versionadded:: 2016.3.0
CLI Example:
salt-call boto_iam.export_users --out=txt | sed "s/local: //" > iam_users.sls
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
results = odict.OrderedDict()
users = get_all_users(path_prefix, region, key, keyid, profile)
for user in users:
name = user.user_name
_policies = conn.get_all_user_policies(name, max_items=100)
_policies = _policies.list_user_policies_response.list_user_policies_result.policy_names
policies = {}
for policy_name in _policies:
_policy = conn.get_user_policy(name, policy_name)
_policy = salt.utils.json.loads(_unquote(
_policy.get_user_policy_response.get_user_policy_result.policy_document
))
policies[policy_name] = _policy
user_sls = []
user_sls.append({"name": name})
user_sls.append({"policies": policies})
user_sls.append({"path": user.path})
results["manage user " + name] = {"boto_iam.user_present": user_sls}
return __utils__['yaml.safe_dump'](
results,
default_flow_style=False,
indent=2) | 0.00277 |
def get_values(self, dtype=None):
"""
return an internal format, currently just the ndarray
this is often overridden to handle to_dense like operations
"""
if is_object_dtype(dtype):
return self.values.astype(object)
return self.values | 0.00678 |
def _get_websocket(self, reuse=True):
"""
Reuse existing connection or create a new connection.
"""
# Check if still connected
if self.ws and reuse:
if self.ws.connected:
return self.ws
logging.debug("Stale connection, reconnecting.")
self.ws = self._create_connection()
return self.ws | 0.005222 |
def find(cls, text):
"""This method should return an iterable containing matches of this element."""
if isinstance(cls.pattern, string_types):
cls.pattern = re.compile(cls.pattern)
return cls.pattern.finditer(text) | 0.012 |
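A minimal, hedged sketch of how an element class might use this lazy-compile pattern; the Emphasis class and its pattern are invented for illustration and check plain str instead of six's string_types.
import re

class Emphasis:
    # Plain string on the class; find() compiles it once on first use.
    pattern = r"\*(.+?)\*"

    @classmethod
    def find(cls, text):
        if isinstance(cls.pattern, str):
            cls.pattern = re.compile(cls.pattern)
        return cls.pattern.finditer(text)

matches = [m.group(1) for m in Emphasis.find("*one* and *two*")]
# matches == ['one', 'two']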
def slice_pdf(input_filename: str, output_filename: str,
slice_horiz: int, slice_vert: int) -> str:
"""
Slice each page of the original, to convert to "one real page per PDF
page". Return the output filename.
"""
if slice_horiz == 1 and slice_vert == 1:
log.debug("No slicing required")
return input_filename # nothing to do
log.info("Slicing each source page mv into {} horizontally x {} vertically",
slice_horiz, slice_vert)
log.debug("... from {!r} to {!r}", input_filename, output_filename)
require(MUTOOL, HELP_MISSING_MUTOOL)
run([
MUTOOL,
"poster",
"-x", str(slice_horiz),
"-y", str(slice_vert),
input_filename,
output_filename
])
return output_filename | 0.002513 |
def findStylesForEach(self, element, attrNames, default=NotImplemented):
"""Attempts to find the style setting for attrName in the CSSRulesets.
Note: This method does not attempt to resolve rules that return
"inherited", "default", or values that have units (including "%").
This is left up to the client app to re-query the CSS in order to
implement these semantics.
"""
rules = self.findCSSRulesForEach(element, attrNames)
return [(attrName, self._extractStyleForRule(rule, attrName, default))
for attrName, rule in six.iteritems(rules)] | 0.003226 |
def triads(key):
"""Return all the triads in key.
Implemented using a cache.
"""
if _triads_cache.has_key(key):
return _triads_cache[key]
res = map(lambda x: triad(x, key), keys.get_notes(key))
_triads_cache[key] = res
return res | 0.007519 |
def trim_snapshots(self, hourly_backups = 8, daily_backups = 7,
weekly_backups = 4):
"""
Trim excess snapshots, based on when they were taken. More current
snapshots are retained, with the number retained decreasing as you
move back in time.
If ebs volumes have a 'Name' tag with a value, their snapshots
will be assigned the same tag when they are created. The values
of the 'Name' tags for snapshots are used by this function to
group snapshots taken from the same volume (or from a series
of like-named volumes over time) for trimming.
For every group of like-named snapshots, this function retains
the newest and oldest snapshots, as well as, by default, the
first snapshots taken in each of the last eight hours, the first
snapshots taken in each of the last seven days, the first snapshots
taken in the last 4 weeks (counting Midnight Sunday morning as
the start of the week), and the first snapshot from the first
Sunday of each month forever.
:type hourly_backups: int
:param hourly_backups: How many recent hourly backups should be saved.
:type daily_backups: int
:param daily_backups: How many recent daily backups should be saved.
:type weekly_backups: int
:param weekly_backups: How many recent weekly backups should be saved.
"""
# This function first builds up an ordered list of target times
# that snapshots should be saved for (last 8 hours, last 7 days, etc.).
# Then a map of snapshots is constructed, with the keys being
# the snapshot / volume names and the values being arrays of
# chronologically sorted snapshots.
# Finally, for each array in the map, we go through the snapshot
# array and the target time array in an interleaved fashion,
# deleting snapshots whose start_times don't immediately follow a
# target time (we delete a snapshot if there's another snapshot
# that was made closer to the preceding target time).
now = datetime.utcnow()
last_hour = datetime(now.year, now.month, now.day, now.hour)
last_midnight = datetime(now.year, now.month, now.day)
last_sunday = datetime(now.year, now.month, now.day) - timedelta(days = (now.weekday() + 1) % 7)
start_of_month = datetime(now.year, now.month, 1)
target_backup_times = []
# there are no snapshots older than 1/1/2007
oldest_snapshot_date = datetime(2007, 1, 1)
for hour in range(0, hourly_backups):
target_backup_times.append(last_hour - timedelta(hours = hour))
for day in range(0, daily_backups):
target_backup_times.append(last_midnight - timedelta(days = day))
for week in range(0, weekly_backups):
target_backup_times.append(last_sunday - timedelta(weeks = week))
one_day = timedelta(days = 1)
while start_of_month > oldest_snapshot_date:
# append the start of the month to the list of
# snapshot dates to save:
target_backup_times.append(start_of_month)
# there's no timedelta setting for one month, so instead:
# decrement the day by one, so we go to the final day of
# the previous month...
start_of_month -= one_day
# ... and then go to the first day of that previous month:
start_of_month = datetime(start_of_month.year,
start_of_month.month, 1)
temp = []
for t in target_backup_times:
if temp.__contains__(t) == False:
temp.append(t)
target_backup_times = temp
        # make the oldest dates first, and make sure the month start
        # and last four weeks' starts are in the proper order
target_backup_times.sort()
# get all the snapshots, sort them by date and time, and
# organize them into one array for each volume:
all_snapshots = self.get_all_snapshots(owner = 'self')
all_snapshots.sort(cmp = lambda x, y: cmp(x.start_time, y.start_time))
snaps_for_each_volume = {}
for snap in all_snapshots:
# the snapshot name and the volume name are the same.
# The snapshot name is set from the volume
# name at the time the snapshot is taken
volume_name = snap.tags.get('Name')
if volume_name:
# only examine snapshots that have a volume name
snaps_for_volume = snaps_for_each_volume.get(volume_name)
if not snaps_for_volume:
snaps_for_volume = []
snaps_for_each_volume[volume_name] = snaps_for_volume
snaps_for_volume.append(snap)
# Do a running comparison of snapshot dates to desired time
#periods, keeping the oldest snapshot in each
# time period and deleting the rest:
for volume_name in snaps_for_each_volume:
snaps = snaps_for_each_volume[volume_name]
snaps = snaps[:-1] # never delete the newest snapshot
time_period_number = 0
snap_found_for_this_time_period = False
for snap in snaps:
check_this_snap = True
while check_this_snap and time_period_number < target_backup_times.__len__():
snap_date = datetime.strptime(snap.start_time,
'%Y-%m-%dT%H:%M:%S.000Z')
if snap_date < target_backup_times[time_period_number]:
# the snap date is before the cutoff date.
# Figure out if it's the first snap in this
# date range and act accordingly (since both
                        # the date ranges and the snapshots
                        # are sorted chronologically, we know this
                        # snapshot isn't in an earlier date range):
if snap_found_for_this_time_period == True:
if not snap.tags.get('preserve_snapshot'):
# as long as the snapshot wasn't marked
# with the 'preserve_snapshot' tag, delete it:
try:
self.delete_snapshot(snap.id)
boto.log.info('Trimmed snapshot %s (%s)' % (snap.tags['Name'], snap.start_time))
except EC2ResponseError:
boto.log.error('Attempt to trim snapshot %s (%s) failed. Possible result of a race condition with trimming on another server?' % (snap.tags['Name'], snap.start_time))
# go on and look at the next snapshot,
#leaving the time period alone
else:
# this was the first snapshot found for this
#time period. Leave it alone and look at the
# next snapshot:
snap_found_for_this_time_period = True
check_this_snap = False
else:
# the snap is after the cutoff date. Check it
# against the next cutoff date
time_period_number += 1
snap_found_for_this_time_period = False | 0.004606 |
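A compact, hedged sketch of the retention schedule those comments describe, using small counts and a hypothetical clock; the real method also appends weekly targets and month starts back to 2007.
from datetime import datetime, timedelta

now = datetime(2024, 1, 15, 10, 30)                               # hypothetical "now"
last_hour = datetime(now.year, now.month, now.day, now.hour)
last_midnight = datetime(now.year, now.month, now.day)
targets = [last_hour - timedelta(hours=h) for h in range(2)]      # 2 hourly cutoffs
targets += [last_midnight - timedelta(days=d) for d in range(2)]  # 2 daily cutoffs
targets = sorted(set(targets))                                    # oldest target first
# targets == [2024-01-14 00:00, 2024-01-15 00:00, 2024-01-15 09:00, 2024-01-15 10:00]
# For each group of like-named snapshots, the first snapshot found after each
# target time is kept; later snapshots in the same window are deleted.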
def _arg_parser():
"""Factory for creating the argument parser"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('xml', nargs='*')
return parser | 0.005348 |
def get_time_offset():
"""Get time offset from steam server time via WebAPI
:return: time offset (``None`` when Steam WebAPI fails to respond)
:rtype: :class:`int`, :class:`None`
"""
try:
resp = webapi.post('ITwoFactorService', 'QueryTime', 1, params={'http_timeout': 10})
except:
return None
ts = int(time())
return int(resp.get('response', {}).get('server_time', ts)) - ts | 0.007075 |
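A hedged usage sketch showing how the offset is typically applied to the local clock; steam_aligned_time is an illustrative name and the call requires network access to Steam's WebAPI.
from time import time

offset = get_time_offset()
if offset is None:
    offset = 0                          # be conservative if the WebAPI query failed
steam_aligned_time = int(time()) + offset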
def shear_vel_at_depth(self, y_c):
"""
Get the shear wave velocity at a depth.
:param y_c: float, depth from surface
:return:
"""
sl = self.get_soil_at_depth(y_c)
if y_c <= self.gwl:
saturation = False
else:
saturation = True
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = self.get_v_eff_stress_at_depth(y_c)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.get_shear_vel(saturation)
return vs | 0.003484 |
def get_version(brain_or_object):
"""Get the version of the current object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: The current version of the object, or None if not available
:rtype: int or None
"""
obj = get_object(brain_or_object)
if not is_versionable(obj):
return None
return getattr(aq_base(obj), "version_id", 0) | 0.002132 |
def correlation_plot(self, x_analyte, y_analyte, window=15, filt=True, recalc=False):
"""
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
fig, axs : figure and axes objects
"""
label = '{:}_{:}_{:.0f}'.format(x_analyte, y_analyte, window)
self.calc_correlation(x_analyte, y_analyte, window, filt, recalc)
r, p = self.correlations[label]
fig, axs = plt.subplots(3, 1, figsize=[7, 5], sharex=True)
# plot analytes
ax = axs[0]
ax.plot(self.Time, nominal_values(self.focus[x_analyte]), color=self.cmap[x_analyte], label=x_analyte)
ax.plot(self.Time, nominal_values(self.focus[y_analyte]), color=self.cmap[y_analyte], label=y_analyte)
ax.set_yscale('log')
ax.legend()
ax.set_ylabel('Signals')
# plot r
ax = axs[1]
ax.plot(self.Time, r)
ax.set_ylabel('Pearson R')
# plot p
ax = axs[2]
ax.plot(self.Time, p)
        ax.set_ylabel('Significance Level (p)')
fig.tight_layout()
return fig, axs | 0.007898 |
def parse_frog(lines):
"""
Interpret the output of the frog parser.
Input should be an iterable of lines (i.e. the output of call_frog)
Result is a sequence of dicts representing the tokens
"""
sid = 0
for i, line in enumerate(lines):
if not line:
# end of sentence marker
sid += 1
else:
parts = line.split("\t")
tid, token, lemma, morph, pos, conf, ne, _, parent, rel = parts
if rel:
rel = (rel, int(parent) - 1)
word = u' '.join(token.split(u'_'))
result = dict(id=i, sentence=sid, word=word, lemma=lemma, pos=pos,
pos_confidence=float(conf), rel=rel)
if ne != 'O':
# NER label from BIO tags
result["ne"] = ne.split('_', 1)[0][2:]
yield result | 0.001148 |
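One hypothetical Frog output line, to show how the ten tab-separated fields map onto the yielded dict; the Dutch token and tags are made up.
# Field order: id, token, lemma, morph, pos, confidence, NE tag, _, parent, relation
sample = ["1\tkat\tkat\t[kat]\tN(soort,ev)\t0.99\tO\t_\t2\tsu", ""]
tokens = list(parse_frog(sample))
# tokens[0]["word"] == "kat"
# tokens[0]["rel"] == ("su", 1)        # the parent index is converted to 0-based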
def cmd_iter(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
kwarg=None,
**kwargs):
'''
Yields the individual minion returns as they come in
The function signature is the same as :py:meth:`cmd` with the
following exceptions.
Normally :py:meth:`cmd_iter` does not yield results for minions that
are not connected. If you want it to return results for disconnected
minions set `expect_minions=True` in `kwargs`.
:return: A generator yielding the individual minion returns
.. code-block:: python
>>> ret = local.cmd_iter('*', 'test.ping')
>>> for i in ret:
... print(i)
{'jerry': {'ret': True}}
{'dave': {'ret': True}}
{'stewart': {'ret': True}}
'''
was_listening = self.event.cpub
try:
pub_data = self.run_job(
tgt,
fun,
arg,
tgt_type,
ret,
timeout,
kwarg=kwarg,
listen=True,
**kwargs)
if not pub_data:
yield pub_data
else:
if kwargs.get('yield_pub_data'):
yield pub_data
for fn_ret in self.get_iter_returns(pub_data['jid'],
pub_data['minions'],
timeout=self._get_timeout(timeout),
tgt=tgt,
tgt_type=tgt_type,
**kwargs):
if not fn_ret:
continue
yield fn_ret
self._clean_up_subscriptions(pub_data['jid'])
finally:
if not was_listening:
self.event.close_pub() | 0.001441 |
def resolve_sid(sid):
"""Get the PID to which the ``sid`` currently maps.
Preconditions:
- ``sid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_sid().
"""
return d1_gmn.app.models.Chain.objects.get(sid__did=sid).head_pid.did | 0.007634 |
def unregister(self, slug):
"""
Unregisters the given url.
If a slug isn't already registered, this will raise NotRegistered.
"""
if slug not in self._registry:
raise NotRegistered('The slug %s is not registered' % slug)
bundle = self._registry[slug]
if bundle._meta.model and bundle._meta.primary_model_bundle:
self.unregister_model(bundle._meta.model)
del self._registry[slug]
del self._order[slug] | 0.004008 |
def remove_child(self, child):
"""Remove the child from this TreeItem
:param child: the child TreeItem
:type child: :class:`TreeItem`
:returns: None
:rtype: None
:raises: ValueError
"""
child.set_model(None)
if self._model:
row = self.childItems.index(child)
parentindex = self._model.index_of_item(self)
self._model.removeRow(row, parentindex)
else:
self.childItems.remove(child) | 0.003929 |
def cmdloop(self):
"""Start the main loop of the interactive shell.
The preloop() and postloop() methods are always run before and after the
main loop, respectively.
Returns:
            'root': Inform the parent shell to keep exiting until the root
                shell is reached.
            'all': Exit all the way back to the command line shell.
            False, None, or anything that evaluates as False: Exit this
                shell, enter the parent shell.
An integer: The depth of the shell to exit to. 0 = root shell.
History:
_ShellBase histories are persistently saved to files, whose name matches
the prompt string. For example, if the prompt of a subshell is
'(Foo-Bar-Kar)$ ', the name of its history file is s-Foo-Bar-Kar.
The history_fname property encodes this algorithm.
All history files are saved to the the directory whose path is
self._temp_dir. Subshells use the same temp_dir as their parent
shells, thus their root shell.
The history of the parent shell is saved and restored by the parent
shell, as in launch_subshell(). The history of the subshell is saved
and restored by the subshell, as in cmdloop().
When a subshell is started, i.e., when the cmdloop() method of the
subshell is called, the subshell will try to load its own history
file, whose file name is determined by the naming convention
introduced earlier.
Completer Delimiters:
Certain characters such as '-' could be part of a command. But by
default they are considered the delimiters by the readline library,
which causes completion candidates with those characters to
malfunction.
The old completer delimiters are saved before the loop and restored
after the loop ends. This is to keep the environment clean.
"""
self.print_debug("Enter subshell '{}'".format(self.prompt))
# Save the completer function, the history buffer, and the
# completer_delims.
old_completer = readline.get_completer()
old_delims = readline.get_completer_delims()
new_delims = ''.join(list(set(old_delims) - set(_ShellBase._non_delims)))
readline.set_completer_delims(new_delims)
# Load the new completer function and start a new history buffer.
readline.set_completer(self.__driver_stub)
readline.clear_history()
if os.path.isfile(self.history_fname):
readline.read_history_file(self.history_fname)
# main loop
try:
# The exit_directive:
# True Leave this shell, enter the parent shell.
# False Continue with the loop.
# 'root' Exit to the root shell.
# 'all' Exit to the command line.
# an integer The depth of the shell to exit to. 0 = root
# shell. Negative number is taken as error.
self.preloop()
while True:
exit_directive = False
try:
if self.batch_mode:
line = self._pipe_end.recv()
else:
line = input(self.prompt).strip()
except EOFError:
line = _ShellBase.EOF
try:
exit_directive = self.__exec_line__(line)
except:
self.stderr.write(traceback.format_exc())
if type(exit_directive) is int:
if len(self._mode_stack) > exit_directive:
break
if len(self._mode_stack) == exit_directive:
continue
if self._mode_stack and exit_directive == 'root':
break
if exit_directive in { 'all', True, }:
break
finally:
self.postloop()
# Restore the completer function, save the history, and restore old
# delims.
readline.set_completer(old_completer)
readline.write_history_file(self.history_fname)
readline.set_completer_delims(old_delims)
self.print_debug("Leave subshell '{}': {}".format(self.prompt, exit_directive))
return exit_directive | 0.001983 |
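A hedged sketch of the history-file naming rule the docstring describes ('(Foo-Bar-Kar)$ ' -> 's-Foo-Bar-Kar'); history_fname_for is an invented helper and the real history_fname property may differ in detail.
import os

def history_fname_for(prompt, temp_dir):
    # Keep only alphanumerics and dashes from the prompt, then prefix with 's-'.
    stem = "".join(ch for ch in prompt if ch.isalnum() or ch == "-")
    return os.path.join(temp_dir, "s-" + stem)

# history_fname_for('(Foo-Bar-Kar)$ ', '/tmp/shell') == '/tmp/shell/s-Foo-Bar-Kar'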
def set_gmxrc_environment(gmxrc):
"""Set the environment from ``GMXRC`` provided in *gmxrc*.
Runs ``GMXRC`` in a subprocess and puts environment variables loaded by it
into this Python environment.
If *gmxrc* evaluates to ``False`` then nothing is done. If errors occur
then only a warning will be logged. Thus, it should be safe to just call
this function.
"""
# only v5: 'GMXPREFIX', 'GROMACS_DIR'
envvars = ['GMXBIN', 'GMXLDLIB', 'GMXMAN', 'GMXDATA',
'LD_LIBRARY_PATH', 'MANPATH', 'PKG_CONFIG_PATH',
'PATH',
'GMXPREFIX', 'GROMACS_DIR']
# in order to keep empty values, add ___ sentinels around result
# (will be removed later)
cmdargs = ['bash', '-c', ". {0} && echo {1}".format(gmxrc,
' '.join(['___${{{0}}}___'.format(v) for v in envvars]))]
if not gmxrc:
logger.debug("set_gmxrc_environment(): no GMXRC, nothing done.")
return
try:
out = subprocess.check_output(cmdargs)
out = out.strip().split()
for key, value in zip(envvars, out):
value = str(value.decode('ascii').replace('___', '')) # remove sentinels
os.environ[key] = value
logger.debug("set_gmxrc_environment(): %s = %r", key, value)
except (subprocess.CalledProcessError, OSError):
logger.warning("Failed to automatically set the Gromacs environment"
"from GMXRC=%r", gmxrc) | 0.00136 |
def write_tsv(self, file_path: str, encoding: str = 'UTF-8',
sep: str = '\t'):
"""Write expression matrix to a tab-delimited text file.
Parameters
----------
file_path: str
The path of the output file.
encoding: str, optional
The file encoding. ("UTF-8")
Returns
-------
None
"""
#if six.PY2:
# sep = sep.encode('UTF-8')
self.to_csv(
file_path, sep=sep, float_format='%.5f', mode='w',
encoding=encoding, quoting=csv.QUOTE_NONE
)
_LOGGER.info('Wrote %d x %d expression matrix to "%s".',
self.p, self.n, file_path) | 0.006974 |
def ignore_stops_before_now(self):
"""Ignore any stops received before this point"""
self._sentinel_stop = object()
self._q.put(self._sentinel_stop) | 0.011628 |
def pollNextEvent(self, pEvent):
"""
Returns true and fills the event with the next event on the queue if there is one. If there are no events
this method returns false. uncbVREvent should be the size in bytes of the VREvent_t struct
"""
fn = self.function_table.pollNextEvent
result = fn(byref(pEvent), sizeof(VREvent_t))
return result != 0 | 0.01005 |
def natsort(string):
    '''Sort strings by their natural, human-readable order.
    This method replaces sorting strings by raw character-encoding order.
    Related links:
http://stackoverflow.com/questions/2545532/python-analog-of-natsort-function-sort-a-list-using-a-natural-order-algorithm
http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
'''
return [int(s) if s.isdigit() else s for s in re.split('(\d+)', string)] | 0.005208 |
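A quick usage check of the key function above: plain sorted() compares code points, while natsort compares the embedded numbers numerically.
names = ["file10", "file2", "file1"]
sorted(names)                  # ['file1', 'file10', 'file2']  (code-point order)
sorted(names, key=natsort)     # ['file1', 'file2', 'file10']  (natural order)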
def pasteprepare(args):
"""
%prog pasteprepare bacs.fasta
Prepare sequences for paste.
"""
p = OptionParser(pasteprepare.__doc__)
p.add_option("--flank", default=5000, type="int",
help="Get the seq of size on two ends [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
goodfasta, = args
flank = opts.flank
pf = goodfasta.rsplit(".", 1)[0]
extbed = pf + ".ext.bed"
sizes = Sizes(goodfasta)
fw = open(extbed, "w")
for bac, size in sizes.iter_sizes():
print("\t".join(str(x) for x in \
(bac, 0, min(flank, size), bac + "L")), file=fw)
print("\t".join(str(x) for x in \
(bac, max(size - flank, 0), size, bac + "R")), file=fw)
fw.close()
fastaFromBed(extbed, goodfasta, name=True) | 0.006652 |
def project_users_with_administrator_permissions(self, key):
"""
Get project administrators for project
:param key: project key
:return: project administrators
"""
project_administrators = [user['user'] for user in self.project_users(key)
if user['permission'] == 'PROJECT_ADMIN']
for group in self.project_groups_with_administrator_permissions(key):
for user in self.group_members(group):
project_administrators.append(user)
return project_administrators | 0.005164 |
def _custom_init_(self, model_name, other_name=None, log_interp=True):
"""
Custom initialization for this model
:param model_name: the name of the model, corresponding to the root of the .h5 file in the data directory
:param other_name: (optional) the name to be used as name of the model when used in astromodels. If None
(default), use the same name as model_name
:return: none
"""
# Get the data directory
data_dir_path = get_user_data_path()
# Sanitize the data file
filename_sanitized = os.path.abspath(os.path.join(data_dir_path, '%s.h5' % model_name))
if not os.path.exists(filename_sanitized):
raise MissingDataFile("The data file %s does not exists. Did you use the "
"TemplateFactory?" % (filename_sanitized))
# Open the template definition and read from it
self._data_file = filename_sanitized
with HDFStore(filename_sanitized) as store:
self._data_frame = store['data_frame']
self._parameters_grids = collections.OrderedDict()
processed_parameters = 0
for key in store.keys():
match = re.search('p_([0-9]+)_(.+)', key)
if match is None:
continue
else:
tokens = match.groups()
this_parameter_number = int(tokens[0])
this_parameter_name = str(tokens[1])
assert this_parameter_number == processed_parameters, "Parameters out of order!"
self._parameters_grids[this_parameter_name] = store[key]
processed_parameters += 1
self._energies = store['energies']
# Now get the metadata
metadata = store.get_storer('data_frame').attrs.metadata
description = metadata['description']
name = metadata['name']
self._interpolation_degree = metadata['interpolation_degree']
self._spline_smoothing_factor = metadata['spline_smoothing_factor']
# Make the dictionary of parameters
function_definition = collections.OrderedDict()
function_definition['description'] = description
function_definition['latex'] = 'n.a.'
# Now build the parameters according to the content of the parameter grid
parameters = collections.OrderedDict()
parameters['K'] = Parameter('K', 1.0)
parameters['scale'] = Parameter('scale', 1.0)
for parameter_name in self._parameters_grids.keys():
grid = self._parameters_grids[parameter_name]
parameters[parameter_name] = Parameter(parameter_name, grid.median(),
min_value=grid.min(),
max_value=grid.max())
if other_name is None:
super(TemplateModel, self).__init__(name, function_definition, parameters)
else:
super(TemplateModel, self).__init__(other_name, function_definition, parameters)
# Finally prepare the interpolators
self._prepare_interpolators(log_interp) | 0.005196 |
def _add_parsley_ns(cls, namespace_dict):
"""
Extend XPath evaluation with Parsley extensions' namespace
"""
namespace_dict.update({
'parslepy' : cls.LOCAL_NAMESPACE,
'parsley' : cls.LOCAL_NAMESPACE,
})
return namespace_dict | 0.013468 |