text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
async def get_updates(self, offset: typing.Union[base.Integer, None] = None,
limit: typing.Union[base.Integer, None] = None,
timeout: typing.Union[base.Integer, None] = None,
allowed_updates:
typing.Union[typing.List[base.String], None] = None) -> typing.List[types.Update]:
"""
Use this method to receive incoming updates using long polling (wiki).
Notes
1. This method will not work if an outgoing webhook is set up.
2. In order to avoid getting duplicate updates, recalculate offset after each server response.
Source: https://core.telegram.org/bots/api#getupdates
:param offset: Identifier of the first update to be returned
:type offset: :obj:`typing.Union[base.Integer, None]`
:param limit: Limits the number of updates to be retrieved
:type limit: :obj:`typing.Union[base.Integer, None]`
:param timeout: Timeout in seconds for long polling
:type timeout: :obj:`typing.Union[base.Integer, None]`
:param allowed_updates: List the types of updates you want your bot to receive
:type allowed_updates: :obj:`typing.Union[typing.List[base.String], None]`
:return: An Array of Update objects is returned
:rtype: :obj:`typing.List[types.Update]`
"""
allowed_updates = prepare_arg(allowed_updates)
payload = generate_payload(**locals())
result = await self.request(api.Methods.GET_UPDATES, payload)
return [types.Update(**update) for update in result] | 0.006154 |
def validate_params_match(method, parameters):
"""Validates that the given parameters are exactly the method's declared parameters.
:param method: The method to be called
:type method: function
:param parameters: The parameters to use in the call
:type parameters: dict[str, object] | list[object]
"""
argspec = inspect.getargspec(method) # pylint: disable=deprecated-method
default_length = len(argspec.defaults) if argspec.defaults is not None else 0
if isinstance(parameters, list):
if len(parameters) > len(argspec.args) and argspec.varargs is None:
raise InvalidParamsError("Too many parameters")
remaining_parameters = len(argspec.args) - len(parameters)
if remaining_parameters > default_length:
raise InvalidParamsError("Not enough parameters")
elif isinstance(parameters, dict):
missing_parameters = [key for key in argspec.args if key not in parameters]
default_parameters = set(argspec.args[len(argspec.args) - default_length:])
for key in missing_parameters:
if key not in default_parameters:
raise InvalidParamsError("Parameter {} has not been satisfied".format(key))
extra_params = [key for key in parameters if key not in argspec.args]
if len(extra_params) > 0 and argspec.keywords is None:
raise InvalidParamsError("Too many parameters") | 0.004205 |
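A minimal usage sketch for the validator above; `add` is a hypothetical target function and `InvalidParamsError` is assumed to be importable from the same module:
def add(a, b, c=0):
    return a + b + c

validate_params_match(add, [1, 2])             # OK: 'c' falls back to its default
validate_params_match(add, {"a": 1, "b": 2})   # OK: keyword form, 'c' defaulted
try:
    validate_params_match(add, [1, 2, 3, 4])   # four positionals for a three-parameter function
except InvalidParamsError as exc:
    print(exc)                                 # "Too many parameters"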
def type(self, type):
# pylint: disable=redefined-builtin
"""Setter method; for a description see the getter method."""
type = _ensure_unicode(type)
# We perform this check after the initialization to avoid errors
# in test tools that show the object with repr().
if type not in ALL_CIMTYPES:
raise ValueError(
_format("Invalid CIM type: {0}", type))
# pylint: disable=attribute-defined-outside-init
self._type = type | 0.005859 |
def _displayattrs(attrib, expandattrs):
"""
Helper function to display the attributes of a Node object in lexicographic
order.
:param attrib: dictionary with the attributes
:param expandattrs: if True also displays the value of the attributes
"""
if not attrib:
return ''
if expandattrs:
alist = ['%s=%r' % item for item in sorted(attrib.items())]
else:
alist = list(attrib)
return '{%s}' % ', '.join(alist) | 0.002119 |
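A small illustration of the two display modes, assuming `_displayattrs` is in scope:
attrib = {'name': 'node1', 'id': '42'}
print(_displayattrs(attrib, expandattrs=True))   # {id='42', name='node1'}  (sorted, values shown)
print(_displayattrs(attrib, expandattrs=False))  # {name, id}  (keys only, insertion order)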
def _update_segmentation_mask(segmentation_mask, onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap):
"""
Returns an updated segmentation mask such that the input `segmentation_mask` has been updated by segmenting between
`onset_front_id` and `offset_front_id`, as found in `onset_fronts` and `offset_fronts`, respectively.
This function also returns the onset_fronts and offset_fronts matrices, updated so that any fronts that are of
less than 3 channels wide are removed.
This function also returns a boolean value indicating whether the onset channel went to completion.
Specifically, segments by doing the following:
- Going across frequencies in the onset_front,
- add the segment mask ID (the onset front ID) to all samples between the onset_front and the offset_front,
if the offset_front is in that frequency.
Possible scenarios:
Fronts line up completely:
::
| | S S S
| | => S S S
| | S S S
| | S S S
Onset front starts before offset front:
::
| |
| | S S S
| | => S S S
| | S S S
Onset front ends after offset front:
::
| | S S S
| | => S S S
| | S S S
| |
Onset front starts before and ends after offset front:
::
| |
| | => S S S
| | S S S
| |
The above three options in reverse:
::
| |S S| |
|S S| |S S| |S S|
|S S| |S S| |S S|
|S S| | |
There is one last scenario:
::
| |
\ /
\ /
/ \
| |
Where the offset and onset fronts cross one another. If this happens, we simply
reverse the indices and accept:
::
|sss|
\sss/
\s/
/s\
|sss|
The other option would be to destroy the offset front from the crossover point on, and
then search for a new offset front for the rest of the onset front.
"""
# Get the portions of the onset and offset fronts that overlap and are consecutive
onset_front_overlap, offset_front_overlap = _get_consecutive_and_overlapping_fronts(onset_fronts, offset_fronts, onset_front_id, offset_front_id_most_overlap)
onset_front = _get_front_idxs_from_id(onset_fronts, onset_front_id)
offset_front = _get_front_idxs_from_id(offset_fronts, offset_front_id_most_overlap)
msg = "Onset front {} and offset front {} result in consecutive overlapping portions of (on) {} and (off) {}, one of which is empty".format(
onset_front, offset_front, onset_front_overlap, offset_front_overlap
)
assert onset_front_overlap, msg
assert offset_front_overlap, msg
onset_front = onset_front_overlap
offset_front = offset_front_overlap
# Figure out which frequencies will go in the segment
flow_on, _slow_on = onset_front[0]
fhigh_on, _shigh_on = onset_front[-1]
flow_off, _slow_off = offset_front[0]
fhigh_off, _shigh_off = offset_front[-1]
flow = max(flow_on, flow_off)
fhigh = min(fhigh_on, fhigh_off)
# Update all the masks with the segment
for fidx, _freqchan in enumerate(segmentation_mask[flow:fhigh + 1, :], start=flow):
assert fidx >= flow, "Frequency index is {}, but we should have started at {}".format(fidx, flow)
assert (fidx - flow) < len(onset_front), "Frequency index {} minus starting frequency {} is too large for nfrequencies {} in onset front {}".format(
fidx, flow, len(onset_front), onset_front
)
assert (fidx - flow) < len(offset_front), "Frequency index {} minus starting frequency {} is too large for nfrequencies {} in offset front {}".format(
fidx, flow, len(offset_front), offset_front
)
_, beg = onset_front[fidx - flow]
_, end = offset_front[fidx - flow]
if beg > end:
end, beg = beg, end
assert end >= beg
segmentation_mask[fidx, beg:end + 1] = onset_front_id
onset_fronts[fidx, (beg + 1):(end + 1)] = 0
offset_fronts[fidx, (beg + 1):(end + 1)] = 0
nfreqs_used_in_onset_front = (fidx - flow) + 1
# Update the other masks to delete fronts that have been used
indexes = np.arange(flow, fhigh + 1, 1, dtype=np.int64)
onset_front_sample_idxs_across_freqs = np.array([s for _, s in onset_front])
onset_front_sample_idxs_across_freqs_up_to_break = onset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]
offset_front_sample_idxs_across_freqs = np.array([s for _, s in offset_front])
offset_front_sample_idxs_across_freqs_up_to_break = offset_front_sample_idxs_across_freqs[:nfreqs_used_in_onset_front]
## Remove the offset front from where we started to where we ended
offset_fronts[indexes[:nfreqs_used_in_onset_front], offset_front_sample_idxs_across_freqs_up_to_break] = 0
## Remove the onset front from where we started to where we ended
onset_fronts[indexes[:nfreqs_used_in_onset_front], onset_front_sample_idxs_across_freqs_up_to_break] = 0
# Determine if we matched the entire onset front by checking if there is any more of this onset front in onset_fronts
whole_onset_front_matched = onset_front_id not in np.unique(onset_fronts)
return whole_onset_front_matched | 0.005455 |
def register_range_type(pgrange, pyrange, conn):
"""
Register a new range type as a PostgreSQL range.
>>> register_range_type("int4range", intrange, conn)
The above will make sure intrange is regarded as an int4range for queries
and that int4ranges will be cast into intrange when fetching rows.
pgrange should be the full name including schema for the custom range type.
Note that adaptation is global, meaning if a range type is passed to a regular
psycopg2 connection it will adapt it to its proper range type. Parsing of
rows from the database however is not global and just set on a per connection
basis.
"""
register_adapter(pyrange, partial(adapt_range, pgrange))
register_range_caster(
pgrange, pyrange, *query_range_oids(pgrange, conn), scope=conn) | 0.003641 |
def find_command(self, argv):
"""Given an argument list, find a command and
return the processor and any remaining arguments.
"""
search_args = argv[:]
name = ''
while search_args:
if search_args[0].startswith('-'):
name = '%s %s' % (name, search_args[0])
raise ValueError('Invalid command %r' % name)
next_val = search_args.pop(0)
name = '%s %s' % (name, next_val) if name else next_val
if name in self.commands:
cmd_ep = self.commands[name]
if hasattr(cmd_ep, 'resolve'):
cmd_factory = cmd_ep.resolve()
else:
# NOTE(dhellmann): Some fake classes don't take
# require as an argument. Yay?
arg_spec = inspect.getargspec(cmd_ep.load)
if 'require' in arg_spec[0]:
cmd_factory = cmd_ep.load(require=False)
else:
cmd_factory = cmd_ep.load()
return (cmd_factory, name, search_args)
else:
raise ValueError('Unknown command %r' % next(iter(argv), '')) | 0.001634 |
def _requiredSize(shape, dtype):
"""
Determines the number of bytes required to store a NumPy array with
the specified shape and datatype.
"""
return math.floor(np.prod(np.asarray(shape, dtype=np.uint64)) * np.dtype(dtype).itemsize) | 0.029536 |
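Two quick checks of the size helper; it relies on `math` and `numpy` being imported, as its body implies:
import math
import numpy as np

print(_requiredSize((480, 640, 3), np.uint8))    # 921600 bytes for an 8-bit RGB frame
print(_requiredSize((1024, 1024), np.float32))   # 4194304 bytes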
def count_pingbacks_handler(sender, **kwargs):
"""
Update Entry.pingback_count when a pingback was posted.
"""
entry = kwargs['entry']
entry.pingback_count = F('pingback_count') + 1
entry.save(update_fields=['pingback_count']) | 0.004 |
def get_image(dataset):
"""Convert the NumPy array to two nested lists with r,g,b tuples."""
dim, nrow, ncol = dataset.shape
uint8_dataset = dataset.astype('uint8')
if not (uint8_dataset == dataset).all():
message = (
"\nYour image was cast to a `uint8` (`<img>.astype(uint8)`), "
"but some information was lost.\nPlease check your gif and "
"convert to uint8 beforehand if the gif looks wrong."
)
warnings.warn(message)
image = [[
struct.pack(
'BBB',
uint8_dataset[0, i, j],
uint8_dataset[1, i, j],
uint8_dataset[2, i, j]
)
for j in range(ncol)]
for i in range(nrow)]
return image | 0.001294 |
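A tiny illustrative call; the (channels, rows, cols) layout follows from how `dataset.shape` is unpacked above, and `struct`/`warnings` are assumed to be imported in the module:
import numpy as np

frame = np.zeros((3, 2, 2), dtype='uint8')   # 3 channels, 2 rows, 2 columns
rows = get_image(frame)
print(len(rows), len(rows[0]), rows[0][0])   # 2 2 b'\x00\x00\x00'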
def __CreateVoidResponseType(self, method_description):
"""Create an empty response type."""
schema = {}
method_name = self.__names.ClassName(
method_description['id'], separator='.')
schema['id'] = self.__names.ClassName('%sResponse' % method_name)
schema['type'] = 'object'
schema['description'] = 'An empty %s response.' % method_name
self.__message_registry.AddDescriptorFromSchema(schema['id'], schema)
return schema['id'] | 0.003976 |
def get_package_version_provenance(self, feed_id, package_id, package_version_id, project=None):
"""GetPackageVersionProvenance.
[Preview API] Gets provenance for a package version.
:param str feed_id: Name or Id of the feed.
:param str package_id: Id of the package (GUID Id, not name).
:param str package_version_id: Id of the package version (GUID Id, not name).
:param str project: Project ID or project name
:rtype: :class:`<PackageVersionProvenance> <azure.devops.v5_1.feed.models.PackageVersionProvenance>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if feed_id is not None:
route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str')
if package_id is not None:
route_values['packageId'] = self._serialize.url('package_id', package_id, 'str')
if package_version_id is not None:
route_values['packageVersionId'] = self._serialize.url('package_version_id', package_version_id, 'str')
response = self._send(http_method='GET',
location_id='0aaeabd4-85cd-4686-8a77-8d31c15690b8',
version='5.1-preview.1',
route_values=route_values)
return self._deserialize('PackageVersionProvenance', response) | 0.00694 |
def _get_selected_items(self):
""" Return an Item (e.g. StateView) for each model (e.g. StateModel) in the current selection """
return set(self.canvas.get_view_for_model(model) for model in self._selection) | 0.017937 |
def put_on_top(self, request, queryset):
"""
Put the selected entries on top at the current date.
"""
queryset.update(publication_date=timezone.now())
self.ping_directories(request, queryset, messages=False)
self.message_user(request, _(
'The selected entries are now set at the current date.')) | 0.005634 |
def get(self, cfg):
"""
Reads single document or list of documents from MongoDB collection
:param cfg:
{
AccessParams.KEY_COLLECTION: <Collection to read data from>,
AccessParams.KEY_MATCH_PARAMS: <A query that matches the documents to select>,
AccessParams.KEY_TYPE: <AccessParams.TYPE_SINGLE or AccessParams.TYPE_MULTI>
}
:return: single document or list of documents
"""
collection = cfg[AccessParams.KEY_COLLECTION]
match_params = cfg[AccessParams.KEY_MATCH_PARAMS] if AccessParams.KEY_MATCH_PARAMS in cfg else None
target_type = cfg[AccessParams.KEY_TYPE] if AccessParams.KEY_TYPE in cfg else AccessParams.TYPE_MULTI
if target_type == AccessParams.TYPE_SINGLE:
result = CRUD.read_single(self.__db, collection, match_params)
elif target_type == AccessParams.TYPE_MULTI:
result = CRUD.read_multi(self.__db, collection, match_params)
return result | 0.005803 |
def connect(self, their_unl, events, force_master=1, hairpin=1,
nonce="0" * 64):
"""
A new thread is spawned because many of the connection techniques
rely on sleep to determine connection outcome or to synchronise hole
punching techniques. If the sleep is in its own thread it won't
block main execution.
"""
parms = (their_unl, events, force_master, hairpin, nonce)
t = Thread(target=self.connect_handler, args=parms)
t.start()
self.unl_threads.append(t) | 0.005329 |
def translate_to_stackdriver(self, trace):
"""Translate the spans json to Stackdriver format.
See: https://cloud.google.com/trace/docs/reference/v2/rest/v2/
projects.traces/batchWrite
:type trace: dict
:param trace: Trace dictionary
:rtype: dict
:returns: Spans in Google Cloud StackDriver Trace format.
"""
set_attributes(trace)
spans_json = trace.get('spans')
trace_id = trace.get('traceId')
for span in spans_json:
span_name = 'projects/{}/traces/{}/spans/{}'.format(
self.project_id, trace_id, span.get('spanId'))
span_json = {
'name': span_name,
'displayName': span.get('displayName'),
'startTime': span.get('startTime'),
'endTime': span.get('endTime'),
'spanId': str(span.get('spanId')),
'attributes': self.map_attributes(span.get('attributes')),
'links': span.get('links'),
'status': span.get('status'),
'stackTrace': span.get('stackTrace'),
'timeEvents': span.get('timeEvents'),
'sameProcessAsParentSpan': span.get('sameProcessAsParentSpan'),
'childSpanCount': span.get('childSpanCount')
}
if span.get('parentSpanId') is not None:
parent_span_id = str(span.get('parentSpanId'))
span_json['parentSpanId'] = parent_span_id
yield span_json | 0.001289 |
def ListDescendentPathInfos(self,
client_id,
path_type,
components,
timestamp=None,
max_depth=None):
"""Lists path info records that correspond to children of given path."""
result = []
for path_idx, path_record in iteritems(self.path_records):
other_client_id, other_path_type, other_components = path_idx
if client_id != other_client_id or path_type != other_path_type:
continue
if len(other_components) == len(components):
continue
if not collection.StartsWith(other_components, components):
continue
if (max_depth is not None and
len(other_components) - len(components) > max_depth):
continue
result.append(path_record.GetPathInfo(timestamp=timestamp))
if timestamp is None:
return sorted(result, key=lambda _: tuple(_.components))
# We need to filter implicit path infos if specific timestamp is given.
# TODO(hanuszczak): If we were to switch to use path trie instead of storing
# records by path id, everything would be much easier.
class TrieNode(object):
"""A trie of path components with path infos as values."""
def __init__(self):
self.path_info = None
self.children = {}
self.explicit = False
def Add(self, path_info, idx=0):
"""Adds given path info to the trie (or one of its subtrees)."""
components = path_info.components
if idx == len(components):
self.path_info = path_info
self.explicit |= (
path_info.HasField("stat_entry") or
path_info.HasField("hash_entry"))
else:
child = self.children.setdefault(components[idx], TrieNode())
child.Add(path_info, idx=idx + 1)
self.explicit |= child.explicit
def Collect(self, path_infos):
if self.path_info is not None and self.explicit:
path_infos.append(self.path_info)
for component in sorted(iterkeys(self.children)):
self.children[component].Collect(path_infos)
trie = TrieNode()
for path_info in result:
trie.Add(path_info)
explicit_path_infos = []
trie.Collect(explicit_path_infos)
return explicit_path_infos | 0.011392 |
def import_l2c_db():
"""
Static import helper function.
Checks if the log2code.pickle exists first, otherwise raises ImportError.
"""
data_path = os.path.join(os.path.dirname(mtools.__file__), 'data')
if os.path.exists(os.path.join(data_path, 'log2code.pickle')):
av, lv, lbw, lcl = cPickle.load(open(os.path.join(data_path,
'log2code.pickle'),
'rb'))
return av, lv, lbw, lcl
else:
raise ImportError('log2code.pickle not found in %s.' % data_path) | 0.001656 |
def add_positional_embedding(x, max_length, name=None, positions=None):
"""Adds positional embedding.
Args:
x: Tensor with shape [batch, length, depth].
max_length: int representing static maximum size of any dimension.
name: str representing name of the embedding tf.Variable.
positions: Tensor with shape [batch, length].
Returns:
Tensor of same shape as x.
"""
with tf.name_scope("add_positional_embedding"):
_, length, depth = common_layers.shape_list(x)
var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype)
if positions is None:
pad_length = tf.maximum(0, length - max_length)
sliced = tf.cond(
tf.less(length, max_length),
lambda: tf.slice(var, [0, 0], [length, -1]),
lambda: tf.pad(var, [[0, pad_length], [0, 0]]))
return x + tf.expand_dims(sliced, 0)
else:
return x + tf.gather(var, tf.to_int32(positions)) | 0.007527 |
def _tags_present(name, tags, vpc_id=None, vpc_name=None, region=None,
key=None, keyid=None, profile=None):
'''
helper function to validate tags are correct
'''
ret = {'result': True, 'comment': '', 'changes': {}}
if tags:
sg = __salt__['boto_secgroup.get_config'](name=name, group_id=None, region=region, key=key,
keyid=keyid, profile=profile, vpc_id=vpc_id,
vpc_name=vpc_name)
if not sg:
ret['comment'] = '{0} security group configuration could not be retrieved.'.format(name)
ret['result'] = False
return ret
tags_to_add = tags
tags_to_update = {}
tags_to_remove = []
if sg.get('tags'):
for existing_tag in sg['tags']:
if existing_tag not in tags:
if existing_tag not in tags_to_remove:
tags_to_remove.append(existing_tag)
else:
if tags[existing_tag] != sg['tags'][existing_tag]:
tags_to_update[existing_tag] = tags[existing_tag]
tags_to_add.pop(existing_tag)
if tags_to_remove:
if __opts__['test']:
msg = 'The following tag{0} set to be removed: {1}.'.format(
('s are' if len(tags_to_remove) > 1 else ' is'), ', '.join(tags_to_remove))
ret['comment'] = ' '.join([ret['comment'], msg])
ret['result'] = None
else:
temp_ret = __salt__['boto_secgroup.delete_tags'](tags_to_remove,
name=name,
group_id=None,
vpc_name=vpc_name,
vpc_id=vpc_id,
region=region,
key=key,
keyid=keyid,
profile=profile)
if not temp_ret:
ret['result'] = False
ret['comment'] = ' '.join([
ret['comment'],
'Error attempting to delete tags {0}.'.format(tags_to_remove)
])
return ret
if 'old' not in ret['changes']:
ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
for rem_tag in tags_to_remove:
ret['changes']['old']['tags'][rem_tag] = sg['tags'][rem_tag]
if tags_to_add or tags_to_update:
if __opts__['test']:
if tags_to_add:
msg = 'The following tag{0} set to be added: {1}.'.format(
('s are' if len(tags_to_add.keys()) > 1 else ' is'),
', '.join(tags_to_add.keys()))
ret['comment'] = ' '.join([ret['comment'], msg])
ret['result'] = None
if tags_to_update:
msg = 'The following tag {0} set to be updated: {1}.'.format(
('values are' if len(tags_to_update.keys()) > 1 else 'value is'),
', '.join(tags_to_update.keys()))
ret['comment'] = ' '.join([ret['comment'], msg])
ret['result'] = None
else:
all_tag_changes = dictupdate.update(tags_to_add, tags_to_update)
temp_ret = __salt__['boto_secgroup.set_tags'](all_tag_changes,
name=name,
group_id=None,
vpc_name=vpc_name,
vpc_id=vpc_id,
region=region,
key=key,
keyid=keyid,
profile=profile)
if not temp_ret:
ret['result'] = False
msg = 'Error attempting to set tags.'
ret['comment'] = ' '.join([ret['comment'], msg])
return ret
if 'old' not in ret['changes']:
ret['changes'] = dictupdate.update(ret['changes'], {'old': {'tags': {}}})
if 'new' not in ret['changes']:
ret['changes'] = dictupdate.update(ret['changes'], {'new': {'tags': {}}})
for tag in all_tag_changes:
ret['changes']['new']['tags'][tag] = tags[tag]
if 'tags' in sg:
if sg['tags']:
if tag in sg['tags']:
ret['changes']['old']['tags'][tag] = sg['tags'][tag]
if not tags_to_update and not tags_to_remove and not tags_to_add:
ret['comment'] = ' '.join([ret['comment'], 'Tags are already set.'])
return ret | 0.003638 |
def send_events(self, events):
"""Adds multiple events to the queued message
:returns: None - nothing has been sent to the Riemann server yet
"""
for event in events:
self.queue.events.add().MergeFrom(event)
return None | 0.007353 |
def array(a, context=None, axis=(0,), dtype=None, npartitions=None):
"""
Create a spark bolt array from a local array.
Parameters
----------
a : array-like
An array, any object exposing the array interface, an
object whose __array__ method returns an array, or any
(nested) sequence.
context : SparkContext
A context running Spark. (see pyspark)
axis : tuple, optional, default=(0,)
Which axes to distribute the array along. The resulting
distributed object will use keys to represent these axes,
with the remaining axes represented by values.
dtype : data-type, optional, default=None
The desired data-type for the array. If None, will
be determined from the data. (see numpy)
npartitions : int
Number of partitions for parallization.
Returns
-------
BoltArraySpark
"""
if dtype is None:
arry = asarray(a)
dtype = arry.dtype
else:
arry = asarray(a, dtype)
shape = arry.shape
ndim = len(shape)
# handle the axes specification and transpose if necessary
axes = ConstructSpark._format_axes(axis, arry.shape)
key_axes, value_axes = get_kv_axes(arry.shape, axes)
permutation = key_axes + value_axes
arry = arry.transpose(*permutation)
split = len(axes)
if split < 1:
raise ValueError("split axis must be greater than 0, got %g" % split)
if split > len(shape):
raise ValueError("split axis must not exceed number of axes %g, got %g" % (ndim, split))
key_shape = shape[:split]
val_shape = shape[split:]
keys = zip(*unravel_index(arange(0, int(prod(key_shape))), key_shape))
vals = arry.reshape((prod(key_shape),) + val_shape)
rdd = context.parallelize(zip(keys, vals), npartitions)
return BoltArraySpark(rdd, shape=shape, split=split, dtype=dtype) | 0.001923 |
def _bind_exit_key(self, key):
u"""setup the mapping from key to call the function."""
keyinfo = make_KeyPress_from_keydescr(key.lower()).tuple()
self.exit_dispatch[keyinfo] = None | 0.009662 |
def search(self, query, index='default', **kwargs):
"""
kwargs supported are the parameters listed at:
http://www.elasticsearch.org/guide/reference/api/search/request-body/
Namely: timeout, from, size and search_type.
IMPORTANT: prepend ALL keys with "es_" as pyelasticsearch requires this
"""
# Looking up the index
if index not in self.conf.indexes:
self.raise_improperly_configured(index=index)
# Calling the backend search method
esurl = self.conf.connections[index]['URL']
esinst = pyelasticsearch.ElasticSearch(esurl)
query = isinstance(query, Query) and str(query) or query
self.raw_results = esinst.search(query, index=index, **kwargs)
return self | 0.002545 |
def modify(self, sp=None, ip_port=None, ip_address=None, netmask=None,
v6_prefix_length=None, gateway=None, vlan_id=None):
"""
Modifies a replication interface.
:param sp: same as the one in `create` method.
:param ip_port: same as the one in `create` method.
:param ip_address: same as the one in `create` method.
:param netmask: same as the one in `create` method.
:param v6_prefix_length: same as the one in `create` method.
:param gateway: same as the one in `create` method.
:param vlan_id: same as the one in `create` method.
"""
req_body = self._cli.make_body(sp=sp, ipPort=ip_port,
ipAddress=ip_address, netmask=netmask,
v6PrefixLength=v6_prefix_length,
gateway=gateway, vlanId=vlan_id)
resp = self.action('modify', **req_body)
resp.raise_if_err()
return resp | 0.002962 |
def retry(default=None):
"""Retry functions after failures"""
def decorator(func):
"""Retry decorator"""
@functools.wraps(func)
def _wrapper(*args, **kw):
for pos in range(1, MAX_RETRIES):
try:
return func(*args, **kw)
except (RuntimeError, requests.ConnectionError) as error:
LOGGER.warning("Failed: %s, %s", type(error), error)
# Wait a bit before retrying
for _ in range(pos):
_rest()
LOGGER.warning("Request Aborted")
return default
return _wrapper
return decorator | 0.001473 |
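A hedged usage sketch for the decorator above; `fetch_status` and its URL are illustrative, and `MAX_RETRIES`, `_rest`, and `LOGGER` are assumed to be defined elsewhere in the module:
import requests

@retry(default={})
def fetch_status(url):
    return requests.get(url, timeout=5).json()

status = fetch_status("https://example.org/api/status")  # falls back to {} if every attempt fails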
def _set_field_on_message(msg, key, value):
"""Set helper for protobuf Messages."""
# Attempt to set the value on the types of objects we know how to deal
# with.
if isinstance(value, (collections_abc.MutableSequence, tuple)):
# Clear the existing repeated protobuf message of any elements
# currently inside it.
while getattr(msg, key):
getattr(msg, key).pop()
# Write our new elements to the repeated field.
for item in value:
if isinstance(item, collections_abc.Mapping):
getattr(msg, key).add(**item)
else:
# protobuf's RepeatedCompositeContainer doesn't support
# append.
getattr(msg, key).extend([item])
elif isinstance(value, collections_abc.Mapping):
# Assign the dictionary values to the protobuf message.
for item_key, item_value in value.items():
set(getattr(msg, key), item_key, item_value)
elif isinstance(value, message.Message):
getattr(msg, key).CopyFrom(value)
else:
setattr(msg, key, value) | 0.000892 |
def download(self, output_dir, url, overwrite):
""" Dowload file to /tmp """
tmp = self.url2tmp(output_dir, url)
if os.path.isfile(tmp) and not overwrite:
logging.info("File {0} already exists. Skipping download.".format(tmp))
return tmp
f = open(tmp, 'wb')
logging.info("Downloading {0}".format(url))
res = requests.get(url, stream=True)
if res.status_code != 200:
# failed to download, cleanup and raise exception
f.close()
os.remove(tmp)
error = "{0}\n\nFailed to download < {0} >".format(res.content, url)
raise IOError(error)
for block in res.iter_content(1024):
f.write(block)
f.close()
return tmp | 0.005135 |
def get_key_value_pairs(self, subsystem, filename):
"""
Read the lines of the given file from the given subsystem
and split the lines into key-value pairs.
Do not include the subsystem name in the option name.
Only call this method if the given subsystem is available.
"""
assert subsystem in self
return util.read_key_value_pairs_from_file(self.per_subsystem[subsystem], subsystem + '.' + filename) | 0.006479 |
def read_env(path=None, environ=None, recurse=True):
"""Reads a .env file into ``environ`` (which defaults to ``os.environ``).
If .env is not found in the directory from which this function is called, recurse
up the directory tree until a .env file is found.
"""
environ = environ if environ is not None else os.environ
# By default, start search from the same file this function is called
if path is None:
frame = inspect.currentframe().f_back
caller_dir = os.path.dirname(frame.f_code.co_filename)
path = os.path.join(os.path.abspath(caller_dir), ENV)
if recurse:
current = path
pardir = os.path.abspath(os.path.join(current, os.pardir))
while current != pardir:
target = os.path.join(current, ENV)
if os.path.exists(target):
path = os.path.abspath(target)
break
else:
current = os.path.abspath(os.path.join(current, os.pardir))
pardir = os.path.abspath(os.path.join(current, os.pardir))
if not path:
raise FileNotFoundError('Could not find a .env file')
with open(path, 'r') as fp:
content = fp.read()
parsed = parse_env(content)
for key, value in parsed.items():
environ.setdefault(key, value) | 0.001507 |
def load_healthchecks(self):
"""
Loads healthchecks.
"""
self.load_default_healthchecks()
if getattr(settings, 'AUTODISCOVER_HEALTHCHECKS', True):
self.autodiscover_healthchecks()
self._registry_loaded = True | 0.007463 |
def __set_date(self, value):
'''
Sets the invoice date.
@param value:datetime
'''
value = date_to_datetime(value)
if value > datetime.now() + timedelta(hours=14, minutes=1): #More or less 14 hours from now in case the submitted date was local
raise ValueError("Date cannot be in the future.")
if self.__due_date and value.date() > self.__due_date:
raise ValueError("Date cannot be posterior to the due date.")
self.__date = value | 0.009634 |
def create_presentation(self):
""" Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as self.output
DownloadError is raised if some resources cannot be fetched.
ConversionError is raised if the final video cannot be created.
"""
# Avoid wasting time and bandwidth if we know that conversion will fail.
if not self.overwrite and os.path.exists(self.output):
raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
video = self.download_video()
raw_slides = self.download_slides()
# ffmpeg does not support SWF
png_slides = self._convert_slides(raw_slides)
# Create one frame per second using the time code information
frame_pattern = self._prepare_frames(png_slides)
return self._assemble(video, frame_pattern) | 0.005441 |
def demunge(s: str) -> str:
"""Replace munged string components with their original
representation."""
def demunge_replacer(match: Match) -> str:
full_match = match.group(0)
replacement = _DEMUNGE_REPLACEMENTS.get(full_match, None)
if replacement:
return replacement
return full_match
return re.sub(_DEMUNGE_PATTERN, demunge_replacer, s).replace("_", "-") | 0.002398 |
def intersect(self, other):
"""
Return a dictobj of the items whose keys exist in both self and other.
:type other: dict
:rtype: dictobj
"""
return ODict((k, self[k]) for k in self if k in other) | 0.009756 |
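An illustrative call; `ODict` is assumed to be the ordered-dict subclass this method is defined on:
a = ODict([('x', 1), ('y', 2), ('z', 3)])
print(a.intersect({'y': 0, 'z': 0, 'w': 0}))   # keeps only the shared keys, with a's values: y=2, z=3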
def list_known_codes(s, unique=True, rgb_mode=False):
""" Find and print all known escape codes in a string,
using get_known_codes.
"""
total = 0
for codedesc in get_known_codes(s, unique=unique, rgb_mode=rgb_mode):
total += 1
print(codedesc)
plural = 'code' if total == 1 else 'codes'
codetype = ' unique' if unique else ''
print('\nFound {}{} escape {}.'.format(total, codetype, plural))
return 0 if total > 0 else 1 | 0.00211 |
def copy_abs(self):
""" Return a copy of self with the sign bit unset.
Unlike abs(self), this does not make use of the context: the result
has the same precision as the original.
"""
result = mpfr.Mpfr_t.__new__(BigFloat)
mpfr.mpfr_init2(result, self.precision)
mpfr.mpfr_setsign(result, self, False, ROUND_TIES_TO_EVEN)
return result | 0.005 |
def set_centralized_assembled_values(self, a):
"""Set assembled matrix values on processor 0."""
if self.myid != 0:
return
assert a.size == self.id.nz
self._refs.update(a=a)
self.id.a = self.cast_array(a) | 0.007813 |
def extract_translations(self, string):
"""Extract messages from Django template string."""
trans = []
for t in Lexer(string.decode("utf-8"), None).tokenize():
if t.token_type == TOKEN_BLOCK:
if not t.contents.startswith(
(self.tranz_tag, self.tranzchoice_tag)):
continue
is_tranzchoice = t.contents.startswith(
self.tranzchoice_tag +
" ")
kwargs = {
"id": self._match_to_transvar(id_re, t.contents),
"number": self._match_to_transvar(number_re, t.contents),
"domain": self._match_to_transvar(domain_re, t.contents),
"locale": self._match_to_transvar(locale_re, t.contents),
"is_transchoice": is_tranzchoice, "parameters": TransVar(
[x.split("=")[0].strip() for x in properties_re.findall(t.contents) if x],
TransVar.LITERAL
),
"lineno": t.lineno,
}
trans.append(Translation(**kwargs))
return trans | 0.002506 |
def _proc_ctype_header(self, request, result):
"""
Process the Content-Type header rules for the request. Only
the desired API version can be determined from those rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in.
"""
if result:
# Result has already been fully determined
return
try:
ctype = request.headers['content-type']
except KeyError:
# No content-type header to examine
return
# Parse the content type
ctype, params = parse_ctype(ctype)
# Is it a recognized content type?
if ctype not in self.types:
return
# Get the mapped ctype and version
mapped_ctype, mapped_version = self.types[ctype](params)
# Update the content type header and set the version
if mapped_ctype:
request.environ['aversion.request_type'] = mapped_ctype
request.environ['aversion.orig_request_type'] = ctype
request.environ['aversion.content_type'] = \
request.headers['content-type']
if self.overwrite_headers:
request.headers['content-type'] = mapped_ctype
if mapped_version:
result.set_version(mapped_version) | 0.001455 |
def _onMethodTimeout(self, serial, d):
"""
Called when a remote method invocation timeout occurs
"""
del self._pendingCalls[serial]
d.errback(error.TimeOut('Method call timed out')) | 0.00905 |
def _extract_gaussian_gradient_magnitude(image, mask = slice(None), sigma = 1, voxelspacing = None):
"""
Internal, single-image version of `gaussian_gradient_magnitude`.
"""
# set voxel spacing
if voxelspacing is None:
voxelspacing = [1.] * image.ndim
# determine gaussian kernel size in voxel units
sigma = _create_structure_array(sigma, voxelspacing)
return _extract_intensities(scipy_gaussian_gradient_magnitude(image, sigma), mask) | 0.022312 |
def WriteFlowProcessingRequests(self, requests):
"""Writes a list of flow processing requests to the database."""
# If we don't have a handler thread running, we might be able to process the
# requests inline. If we are not able to, we start the handler thread for real and
# queue the requests normally.
if not self.flow_handler_thread and self.flow_handler_target:
if self._InlineProcessingOK(requests):
for r in requests:
self.flow_handler_target(r)
return
else:
self._RegisterFlowProcessingHandler(self.flow_handler_target)
self.flow_handler_target = None
now = rdfvalue.RDFDatetime.Now()
for r in requests:
cloned_request = r.Copy()
cloned_request.timestamp = now
key = (r.client_id, r.flow_id)
self.flow_processing_requests[key] = cloned_request | 0.010588 |
def _get_fs(thin_pathname):
"""
Returns the file system type (xfs, ext4) of a given device
"""
cmd = ['lsblk', '-o', 'FSTYPE', '-n', thin_pathname]
fs_return = util.subp(cmd)
return fs_return.stdout.strip() | 0.007874 |
def keyPressEvent( self, event ):
"""
Overloads the key press event to listen for escape calls to cancel the
parts editing.
:param event | <QKeyPressEvent>
"""
if ( self.scrollWidget().isHidden() ):
if ( event.key() == Qt.Key_Escape ):
self.cancelEdit()
return
elif ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ):
self.acceptEdit()
return
elif ( event.key() == Qt.Key_A and
event.modifiers() == Qt.ControlModifier ):
self.startEdit()
super(XNavigationEdit, self).keyPressEvent(event) | 0.023677 |
def set_option(self, option, value):
"""
Set a plugin option in configuration file.
Note: Use sig_option_changed to call it from widgets of the
same or another plugin.
"""
CONF.set(self.CONF_SECTION, str(option), value) | 0.007299 |
def _db_upgrade(self, db_name):
""" Upgrade nipap database schema
"""
current_db_version = self._get_db_version()
self._execute(db_schema.functions)
for i in range(current_db_version, nipap.__db_version__):
self._logger.info("Upgrading DB schema: %s to %s", i, i + 1)
upgrade_sql = db_schema.upgrade[i-1] # 0 count on array
self._execute(upgrade_sql % (db_name))
self._execute(db_schema.triggers) | 0.006316 |
def focusOutEvent(self, ev):
"""Redefine focusOut events to stop editing"""
Kittens.widgets.ClickableTreeWidget.focusOutEvent(self, ev)
# if focus is going to a child of ours, do nothing
wid = QApplication.focusWidget()
while wid:
if wid is self:
return
wid = wid.parent()
# else we're truly losing focus -- stop the editor
self._startOrStopEditing() | 0.004484 |
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2)) | 0.01085 |
def Bern_to_Fierz_lep(C,ddll):
"""From semileptonic Bern basis to Fierz semileptonic basis for Class V.
C should be the corresponding leptonic Fierz basis and
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
ind = ddll.replace('l_','').replace('nu_','')
return {'F' + ind + '9': C['1' + ind] + 10 * C['3' + ind],
'F' + ind + '10': -6 * C['3' + ind],
'F' + ind + 'S': C['5' + ind] + 40 * C['9' + ind],
'F' + ind + 'P': 24 * C['9' + ind],
'F' + ind + 'T': C['7' + ind] / 2 + C['7p' + ind] / 2 - 8 * C['9' + ind] - 8 * C['9p' + ind],
'F' + ind + 'T5': C['7' + ind] / 2 - C['7p' + ind] / 2 - 8 * C['9' + ind] + 8 * C['9p' + ind],
'F' + ind + '9p': C['1p' + ind] + 10 * C['3p' + ind],
'F' + ind + '10p': 6 * C['3p' + ind],
'F' + ind + 'Sp': C['5p' + ind] + 40 * C['9p' + ind],
'F' + ind + 'Pp': -24 * C['9p' + ind],
} | 0.006198 |
def get_all_keys(self, headers=None, **params):
"""
This method returns the single key around which this anonymous Bucket
was instantiated.
:rtype: SimpleResultSet
:return: The result from file system listing the keys requested
"""
key = Key(self.name, self.contained_key)
return SimpleResultSet([key]) | 0.005435 |
def on_epoch_end(self, epoch, **kwargs:Any)->None:
"Compare the value monitored to its best and maybe reduce lr."
current = self.get_monitor_value()
if current is None: return
if self.operator(current - self.min_delta, self.best): self.best,self.wait = current,0
else:
self.wait += 1
if self.wait > self.patience:
self.opt.lr *= self.factor
self.wait = 0
print(f'Epoch {epoch}: reducing lr to {self.opt.lr}') | 0.017308 |
def create(name, **params):
'''Create a check on a given URL.
Additional parameters can be used and are passed to API (for
example interval, maxTime, etc). See the documentation
https://github.com/fzaninotto/uptime for a full list of the
parameters.
CLI Example:
.. code-block:: bash
salt '*' uptime.create http://example.org
'''
if check_exists(name):
msg = 'Trying to create check that already exists : {0}'.format(name)
log.error(msg)
raise CommandExecutionError(msg)
application_url = _get_application_url()
log.debug('[uptime] trying PUT request')
params.update(url=name)
req = requests.put('{0}/api/checks'.format(application_url), data=params)
if not req.ok:
raise CommandExecutionError(
'request to uptime failed : {0}'.format(req.reason)
)
log.debug('[uptime] PUT request successful')
return req.json()['_id'] | 0.001056 |
def popitem (self):
"""Remove and return an item."""
key, value = super(LFUCache, self).popitem()
return (key, value[1]) | 0.020833 |
def _default_call_in_place(op, x, out, **kwargs):
"""Default in-place evaluation using ``Operator._call()``.
Parameters
----------
op : `Operator`
Operator to call
x : ``op.domain`` element
Point in which to call the operator.
out : ``op.range`` element
An object in the operator range. The result of an operator
evaluation is written here.
kwargs:
Optional arguments to the operator.
"""
out.assign(op.range.element(op._call_out_of_place(x, **kwargs))) | 0.001887 |
def dumps(obj):
"""
Dumps a serializable object to JSON. This API maps to the Python built-in
json dumps method, with a few differences:
* The return value is always valid JSON according to RFC 7159.
* The input can be any of the following types:
- SFrame
- SArray
- SGraph
- single flexible_type (Image, int, long, float, datetime.datetime)
- recursive flexible_type (list, dict, array.array)
- recursive variant_type (list or dict of all of the above)
* Serialized result includes both data and schema. Deserialization requires
valid schema information to disambiguate various other wrapped types
(like Image) from dict.
"""
(data, schema) = to_serializable(obj)
return _json.dumps({'data': data, 'schema': schema}) | 0.001229 |
def _set_keychain(self, v, load=False):
"""
Setter method for keychain, mapped from YANG variable /keychain (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_keychain is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_keychain() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name_of_keychain",keychain.keychain, yang_name="keychain", rest_name="keychain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-of-keychain', extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}), is_container='list', yang_name="keychain", rest_name="keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """keychain must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name_of_keychain",keychain.keychain, yang_name="keychain", rest_name="keychain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-of-keychain', extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}), is_container='list', yang_name="keychain", rest_name="keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='list', is_config=True)""",
})
self.__keychain = t
if hasattr(self, '_set'):
self._set() | 0.003768 |
def shareItem(self, sharedItem, shareID=None, interfaces=ALL_IMPLEMENTED):
"""
Share an item with this role. This provides a way to expose items to
users for later retrieval with L{Role.getShare}.
@param sharedItem: an item to be shared.
@param shareID: a unicode string. If provided, specify the ID under which
the shared item will be shared.
@param interfaces: a list of Interface objects which specify the methods
and attributes accessible to C{toRole} on C{sharedItem}.
@return: a L{Share} which records the ability of the given role to
access the given item.
"""
if shareID is None:
shareID = genShareID(sharedItem.store)
return Share(store=self.store,
shareID=shareID,
sharedItem=sharedItem,
sharedTo=self,
sharedInterfaces=interfaces) | 0.004224 |
def _eintr_retry(func, *args):
"""restart a system call interrupted by EINTR"""
while True:
try:
return func(*args)
except (OSError, select.error) as e:
if e.args[0] != errno.EINTR:
raise | 0.003984 |
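A short sketch of wrapping an interruptible call; `select` and `socket` are standard library, and the helper simply restarts the call if a signal interrupts it before the one-second timeout:
import select
import socket

sock = socket.socket()
readable, writable, errored = _eintr_retry(select.select, [sock], [], [], 1.0)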
def split_sentences_spacy(text, language_model='en'):
r""" You must download a spacy language model with python -m download 'en'
The default English language model for spacy tends to be a lot more agressive than NLTK's punkt:
>>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0")
['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0']
>>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0")
['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0']
>>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0")
['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"]
>>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0")
['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0']
"""
doc = nlp(text)
sentences = []
if not hasattr(doc, 'sents'):
logger.warning("Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded")
return split_sentences_nltk(text)
for w, span in enumerate(doc.sents):
sent = ''.join(doc[i].string for i in range(span.start, span.end)).strip()
if len(sent):
sentences.append(sent)
return sentences | 0.005435 |
def import_from_ding0(self, file, **kwargs):
"""Import grid data from DINGO file
For details see
:func:`edisgo.data.import_data.import_from_ding0`
"""
import_from_ding0(file=file, network=self.network) | 0.00823 |
def get_num_batches(self, instances: Iterable[Instance]) -> int:
"""
Returns the number of batches that ``dataset`` will be split into; if you want to track
progress through the batch with the generator produced by ``__call__``, this could be
useful.
"""
if is_lazy(instances) and self._instances_per_epoch is None:
# Unable to compute num batches, so just return 1.
return 1
elif self._instances_per_epoch is not None:
return math.ceil(self._instances_per_epoch / self._batch_size)
else:
# Not lazy, so can compute the list length.
return math.ceil(len(ensure_list(instances)) / self._batch_size) | 0.005548 |
def getScreenshotPropertyFilename(self, screenshotHandle, filenameType, pchFilename, cchFilename):
"""
Get the filename for the preview or vr image (see
vr::EScreenshotPropertyFilenames). The return value is
the size of the string.
"""
fn = self.function_table.getScreenshotPropertyFilename
pError = EVRScreenshotError()
result = fn(screenshotHandle, filenameType, pchFilename, cchFilename, byref(pError))
return result, pError | 0.007952 |
def get_attention(config: AttentionConfig, max_seq_len: int, prefix: str = C.ATTENTION_PREFIX) -> 'Attention':
"""
Returns an Attention instance based on attention_type.
:param config: Attention configuration.
:param max_seq_len: Maximum length of source sequences.
:param prefix: Name prefix.
:return: Instance of Attention.
"""
att_cls = Attention.get_attention_cls(config.type)
params = config.__dict__.copy()
params.pop('_frozen')
params['max_seq_len'] = max_seq_len
params['prefix'] = prefix
return _instantiate(att_cls, params) | 0.003407 |
def remove_from_user(self, name, *args):
"""Remove attributes from a user.
"""
user = self.get_user(name=name)
attrs_ = user['user']
for a in args:
del attrs_[a] | 0.00939 |
def get_func_argument_types(self, hsh: bytes):
"""Returns the tuple type signature for the arguments of the function associated with the selector ``hsh``.
If no normal contract function has the specified selector,
the empty tuple type signature ``'()'`` is returned.
"""
if not isinstance(hsh, (bytes, bytearray)):
raise TypeError('The selector argument must be a concrete byte array')
sig = self._function_signatures_by_selector.get(hsh)
return '()' if sig is None else sig[sig.find('('):] | 0.007156 |
def _message_queryset(self, include_read=False):
"""
Return a queryset of messages for the request user
"""
expire = timezone.now()
qs = PersistentMessage.objects.\
filter(user=self.get_user()).\
filter(Q(expires=None) | Q(expires__gt=expire))
if not include_read:
qs = qs.exclude(read=True)
return qs | 0.01292 |
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y) | 0.000998 |
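A quick sanity check against NumPy's own cross product; both inputs must be 2D with three columns, and 600 rows forces the manual fast path:
import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(600, 3)
y = rng.randn(600, 3)
assert np.allclose(_fast_cross_3d(x, y), np.cross(x, y))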
def get_assessments(self):
"""Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
return: (osid.assessment.AssessmentList) - a list of
``Assessments``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find(self._view_filter()).sort('_id', DESCENDING)
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy) | 0.002705 |
def shot_chart_jointgrid(x, y, data=None, joint_type="scatter", title="",
joint_color="b", cmap=None, xlim=(-250, 250),
ylim=(422.5, -47.5), court_color="gray", court_lw=1,
outer_lines=False, flip_court=False,
joint_kde_shade=True, gridsize=None,
marginals_color="b", marginals_type="both",
marginals_kde_shade=True, size=(12, 11), space=0,
despine=False, joint_kws=None, marginal_kws=None,
**kwargs):
"""
Returns a JointGrid object containing the shot chart.
This function allows for more flexibility in customizing your shot chart
than the ``shot_chart_jointplot`` function.
Parameters
----------
x, y : strings or vector
The x and y coordinates of the shots taken. They can be passed in as
vectors (such as a pandas Series) or as columns from the pandas
DataFrame passed into ``data``.
data : DataFrame, optional
DataFrame containing shots where ``x`` and ``y`` represent the shot
location coordinates.
joint_type : { "scatter", "kde", "hex" }, optional
The type of shot chart for the joint plot.
title : str, optional
The title for the plot.
joint_color : matplotlib color, optional
Color used to plot the shots on the joint plot.
cmap : matplotlib Colormap object or name, optional
Colormap for the range of data values. If one isn't provided, the
colormap is derived from the value passed to ``color``. Used for KDE
and Hexbin joint plots.
{x, y}lim : two-tuples, optional
The axis limits of the plot. The defaults represent the out of bounds
lines and half court line.
court_color : matplotlib color, optional
The color of the court lines.
court_lw : float, optional
The linewidth the of the court lines.
outer_lines : boolean, optional
If ``True`` the out of bound lines are drawn in as a matplotlib
Rectangle.
flip_court : boolean, optional
If ``True`` orients the hoop towards the bottom of the plot. Default is
``False``, which orients the court where the hoop is towards the top of
the plot.
joint_kde_shade : boolean, optional
Default is ``True``, which shades in the KDE contours on the joint plot.
gridsize : int, optional
Number of hexagons in the x-direction. The default is calculated using
the Freedman-Diaconis method.
marginals_color : matplotlib color, optional
Color used to plot the shots on the marginal plots.
marginals_type : { "both", "hist", "kde"}, optional
The type of plot for the marginal plots.
marginals_kde_shade : boolean, optional
Default is ``True``, which shades in the KDE contours on the marginal
plots.
size : tuple, optional
The width and height of the plot in inches.
space : numeric, optional
The space between the joint and marginal plots.
despine : boolean, optional
If ``True``, removes the spines.
{joint, marginal}_kws : dicts
Additional kewyord arguments for joint and marginal plot components.
kwargs : key, value pairs
Keyword arguments for matplotlib Collection properties or seaborn plots.
Returns
-------
grid : JointGrid
The JointGrid object with the shot chart plotted on it.
"""
# The joint_kws and marginal_kws idea was taken from seaborn
# Create the default empty kwargs for joint and marginal plots
if joint_kws is None:
joint_kws = {}
joint_kws.update(kwargs)
if marginal_kws is None:
marginal_kws = {}
# If a colormap is not provided, then it is based off of the joint_color
if cmap is None:
cmap = sns.light_palette(joint_color, as_cmap=True)
# Flip the court so that the hoop is by the bottom of the plot
if flip_court:
xlim = xlim[::-1]
ylim = ylim[::-1]
# Create the JointGrid to draw the shot chart plots onto
grid = sns.JointGrid(x=x, y=y, data=data, xlim=xlim, ylim=ylim,
space=space)
# Joint Plot
# Create the main plot of the joint shot chart
if joint_type == "scatter":
grid = grid.plot_joint(plt.scatter, color=joint_color, **joint_kws)
elif joint_type == "kde":
grid = grid.plot_joint(sns.kdeplot, cmap=cmap,
shade=joint_kde_shade, **joint_kws)
elif joint_type == "hex":
if gridsize is None:
# Get the number of bins for hexbin using Freedman-Diaconis rule
# This is idea was taken from seaborn, which got the calculation
# from http://stats.stackexchange.com/questions/798/
from seaborn.distributions import _freedman_diaconis_bins
x_bin = _freedman_diaconis_bins(x)
y_bin = _freedman_diaconis_bins(y)
gridsize = int(np.mean([x_bin, y_bin]))
grid = grid.plot_joint(plt.hexbin, gridsize=gridsize, cmap=cmap,
**joint_kws)
else:
raise ValueError("joint_type must be 'scatter', 'kde', or 'hex'.")
# Marginal plots
# Create the plots on the axis of the main plot of the joint shot chart.
if marginals_type == "both":
grid = grid.plot_marginals(sns.distplot, color=marginals_color,
**marginal_kws)
elif marginals_type == "hist":
grid = grid.plot_marginals(sns.distplot, color=marginals_color,
kde=False, **marginal_kws)
elif marginals_type == "kde":
grid = grid.plot_marginals(sns.kdeplot, color=marginals_color,
shade=marginals_kde_shade, **marginal_kws)
else:
raise ValueError("marginals_type must be 'both', 'hist', or 'kde'.")
# Set the size of the joint shot chart
grid.fig.set_size_inches(size)
# Extract the the first axes, which is the main plot of the
# joint shot chart, and draw the court onto it
ax = grid.fig.get_axes()[0]
draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines)
# Get rid of the axis labels
grid.set_axis_labels(xlabel="", ylabel="")
# Get rid of all tick labels
ax.tick_params(labelbottom="off", labelleft="off")
# Set the title above the top marginal plot
ax.set_title(title, y=1.2, fontsize=18)
# Set the spines to match the rest of court lines, makes outer_lines
# somewhat unnecessary
for spine in ax.spines:
ax.spines[spine].set_lw(court_lw)
ax.spines[spine].set_color(court_color)
# set the marginal spines to be the same as the rest of the spines
grid.ax_marg_x.spines[spine].set_lw(court_lw)
grid.ax_marg_x.spines[spine].set_color(court_color)
grid.ax_marg_y.spines[spine].set_lw(court_lw)
grid.ax_marg_y.spines[spine].set_color(court_color)
if despine:
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
return grid | 0.000411 |
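A hypothetical call; the DataFrame and its LOC_X/LOC_Y column names are assumptions, `draw_court` must exist in the module, and the plot depends on the older seaborn API this function targets:
import pandas as pd

shots = pd.DataFrame({"LOC_X": [0, 120, -90], "LOC_Y": [50, 200, 10]})
grid = shot_chart_jointgrid("LOC_X", "LOC_Y", data=shots, joint_type="scatter",
                            title="Example shot chart")
grid.fig.savefig("shot_chart.png")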
def before(point):
""" True if point datetime specification is before now """
if not point:
return True
if isinstance(point, str):
point = str_to_time(point)
elif isinstance(point, int):
point = time.gmtime(point)
return time.gmtime() < point | 0.003472 |
def rnaseqc_general_stats (self):
"""
Add alignment rate to the general stats table
"""
headers = OrderedDict()
headers['Expression Profiling Efficiency'] = {
'title': '% Expression Efficiency',
'description': 'Expression Profiling Efficiency: Ratio of exon reads to total reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn',
'modify': lambda x: float(x) * 100.0
}
headers['Genes Detected'] = {
'title': '# Genes',
'description': 'Number of genes detected with at least 5 reads.',
'min': 0,
'scale': 'Bu',
'format': '{:,.0f}'
}
headers['rRNA rate'] = {
'title': '% rRNA Alignment',
        'description': 'rRNA reads (non-duplicate and duplicate reads) per total reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'Reds',
'modify': lambda x: float(x) * 100.0
}
self.general_stats_addcols(self.rna_seqc_metrics, headers) | 0.004382 |
def poisson(data):
""" Creates a segment cost function for a time series with a
poisson distribution with changing mean
Args:
data (:obj:`list` of float): 1D time series data
Returns:
function: Function with signature
(int, int) -> float
            where the first arg is the starting index, and the second
            is the ending index. Returns the cost of that segment
"""
data = np.hstack(([0.0], np.array(data)))
cumm = np.cumsum(data)
def cost(s, t):
""" Cost function for poisson distribution with changing mean
Args:
start (int): start index
end (int): end index
Returns:
float: Cost, from start to end
"""
diff = cumm[t]-cumm[s]
if diff == 0:
return -2 * diff * (- np.log(t-s) - 1)
else:
return -2 * diff * (np.log(diff) - np.log(t-s) - 1)
return cost | 0.001057 |
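A minimal usage sketch of the cost factory above, using invented count data with a rate change; it assumes numpy is available (as the function itself requires). A lower combined cost for the two sub-segments than for the whole range is what signals a changepoint.
import numpy as np

counts = [3, 1, 2, 2, 10, 11, 9, 12]   # hypothetical counts; the rate changes at index 4
cost = poisson(counts)
left = cost(0, 4)                       # cost of the low-rate segment
right = cost(4, 8)                      # cost of the high-rate segment
whole = cost(0, 8)                      # cost of treating everything as one segment
print(whole > left + right)             # True: splitting at the changepoint is cheaper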
def OnReaderComboBox(self, event):
"""Called when the user activates a reader in the toolbar combo box."""
cb = event.GetEventObject()
reader = cb.GetClientData(cb.GetSelection())
if isinstance(reader, smartcard.reader.Reader.Reader):
self.treeuserpanel.dialogpanel.OnActivateReader(reader) | 0.005988 |
def create_dbsecurity_group(self, name, description=None):
"""
Create a new security group for your account.
This will create the security group within the region you
are currently connected to.
:type name: string
:param name: The name of the new security group
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
        params = {'DBSecurityGroupName': name}
if description:
params['DBSecurityGroupDescription'] = description
group = self.get_object('CreateDBSecurityGroup', params,
DBSecurityGroup)
group.name = name
group.description = description
return group | 0.003413 |
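A hedged usage sketch for the method above; it assumes boto 2's RDS module and a connection created with boto.rds.connect_to_region (the region and group names are placeholders, and a real call needs valid AWS credentials).
import boto.rds

conn = boto.rds.connect_to_region('us-east-1')      # placeholder region
group = conn.create_dbsecurity_group(
    'web-db-sg',                                    # placeholder group name
    'Allows the web tier to reach the database')
print(group.name, group.description)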
def to_bytes(self):
'''
Create bytes from properties
'''
# Verify that properties make sense
self.sanitize()
# Start with the type
bitstream = BitArray('uint:4=%d' % self.message_type)
# Add the flags
bitstream += BitArray('bool=%d' % self.proxy_map_reply)
# Add reserved bits
bitstream += self._reserved1
# Decide on the has_xtr_site_id value
has_xtr_site_id = bool(self.xtr_id or self.site_id or self.for_rtr)
bitstream += BitArray('bool=%d, bool=%d' % (has_xtr_site_id,
self.for_rtr))
# Add reserved bits
bitstream += self._reserved2
# Add the rest of the flags
bitstream += BitArray('bool=%d' % self.want_map_notify)
# Add record count
bitstream += BitArray('uint:8=%d' % len(self.records))
# Add the nonce
bitstream += BitArray(bytes=self.nonce)
# Add the key-id and authentication data
bitstream += BitArray('uint:16=%d, uint:16=%d, hex=%s'
% (self.key_id,
len(self.authentication_data),
self.authentication_data.encode('hex')))
# Add the map-reply records
for record in self.records:
bitstream += record.to_bitstream()
# Add xTR-ID and site-ID if we said we would
if has_xtr_site_id:
bitstream += BitArray('uint:128=%d, uint:64=%d' % (self.xtr_id,
self.site_id))
return bitstream.bytes | 0.001197 |
def build_config(config: Dict[str, Any]) -> Dict[str, Any]:
"""Will build the actual config for Jinja2, based on SDK config.
"""
result = config.copy()
# Manage the classifier stable/beta
is_stable = result.pop("is_stable", False)
if is_stable:
result["classifier"] = "Development Status :: 5 - Production/Stable"
else:
result["classifier"] = "Development Status :: 4 - Beta"
# Manage the nspkg
package_name = result["package_name"]
result["package_nspkg"] = result.pop(
"package_nspkg",
package_name[:package_name.rindex('-')]+"-nspkg"
)
# ARM?
result['is_arm'] = result.pop("is_arm", True)
# Do I need msrestazure for this package?
result['need_msrestazure'] = result.pop("need_msrestazure", True)
# Pre-compute some Jinja variable that are complicated to do inside the templates
package_parts = result["package_nspkg"][:-len('-nspkg')].split('-')
result['nspkg_names'] = [
".".join(package_parts[:i+1])
for i in range(len(package_parts))
]
result['init_names'] = [
"/".join(package_parts[:i+1])+"/__init__.py"
for i in range(len(package_parts))
]
# Return result
return result | 0.002421 |
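A small sketch of build_config with an invented SDK config, showing the derived fields computed above (the package name is a placeholder).
cfg = build_config({
    'package_name': 'azure-mgmt-example',   # placeholder package name
    'is_stable': True,
})
print(cfg['classifier'])     # Development Status :: 5 - Production/Stable
print(cfg['package_nspkg'])  # azure-mgmt-nspkg, derived from package_name
print(cfg['nspkg_names'])    # ['azure', 'azure.mgmt']
print(cfg['init_names'])     # ['azure/__init__.py', 'azure/mgmt/__init__.py']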
def diags2(symmat):
"""
Diagonalize a symmetric 2x2 matrix.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/diags2_c.html
:param symmat: A symmetric 2x2 matrix.
:type symmat: 2x2-Element Array of floats
:return:
A diagonal matrix similar to symmat,
A rotation used as the similarity transformation.
:rtype: tuple
"""
symmat = stypes.toDoubleMatrix(symmat)
diag = stypes.emptyDoubleMatrix(x=2, y=2)
rotateout = stypes.emptyDoubleMatrix(x=2, y=2)
libspice.diags2_c(symmat, diag, rotateout)
return stypes.cMatrixToNumpy(diag), stypes.cMatrixToNumpy(rotateout) | 0.001558 |
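A short sketch of calling this wrapper through spiceypy, which exposes it as diags2; the matrix values are arbitrary, and no SPICE kernels are needed for this purely numerical routine.
import spiceypy

symmat = [[1.0, 4.0], [4.0, -5.0]]   # arbitrary symmetric 2x2 matrix
diag, rot = spiceypy.diags2(symmat)
# diag is the diagonalized matrix; rot is the rotation used as the similarity transformation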
def get(self, tag=None, **kwargs):
'''
Recursively searches children for tags of a certain
type with matching attributes.
'''
# Stupid workaround since we can not use dom_tag in the method declaration
if tag is None: tag = dom_tag
attrs = [(dom_tag.clean_attribute(attr), value)
for attr, value in kwargs.items()]
results = []
for child in self.children:
if (isinstance(tag, basestring) and type(child).__name__ == tag) or \
(not isinstance(tag, basestring) and isinstance(child, tag)):
if all(child.attributes.get(attribute) == value
for attribute, value in attrs):
# If the child is of correct type and has all attributes and values
# in kwargs add as a result
results.append(child)
if isinstance(child, dom_tag):
# If the child is a dom_tag extend the search down through its children
results.extend(child.get(tag, **kwargs))
return results | 0.010204 |
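A hedged sketch of the recursive get method above using the dominate package (where dom_tag lives); the tag contents and the 'intro' class are invented.
from dominate import tags

page = tags.div(
    tags.p('first', cls='intro'),    # cls is cleaned to the 'class' attribute
    tags.div(tags.p('nested')),
)
intros = page.get(tags.p, cls='intro')   # only the <p class="intro"> element
all_paragraphs = page.get(tags.p)        # recursive: both <p> elements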
def show(*args, **kwargs):
r"""Show created figures, alias to ``plt.show()``.
By default, showing plots does not block the prompt.
Calling this function will block execution.
"""
_, plt, _ = _import_plt()
plt.show(*args, **kwargs) | 0.003922 |
def get_float(self,
key,
is_list=False,
is_optional=False,
is_secret=False,
is_local=False,
default=None,
options=None):
"""
        Get the value corresponding to the key and convert it to `float`/`list(float)`.
Args:
key: the dict key.
is_list: If this is one element or a list of elements.
            is_optional: If True, return `default` instead of raising an error when the key is not found.
is_secret: If the key is a secret.
is_local: If the key is a local to this service.
default: default value if is_optional is True.
options: list/tuple if provided, the value must be one of these values.
Returns:
`float`: value corresponding to the key.
"""
if is_list:
return self._get_typed_list_value(key=key,
target_type=float,
type_convert=float,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options)
return self._get_typed_value(key=key,
target_type=float,
type_convert=float,
is_optional=is_optional,
is_secret=is_secret,
is_local=is_local,
default=default,
options=options) | 0.005975 |
def get_files(dir_name):
"""Simple directory walker"""
return [(os.path.join('.', d), [os.path.join(d, f) for f in files]) for d, _, files in os.walk(dir_name)] | 0.011765 |
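A quick sketch of get_files; the directory name is a placeholder and os is assumed to be imported, as the function itself requires. The result shape matches what setuptools expects for data_files.
import os

data_files = get_files('examples')
# e.g. [('./examples', ['examples/a.txt']), ('./examples/sub', ['examples/sub/b.txt'])]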
def clear_pictures(self):
"""Delete all pictures from the file."""
blocks = [b for b in self.metadata_blocks if b.code != Picture.code]
self.metadata_blocks = blocks | 0.010526 |
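A hedged usage sketch assuming mutagen's FLAC class, on which this method is defined ('song.flac' is a placeholder path).
from mutagen.flac import FLAC

audio = FLAC('song.flac')   # placeholder path to an existing FLAC file
audio.clear_pictures()      # drop all embedded pictures
audio.save()                # persist the change to disk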
def _utf8_encode(self, d):
"""
Ensures all values are encoded in UTF-8 and converts them to lowercase
"""
for k, v in d.items():
if isinstance(v, str):
d[k] = v.encode('utf8').lower()
if isinstance(v, list):
for index,item in enumerate(v):
item = item.encode('utf8').lower()
v[index] = item
if isinstance(v, dict):
d[k] = self._utf8_encode(v)
return d | 0.009381 |
async def run_action(self, action_name, **params):
"""Run an action on this unit.
:param str action_name: Name of action to run
:param **params: Action parameters
:returns: A :class:`juju.action.Action` instance.
Note that this only enqueues the action. You will need to call
``action.wait()`` on the resulting `Action` instance if you wish
to block until the action is complete.
"""
action_facade = client.ActionFacade.from_connection(self.connection)
log.debug('Starting action `%s` on %s', action_name, self.name)
res = await action_facade.Enqueue([client.Action(
name=action_name,
parameters=params,
receiver=self.tag,
)])
action = res.results[0].action
error = res.results[0].error
if error and error.code == 'not found':
raise ValueError('Action `%s` not found on %s' % (action_name,
self.name))
elif error:
raise Exception('Unknown action error: %s' % error.serialize())
action_id = action.tag[len('action-'):]
log.debug('Action started as %s', action_id)
# we mustn't use wait_for_action because that blocks until the
# action is complete, rather than just being in the model
return await self.model._wait_for_new('action', action_id) | 0.001391 |
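A hedged sketch of enqueuing an action with libjuju and then blocking on it, as the docstring above suggests; it assumes an already-connected model and unit, and the action name and parameter are placeholders.
# inside an async function, with `unit` a juju.unit.Unit from a connected model
action = await unit.run_action('backup', destination='/tmp/backup.tgz')
await action.wait()   # block until the action has actually run, as noted above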
def register_with_google(full_name, email, oauth2_token,
lang=None, timezone=None):
"""Register a new Todoist account by linking a Google account.
:param full_name: The user's full name.
:type full_name: str
:param email: The user's email address.
:type email: str
:param oauth2_token: The oauth2 token associated with the email.
:type oauth2_token: str
:param lang: The user's language.
:type lang: str
:param timezone: The user's timezone.
:type timezone: str
:return: The Todoist user.
:rtype: :class:`pytodoist.todoist.User`
.. note:: It is up to you to obtain the valid oauth2 token.
>>> from pytodoist import todoist
>>> oauth2_token = 'oauth2_token'
>>> user = todoist.register_with_google('John Doe', '[email protected]',
... oauth2_token)
>>> print(user.full_name)
John Doe
"""
response = API.login_with_google(email, oauth2_token, auto_signup=1,
full_name=full_name, lang=lang,
timezone=timezone)
_fail_if_contains_errors(response)
user_json = response.json()
user = User(user_json)
return user | 0.000801 |
def parse_observation_response(json):
"""Decode AQICN observation response JSON into python object."""
logging.debug(json)
iaqi = json['iaqi']
result = {
'idx': json['idx'],
'city': json.get('city', ''),
'aqi': json['aqi'],
'dominentpol': json.get("dominentpol", ''),
'time': json['time']['s'],
'iaqi': [{'p': item, 'v': iaqi[item]['v']} for item in iaqi]
}
return result | 0.002247 |
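A minimal sketch with a hand-made dict shaped like the AQICN observation payload this function expects (all values are invented).
sample = {
    'idx': 1451,
    'aqi': 74,
    'city': {'name': 'Shanghai', 'geo': [31.2, 121.5]},
    'dominentpol': 'pm25',
    'time': {'s': '2024-05-01 12:00:00'},
    'iaqi': {'pm25': {'v': 74}, 'o3': {'v': 12}},
}
obs = parse_observation_response(sample)
# obs['iaqi'] -> [{'p': 'pm25', 'v': 74}, {'p': 'o3', 'v': 12}] (order may vary)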
def set_best_hit_values_for_proteins(self, OrganismDB):
'''
Iterate through all proteins in the DB,
drop duplicates in the hit_dataframe, then store the maximum
hit information as protein attributes.
'''
for org in OrganismDB.organisms:
print 'setting best hit values for', org.name
for prot in org.proteins:
if len(prot.hmm_hit_list) > 0:
try:
dd_df = prot.hit_dataframe.drop_duplicates(subset='bitscore')
try:
prot.hit_name_best = dd_df.bitscore.idxmax()
prot.hit_evalue_best = dd_df.ix[prot.hit_name_best].evalue
prot.hit_bitscore_best = dd_df.ix[prot.hit_name_best].bitscore
prot.hit_bias_best = dd_df.ix[prot.hit_name_best].bias
prot.hit_start_best = dd_df.ix[prot.hit_name_best].hsp_start
prot.hit_end_best = dd_df.ix[prot.hit_name_best].hsp_end
except:
print 'could not set best hit values for ', org.name
except AttributeError:
pass | 0.007918 |
def CALLDATALOAD(self, offset):
"""Get input data of current environment"""
if issymbolic(offset):
if solver.can_be_true(self._constraints, offset == self._used_calldata_size):
self.constraints.add(offset == self._used_calldata_size)
raise ConcretizeArgument(1, policy='SAMPLED')
self._use_calldata(offset, 32)
data_length = len(self.data)
bytes = []
for i in range(32):
try:
c = Operators.ITEBV(8, offset + i < data_length, self.data[offset + i], 0)
except IndexError:
# offset + i is concrete and outside data
c = 0
bytes.append(c)
return Operators.CONCAT(256, *bytes) | 0.005298 |
def cmd_arp_ping(ip, iface, verbose):
"""
    Send ARP packets to check if a host is alive on the local network.
Example:
\b
# habu.arp.ping 192.168.0.1
Ether / ARP is at a4:08:f5:19:17:a4 says 192.168.0.1 / Padding
"""
if verbose:
logging.basicConfig(level=logging.INFO, format='%(message)s')
conf.verb = False
if iface:
conf.iface = iface
res, unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip), timeout=2)
for _, pkt in res:
if verbose:
print(pkt.show())
else:
print(pkt.summary()) | 0.001672 |
def merge_particle_emission(SS):
"""Returns a sim object summing the emissions and particles in SS (list).
"""
# Merge all the particles
P = reduce(lambda x, y: x + y, [Si.particles for Si in SS])
s = SS[0]
S = ParticlesSimulation(t_step=s.t_step, t_max=s.t_max,
particles=P, box=s.box, psf=s.psf)
S.em = np.zeros(s.em.shape, dtype=np.float64)
for Si in SS:
S.em += Si.em
return S | 0.002212 |
def df2sd(self, df: 'pd.DataFrame', table: str = '_df', libref: str = '',
results: str = '', keep_outer_quotes: bool = False) -> 'SASdata':
"""
This is an alias for 'dataframe2sasdata'. Why type all that?
:param df: :class:`pandas.DataFrame` Pandas Data Frame to import to a SAS Data Set
:param table: the name of the SAS Data Set to create
:param libref: the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
:param results: format of results, SASsession.results is default, PANDAS, HTML or TEXT are the alternatives
    :param keep_outer_quotes: the default is for SAS to strip outer quotes from delimited data. This lets you keep them
:return: SASdata object
"""
return self.dataframe2sasdata(df, table, libref, results, keep_outer_quotes) | 0.009249 |
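A hedged sketch of the alias above; it assumes a locally configured saspy installation (the session configuration, table name, and data values are placeholders).
import pandas as pd
import saspy

sas = saspy.SASsession()                   # uses whatever sascfg_personal.py defines
df = pd.DataFrame({'mpg': [21.0, 22.8], 'cyl': [6, 4]})
cars = sas.df2sd(df, table='cars')         # creates WORK.cars in the SAS session
print(cars.head())                         # cars is a SASdata object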
def add_adjust(self, data, prehashed=False):
"""Add a new leaf, and adjust the tree, without rebuilding the whole thing.
"""
subtrees = self._get_whole_subtrees()
new_node = Node(data, prehashed=prehashed)
self.leaves.append(new_node)
for node in reversed(subtrees):
new_parent = Node(node.val + new_node.val)
node.p, new_node.p = new_parent, new_parent
new_parent.l, new_parent.r = node, new_node
node.sib, new_node.sib = new_node, node
node.side, new_node.side = 'L', 'R'
new_node = new_node.p
self.root = new_node | 0.004658 |
def _find_relation_factory(module):
"""
Attempt to find a RelationFactory subclass in the module.
Note: RelationFactory and RelationBase are ignored so they may
be imported to be used as base classes without fear.
"""
if not module:
return None
# All the RelationFactory subclasses
candidates = [o for o in (getattr(module, attr) for attr in dir(module))
if (o is not RelationFactory and
o is not RelationBase and
isclass(o) and
issubclass(o, RelationFactory))]
# Filter out any factories that are superclasses of another factory
# (none of the other factories subclass it). This usually makes
    # the explicit check for RelationBase and RelationFactory unnecessary.
candidates = [c1 for c1 in candidates
if not any(issubclass(c2, c1) for c2 in candidates
if c1 is not c2)]
if not candidates:
hookenv.log('No RelationFactory found in {}'.format(module.__name__),
hookenv.WARNING)
return None
if len(candidates) > 1:
raise RuntimeError('Too many RelationFactory found in {}'
''.format(module.__name__))
return candidates[0] | 0.000772 |
def write_backreferences(seen_backrefs, gallery_conf,
target_dir, fname, snippet):
"""Writes down back reference files, which include a thumbnail list
of examples using a certain module"""
if gallery_conf['backreferences_dir'] is None:
return
example_file = os.path.join(target_dir, fname)
backrefs = scan_used_functions(example_file, gallery_conf)
for backref in backrefs:
include_path = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'],
'%s.examples.new' % backref)
seen = backref in seen_backrefs
with codecs.open(include_path, 'a' if seen else 'w',
encoding='utf-8') as ex_file:
if not seen:
heading = '\n\nExamples using ``%s``' % backref
ex_file.write(heading + '\n')
ex_file.write('^' * len(heading) + '\n')
ex_file.write(_thumbnail_div(target_dir, gallery_conf['src_dir'],
fname, snippet, is_backref=True))
seen_backrefs.add(backref) | 0.000855 |
def create(cls, statement_format, date_start, date_end,
monetary_account_id=None, regional_format=None,
custom_headers=None):
"""
:type user_id: int
:type monetary_account_id: int
:param statement_format: The format type of statement. Allowed values:
MT940, CSV, PDF.
:type statement_format: str
:param date_start: The start date for making statements.
:type date_start: str
:param date_end: The end date for making statements.
:type date_end: str
        :param regional_format: Required for CSV exports. The regional format of
        the statement; can be UK_US (comma-separated) or EUROPEAN
        (semicolon-separated).
:type regional_format: str
:type custom_headers: dict[str, str]|None
:rtype: BunqResponseInt
"""
if custom_headers is None:
custom_headers = {}
request_map = {
cls.FIELD_STATEMENT_FORMAT: statement_format,
cls.FIELD_DATE_START: date_start,
cls.FIELD_DATE_END: date_end,
cls.FIELD_REGIONAL_FORMAT: regional_format
}
request_map_string = converter.class_to_json(request_map)
request_map_string = cls._remove_field_for_request(request_map_string)
api_client = client.ApiClient(cls._get_api_context())
request_bytes = request_map_string.encode()
endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(),
cls._determine_monetary_account_id(
monetary_account_id))
response_raw = api_client.post(endpoint_url, request_bytes,
custom_headers)
return BunqResponseInt.cast_from_bunq_response(
cls._process_for_id(response_raw)
) | 0.004171 |
def _concat_rangeindex_same_dtype(indexes):
"""
Concatenates multiple RangeIndex instances. All members of "indexes" must
be of type RangeIndex; result will be RangeIndex if possible, Int64Index
otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
from pandas import Int64Index, RangeIndex
start = step = next = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in indexes if len(obj)]
for obj in non_empty_indexes:
if start is None:
# This is set by the first non-empty index
start = obj._start
if step is None and len(obj) > 1:
step = obj._step
elif step is None:
# First non-empty index had only one element
if obj._start == start:
return _concat_index_same_dtype(indexes, klass=Int64Index)
step = obj._start - start
non_consecutive = ((step != obj._step and len(obj) > 1) or
(next is not None and obj._start != next))
if non_consecutive:
return _concat_index_same_dtype(indexes, klass=Int64Index)
if step is not None:
next = obj[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1]._stop if next is None else next
return RangeIndex(start, stop, step)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0) | 0.000596 |
def review_metadata_csv(filedir, input_filepath):
"""
Check validity of metadata fields.
    :param filedir: This field is the path of the directory that the metadata
        csv describes.
    :param input_filepath: This field is the path of the metadata csv file to
        validate.
"""
try:
metadata = load_metadata_csv(input_filepath)
except ValueError as e:
print_error(e)
return False
with open(input_filepath) as f:
csv_in = csv.reader(f)
header = next(csv_in)
n_headers = len(header)
if header[0] == 'filename':
res = review_metadata_csv_single_user(filedir, metadata,
csv_in, n_headers)
return res
if header[0] == 'project_member_id':
res = review_metadata_csv_multi_user(filedir, metadata,
csv_in, n_headers)
return res | 0.000954 |
def _build_cm(self, cm):
"""Convert the passed CM to the proper format, or construct the
unitary CM if none was provided.
"""
if cm is None:
# Assume all are connected.
cm = np.ones((self.size, self.size))
else:
cm = np.array(cm)
utils.np_immutable(cm)
return (cm, utils.np_hash(cm)) | 0.005305 |