text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---
def exception_format():
"""
Convert exception info into a string suitable for display.
"""
return "".join(traceback.format_exception(
sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
)) | 0.004545 |
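A minimal usage sketch, assuming the `exception_format` helper above is in scope together with the `sys` and `traceback` imports it relies on; the string it returns is only meaningful inside an `except` block.

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    # exception_format() returns the same text the interpreter would print
    # for the exception that is currently being handled.
    print(exception_format())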
def serial_number(self, serial_number):
"""
Sets the serial_number of this DeviceDataPostRequest.
The serial number of the device.
:param serial_number: The serial_number of this DeviceDataPostRequest.
:type: str
"""
if serial_number is not None and len(serial_number) > 64:
raise ValueError("Invalid value for `serial_number`, length must be less than or equal to `64`")
self._serial_number = serial_number | 0.006186 |
def create_from_textgrid(self, word_list):
""" Fills the ParsedResponse object with a list of TextGrid.Word objects originally from a .TextGrid file.
:param list word_list: List of TextGrid.Word objects corresponding to words/tokens in the subject response.
Modifies:
- self.timing_included: TextGrid files include timing information
- self.unit_list: fills it with Unit objects derived from the word_list argument.
If the type is 'SEMANTIC', the words in these units are automatically lemmatized and
made into compound words where appropriate.
"""
self.timing_included = True
for i, entry in enumerate(word_list):
self.unit_list.append(Unit(entry, format="TextGrid",
type=self.type,
index_in_timed_response=i))
# combine compound words, remove pluralizations, etc
if self.type == "SEMANTIC":
self.lemmatize()
self.tokenize() | 0.006598 |
def list_attached_team(context, id, sort, limit, where, verbose):
"""list_attached_team(context, id, sort, limit. where. verbose)
List teams attached to a topic.
>>> dcictl topic-list-team
:param string id: ID of the topic to list teams for [required]
:param string sort: Field to apply sort
:param integer limit: Max number of rows to return
:param string where: An optional filter criteria
:param boolean verbose: Display verbose output
"""
result = topic.list_teams(context, id=id, sort=sort, limit=limit,
where=where)
utils.format_output(result, context.format, verbose=verbose) | 0.001517 |
def offer_answer(pool, answer, rationale, student_id, algo, options):
"""
submit a student answer to the answer pool
    The answer may be selected to stay in the pool depending on the selection algorithm
Args:
pool (dict): answer pool
Answer pool format:
{
option1_index: {
'student_id': { can store algorithm specific info here },
...
}
option2_index: ...
}
answer (int): the option student selected
rationale (str): the rationale text
student_id (str): student identifier
algo (str): the selection algorithm
options (dict): the options available in the question
Raises:
UnknownChooseAnswerAlgorithm: when we don't know the algorithm
"""
if algo['name'] == 'simple':
offer_simple(pool, answer, rationale, student_id, options)
elif algo['name'] == 'random':
offer_random(pool, answer, rationale, student_id, options)
else:
raise UnknownChooseAnswerAlgorithm() | 0.001821 |
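To make the pool format concrete, here is a small invented pool for a two-option question; the option indices, student ids, and per-student payloads are illustrative only, and `offer_simple`/`offer_random` are assumed to be defined elsewhere in the module, so the call itself is shown commented out.

# Hypothetical pool after three students have answered a two-option question.
pool = {
    0: {                                  # option index 0
        "student_a": {},                  # algorithm-specific info per student
        "student_b": {"shown_count": 2},
    },
    1: {                                  # option index 1
        "student_c": {},
    },
}

# With algo={'name': 'simple'} the call dispatches to offer_simple();
# an unrecognised name raises UnknownChooseAnswerAlgorithm.
# offer_answer(pool, answer=1, rationale="because ...", student_id="student_d",
#              algo={"name": "simple"}, options={0: "Option A", 1: "Option B"})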
def find_descriptor_schemas(self, schema_file):
"""Find descriptor schemas in given path."""
if not schema_file.lower().endswith(('.yml', '.yaml')):
return []
with open(schema_file) as fn:
schemas = yaml.load(fn, Loader=yaml.FullLoader)
if not schemas:
self.stderr.write("Could not read YAML file {}".format(schema_file))
return []
descriptor_schemas = []
for schema in schemas:
if 'schema' not in schema:
continue
descriptor_schemas.append(schema)
return descriptor_schemas | 0.004823 |
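A standalone illustration of the filtering step, using `yaml.safe_load` on an inline document instead of a file; the slugs and fields are invented and PyYAML is assumed to be installed.

import yaml

text = """
- slug: sample-info          # filtered out: no 'schema' key
- slug: annotated-sample
  schema:
    - name: species
      type: basic:string
"""

schemas = [entry for entry in yaml.safe_load(text) if 'schema' in entry]
print(len(schemas))   # 1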
def delete(self, group_id, nick=None):
'''xxxxx.xxxxx.adgroup.delete
===================================
        Delete an ad group.'''
request = TOPRequest('xxxxx.xxxxx.adgroup.delete')
request['group_id'] = group_id
        if nick is not None: request['nick'] = nick
self.create(self.execute(request), fields=['success','result','success','result_code','result_message'], models={'result':ADGroup})
return self.result | 0.024444 |
def _is_ignorable_404(uri):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
urls = getattr(django_settings, "IGNORABLE_404_URLS", ())
return any(pattern.search(uri) for pattern in urls) | 0.004219 |
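For context, Django's `IGNORABLE_404_URLS` setting is expected to contain compiled regular expressions, since the function calls `pattern.search(uri)`; a minimal settings sketch with patterns chosen purely for illustration:

import re

# settings.py (illustrative patterns only)
IGNORABLE_404_URLS = (
    re.compile(r'\.(php|cgi)$'),       # probes for PHP/CGI endpoints
    re.compile(r'^/favicon\.ico$'),    # browsers asking for a missing favicon
)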
def _flow(self, args):
"""Enable/disable flow from peer
This method asks the peer to pause or restart the flow of
content data. This is a simple flow-control mechanism that a
        peer can use to avoid overflowing its queues or otherwise
finding itself receiving more messages than it can process.
Note that this method is not intended for window control. The
peer that receives a request to stop sending content should
finish sending the current content, if any, and then wait
until it receives a Flow restart method.
RULE:
When a new channel is opened, it is active. Some
applications assume that channels are inactive until
started. To emulate this behaviour a client MAY open the
channel, then pause it.
RULE:
When sending content data in multiple frames, a peer
SHOULD monitor the channel for incoming methods and
respond to a Channel.Flow as rapidly as possible.
RULE:
A peer MAY use the Channel.Flow method to throttle
incoming content data for internal reasons, for example,
        when exchanging data over a slower connection.
RULE:
The peer that requests a Channel.Flow method MAY
disconnect and/or ban a peer that does not respect the
request.
PARAMETERS:
active: boolean
start/stop content frames
If True, the peer starts sending content frames. If
False, the peer stops sending content frames.
"""
self.active = args.read_bit()
self._x_flow_ok(self.active) | 0.001158 |
def _get_error_page_callback(self):
"""Return an error page for the current response status."""
if self.response.status in self._error_handlers:
return self._error_handlers[self.response.status]
elif None in self._error_handlers:
return self._error_handlers[None]
else:
# Rudimentary error handler if no error handler was found
self.response.media_type = 'text/plain'
return lambda: self.response.status_line | 0.004 |
def to_sparse(self, format='csr', **kwargs):
"""Convert into a sparse matrix.
Parameters
----------
format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}
Sparse matrix format.
kwargs : keyword arguments
Passed through to sparse matrix constructor.
Returns
-------
m : scipy.sparse.spmatrix
Sparse matrix
Notes
-----
If a mask has been set, it is ignored by this function.
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
... [[0, 1], [0, 1]],
... [[1, 1], [0, 0]],
... [[0, 0], [-1, -1]]], dtype='i1')
>>> m = g.to_sparse(format='csr')
>>> m
<4x4 sparse matrix of type '<class 'numpy.int8'>'
with 6 stored elements in Compressed Sparse Row format>
>>> m.data
array([ 1, 1, 1, 1, -1, -1], dtype=int8)
>>> m.indices
array([1, 3, 0, 1, 2, 3], dtype=int32)
>>> m.indptr
array([0, 0, 2, 4, 6], dtype=int32)
"""
h = self.to_haplotypes()
m = h.to_sparse(format=format, **kwargs)
return m | 0.001547 |
def get_collection(collection):
"""Return the appropriate *Response* for retrieving a collection of
resources.
:param string collection: a :class:`sandman.model.Model` endpoint
:rtype: :class:`flask.Response`
"""
cls = endpoint_class(collection)
resources = retrieve_collection(collection, request.args)
_validate(cls, request.method, resources)
start = stop = None
if request.args and 'page' in request.args:
page = int(request.args['page'])
results_per_page = app.config.get('RESULTS_PER_PAGE', 20)
start, stop = page * results_per_page, (page + 1) * results_per_page
return collection_response(cls, resources, start, stop) | 0.00129 |
def get_file_contents_text(
filename: str = None, blob: bytes = None,
config: TextProcessingConfig = _DEFAULT_CONFIG) -> str:
"""
Returns the string contents of a file, or of a BLOB.
"""
binary_contents = get_file_contents(filename=filename, blob=blob)
# 1. Try the encoding the user specified
if config.encoding:
try:
return binary_contents.decode(config.encoding)
except ValueError: # of which UnicodeDecodeError is more specific
# ... https://docs.python.org/3/library/codecs.html
pass
# 2. Try the system encoding
sysdef = sys.getdefaultencoding()
if sysdef != config.encoding:
try:
return binary_contents.decode(sysdef)
except ValueError:
pass
# 3. Try the best guess from chardet
# http://chardet.readthedocs.io/en/latest/usage.html
if chardet:
guess = chardet.detect(binary_contents)
if guess['encoding']:
return binary_contents.decode(guess['encoding'])
raise ValueError("Unknown encoding ({})".format(
"filename={}".format(repr(filename)) if filename else "blob")) | 0.000853 |
def _get_video(edx_video_id):
"""
Get a Video instance, prefetching encoded video and course information.
Raises ValVideoNotFoundError if the video cannot be retrieved.
"""
try:
return Video.objects.prefetch_related("encoded_videos", "courses").get(edx_video_id=edx_video_id)
except Video.DoesNotExist:
error_message = u"Video not found for edx_video_id: {0}".format(edx_video_id)
raise ValVideoNotFoundError(error_message)
except Exception:
error_message = u"Could not get edx_video_id: {0}".format(edx_video_id)
logger.exception(error_message)
raise ValInternalError(error_message) | 0.004545 |
def _create_solr_query(self, line):
"""Actual search - easier to test. """
p0 = ""
if line:
p0 = line.strip()
p1 = self._query_string_to_solr_filter(line)
p2 = self._object_format_to_solr_filter(line)
p3 = self._time_span_to_solr_filter()
result = p0 + p1 + p2 + p3
return result.strip() | 0.00551 |
def addValue(self, type_uri, value):
"""Add a single value for the given attribute type to the
message. If there are already values specified for this type,
this value will be sent in addition to the values already
specified.
@param type_uri: The URI for the attribute
@param value: The value to add to the response to the relying
party for this attribute
@type value: unicode
@returns: None
"""
try:
values = self.data[type_uri]
except KeyError:
values = self.data[type_uri] = []
values.append(value) | 0.00314 |
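The try/except-KeyError idiom above is the classic way to grow a per-key list; `dict.setdefault` expresses the same thing in one line, illustrated here with a plain dict and made-up attribute URIs:

data = {}
for type_uri, value in [("uri:a", "x"), ("uri:a", "y"), ("uri:b", "z")]:
    # Create the list on first access, then append - same effect as the
    # try/except in addValue() above.
    data.setdefault(type_uri, []).append(value)
print(data)   # {'uri:a': ['x', 'y'], 'uri:b': ['z']}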
def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with :math:`RelWB \\leq RelWZ`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> lnk(ACKER)
>>> relwz.values = 0.5
>>> relwb(0.2, 0.5, 0.8)
>>> relwb
relwb(0.2, 0.5, 0.5)
"""
if upper is None:
upper = getattr(self.subpars.relwz, 'value', None)
lland_parameters.ParameterSoil.trim(self, lower, upper) | 0.003914 |
async def set_heating_level(self, level, duration=0):
"""Update heating data json."""
url = '{}/devices/{}'.format(API_URL, self.device.deviceid)
# Catch bad inputs
level = 10 if level < 10 else level
level = 100 if level > 100 else level
if self.side == 'left':
data = {
'leftHeatingDuration': duration,
'leftTargetHeatingLevel': level
}
elif self.side == 'right':
data = {
'rightHeatingDuration': duration,
'rightTargetHeatingLevel': level
}
set_heat = await self.device.api_put(url, data)
if set_heat is None:
_LOGGER.error('Unable to set eight heating level.')
else:
# Standard device json is returned after setting
self.device.handle_device_json(set_heat['device']) | 0.00222 |
def rename_pickled_ontology(filename, newname):
""" try to rename a cached ontology """
pickledfile = ONTOSPY_LOCAL_CACHE + "/" + filename + ".pickle"
newpickledfile = ONTOSPY_LOCAL_CACHE + "/" + newname + ".pickle"
if os.path.isfile(pickledfile) and not GLOBAL_DISABLE_CACHE:
os.rename(pickledfile, newpickledfile)
return True
else:
return None | 0.025281 |
def sendWebmention(sourceURL, targetURL, webmention=None, test_urls=True, vouchDomain=None,
headers={}, timeout=None, debug=False):
"""Send to the :targetURL: a WebMention for the :sourceURL:
The WebMention will be discovered if not given in the :webmention:
parameter.
:param sourceURL: URL that is referencing :targetURL:
:param targetURL: URL of mentioned post
:param webmention: optional WebMention endpoint
:param test_urls: optional flag to test URLs for validation
:param headers: optional headers to send with any web requests
    :type headers: dict
    :param timeout: optional timeout for web requests
    :type timeout: float
    :rtype: requests Response object if the WebMention endpoint was valid
"""
if test_urls:
v = URLValidator()
v(sourceURL)
v(targetURL)
debugOutput = []
originalURL = targetURL
try:
targetRequest = requests.get(targetURL)
if targetRequest.status_code == requests.codes.ok:
if len(targetRequest.history) > 0:
redirect = targetRequest.history[-1]
if (redirect.status_code == 301 or redirect.status_code == 302) and 'Location' in redirect.headers:
targetURL = urljoin(targetURL, redirect.headers['Location'])
debugOutput.append('targetURL redirected: %s' % targetURL)
if webmention is None:
wStatus, wUrl = discoverEndpoint(targetURL, headers=headers, timeout=timeout, request=targetRequest)
else:
wStatus = 200
wUrl = webmention
debugOutput.append('endpointURL: %s %s' % (wStatus, wUrl))
if wStatus == requests.codes.ok and wUrl is not None:
if test_urls:
v(wUrl)
payload = {'source': sourceURL,
'target': originalURL}
if vouchDomain is not None:
payload['vouch'] = vouchDomain
try:
result = requests.post(wUrl, data=payload, headers=headers, timeout=timeout)
debugOutput.append('POST %s -- %s' % (wUrl, result.status_code))
if result.status_code == 405 and len(result.history) > 0:
redirect = result.history[-1]
if redirect.status_code == 301 and 'Location' in redirect.headers:
result = requests.post(redirect.headers['Location'], data=payload, headers=headers, timeout=timeout)
debugOutput.append('redirected POST %s -- %s' % (redirect.headers['Location'], result.status_code))
except Exception as e:
result = None
except (requests.exceptions.RequestException, requests.exceptions.ConnectionError,
requests.exceptions.HTTPError, requests.exceptions.URLRequired,
requests.exceptions.TooManyRedirects, requests.exceptions.Timeout):
debugOutput.append('exception during GET request')
result = None
return result | 0.003644 |
def with_transactional_resource(
transactional_resource_class,
resource_name,
reference_value_from=None
):
"""a class decorator for Crontabber Apps. This decorator will give access
to a resource connection source. Configuration will be automatically set
up and the cron app can expect to have attributes:
self.{resource_name}_connection_factory
self.{resource_name}_transaction_executor
available to use.
Within the setup, the RequiredConfig structure gets set up like this:
config.{resource_name}.{resource_name}_class = \
transactional_resource_class
config.{resource_name}.{resource_name}_transaction_executor_class = \
'crontabber.transaction_executor.TransactionExecutor'
parameters:
transactional_resource_class - a string representing the full path of
the class that represents a connection to the resource. An example
is "crontabber.connection_factory.ConnectionFactory".
resource_name - a string that will serve as an identifier for this
resource within the mixin. For example, if the resource is
'database' we'll see configman namespace in the cron job section
of "...class-SomeCronJob.database.database_connection_class" and
"...class-SomeCronJob.database.transaction_executor_class"
"""
def class_decorator(cls):
if not issubclass(cls, RequiredConfig):
raise Exception(
'%s must have RequiredConfig as a base class' % cls
)
new_req = cls.get_required_config()
new_req.namespace(resource_name)
new_req[resource_name].add_option(
'%s_class' % resource_name,
default=transactional_resource_class,
from_string_converter=class_converter,
reference_value_from=reference_value_from,
)
new_req[resource_name].add_option(
'%s_transaction_executor_class' % resource_name,
default='crontabber.transaction_executor.TransactionExecutor',
doc='a class that will execute transactions',
from_string_converter=class_converter,
reference_value_from=reference_value_from
)
cls.required_config = new_req
#------------------------------------------------------------------
def new__init__(self, *args, **kwargs):
# instantiate the connection class for the resource
super(cls, self).__init__(*args, **kwargs)
setattr(
self,
"%s_connection_factory" % resource_name,
self.config[resource_name]['%s_class' % resource_name](
self.config[resource_name]
)
)
# instantiate a transaction executor bound to the
# resource connection
setattr(
self,
"%s_transaction_executor" % resource_name,
self.config[resource_name][
'%s_transaction_executor_class' % resource_name
](
self.config[resource_name],
getattr(self, "%s_connection_factory" % resource_name)
)
)
if hasattr(cls, '__init__'):
original_init = cls.__init__
def both_inits(self, *args, **kwargs):
new__init__(self, *args, **kwargs)
return original_init(self, *args, **kwargs)
cls.__init__ = both_inits
else:
cls.__init__ = new__init__
return cls
return class_decorator | 0.000547 |
def GRUCell(units):
"""Builds a traditional GRU cell with dense internal transformations.
Gated Recurrent Unit paper: https://arxiv.org/abs/1412.3555
Args:
units: Number of hidden units.
Returns:
A Stax model representing a traditional GRU RNN cell.
"""
return GeneralGRUCell(
candidate_transform=lambda: core.Dense(units=units),
memory_transform=combinators.Identity,
gate_nonlinearity=core.Sigmoid,
candidate_nonlinearity=core.Tanh) | 0.006224 |
def add_transition_to_state(from_port, to_port):
"""Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred
"""
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView
from_state_v = from_port.parent
to_state_v = to_port.parent
from_state_m = from_state_v.model
to_state_m = to_state_v.model
# Gather necessary information to create transition
from_state_id = from_state_m.state.state_id
to_state_id = to_state_m.state.state_id
responsible_parent_m = None
# Start transition
if isinstance(from_port, IncomeView):
from_state_id = None
from_outcome_id = None
responsible_parent_m = from_state_m
# Transition from parent income to child income
if isinstance(to_port, IncomeView):
to_outcome_id = None
# Transition from parent income to parent outcome
elif isinstance(to_port, OutcomeView):
to_outcome_id = to_port.outcome_id
elif isinstance(from_port, OutcomeView):
from_outcome_id = from_port.outcome_id
# Transition from child outcome to child income
if isinstance(to_port, IncomeView):
responsible_parent_m = from_state_m.parent
to_outcome_id = None
# Transition from child outcome to parent outcome
elif isinstance(to_port, OutcomeView):
responsible_parent_m = to_state_m
to_outcome_id = to_port.outcome_id
else:
raise ValueError("Invalid port type")
from rafcon.gui.models.container_state import ContainerStateModel
if not responsible_parent_m:
logger.error("Transitions only exist between incomes and outcomes. Given: {0} and {1}".format(type(
from_port), type(to_port)))
return False
elif not isinstance(responsible_parent_m, ContainerStateModel):
logger.error("Transitions only exist in container states (e.g. hierarchy states)")
return False
try:
t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id)
if from_state_id == to_state_id:
gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id,
combined_action=True)
return True
except (ValueError, AttributeError, TypeError) as e:
logger.error("Transition couldn't be added: {0}".format(e))
return False | 0.002728 |
def dump_by_server(self, hosts):
"""Returns the output of dump for each server.
:param hosts: comma separated lists of members of the ZK ensemble.
:returns: A dictionary of ((server_ip, port), ClientInfo).
"""
dump_by_endpoint = {}
for endpoint in self._to_endpoints(hosts):
try:
out = self.cmd([endpoint], "dump")
except self.CmdFailed as ex:
out = ""
dump_by_endpoint[endpoint] = out
return dump_by_endpoint | 0.003724 |
def modifiedaminoacids(df):
"""
Calculate the number of modified amino acids in supplied ``DataFrame``.
Returns the total of all modifications and the total for each amino acid individually, as an ``int`` and a
``dict`` of ``int``, keyed by amino acid, respectively.
:param df: Pandas ``DataFrame`` containing processed data.
:return: total_aas ``int`` the total number of all modified amino acids
quants ``dict`` of ``int`` keyed by amino acid, giving individual counts for each aa.
"""
amino_acids = list(df['Amino acid'].values)
aas = set(amino_acids)
quants = {}
for aa in aas:
quants[aa] = amino_acids.count(aa)
total_aas = len(amino_acids)
return total_aas, quants | 0.005284 |
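A quick look at the return shape, assuming the function above is in scope and pandas is installed; the amino-acid values are invented.

import pandas as pd

df = pd.DataFrame({"Amino acid": ["S", "T", "S", "Y", "S"]})
total, per_aa = modifiedaminoacids(df)
print(total)    # 5
print(per_aa)   # {'S': 3, 'T': 1, 'Y': 1}  (key order may vary)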
def _interpret_response(self, response, payload, expected_status):
# type: (Response, dict, Container[int]) -> dict
"""
Interprets the HTTP response from the node.
:param response:
The response object received from
:py:meth:`_send_http_request`.
:param payload:
The request payload that was sent (used for debugging).
:param expected_status:
The response should match one of these status codes to be
considered valid.
"""
raw_content = response.text
if not raw_content:
raise with_context(
exc=BadApiResponse(
'Empty {status} response from node.'.format(
status=response.status_code,
),
),
context={
'request': payload,
},
)
try:
decoded = json.loads(raw_content) # type: dict
# :bc: py2k doesn't have JSONDecodeError
except ValueError:
raise with_context(
exc=BadApiResponse(
'Non-JSON {status} response from node: '
'{raw_content}'.format(
status=response.status_code,
raw_content=raw_content,
)
),
context={
'request': payload,
'raw_response': raw_content,
},
)
if not isinstance(decoded, dict):
raise with_context(
exc=BadApiResponse(
'Malformed {status} response from node: {decoded!r}'.format(
status=response.status_code,
decoded=decoded,
),
),
context={
'request': payload,
'response': decoded,
},
)
if response.status_code in expected_status:
return decoded
error = None
try:
if response.status_code == codes['bad_request']:
error = decoded['error']
elif response.status_code == codes['internal_server_error']:
error = decoded['exception']
except KeyError:
pass
raise with_context(
exc=BadApiResponse(
'{status} response from node: {error}'.format(
error=error or decoded,
status=response.status_code,
),
),
context={
'request': payload,
'response': decoded,
},
) | 0.001447 |
def _pack_target_set_default_reset_type(self):
"""! @brief Set's the first core's default reset type to the one specified in the pack."""
if 0 in self.cores:
self.cores[0].default_reset_type = self._pack_device.default_reset_type | 0.015564 |
def find_ui_tree_entity(entity_id=None, entity_value=None, entity_ca=None):
"""
find the Ariane UI tree menu entity depending on its id (priority), value or context address
:param entity_id: the Ariane UI tree menu ID to search
:param entity_value: the Ariane UI tree menu Value to search
:param entity_ca: the Ariane UI tree menu context address to search
:return:
"""
LOGGER.debug("InjectorUITreeService.find_ui_tree_entity")
operation = None
search_criteria = None
criteria_value = None
if entity_id is not None:
operation = 'GET_TREE_MENU_ENTITY_I'
search_criteria = 'id'
criteria_value = entity_id
if operation is None and entity_value is not None:
operation = 'GET_TREE_MENU_ENTITY_V'
search_criteria = 'value'
criteria_value = entity_value
if operation is None and entity_ca is not None:
operation = 'GET_TREE_MENU_ENTITY_C'
search_criteria = 'context address'
criteria_value = entity_ca
ret = None
if operation is not None:
args = {'properties': {'OPERATION': operation, 'TREE_MENU_ENTITY_ID': criteria_value}}
result = InjectorUITreeService.requester.call(args).get()
if result.rc == 0:
ret = InjectorUITreeEntity.json_2_injector_ui_tree_menu_entity(result.response_content)
elif result.rc != 404:
err_msg = 'InjectorUITreeService.find_ui_tree_entity - Problem while finding ' \
'injector UI Tree Menu Entity ('+search_criteria+':' + \
str(criteria_value) + '). ' + \
'Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \
" (" + str(result.rc) + ")"
LOGGER.warning(err_msg)
return ret | 0.004069 |
def _growing_step_sequence(interval_growth, max_interval, init_interval, start_level=None):
"""
Returns an iterator that constructs a sequence of trigger levels with growing intervals.
The interval is growing exponentially until it reaches the maximum value. Then the interval
stays the same and the sequence becomes linear.
An optional starting level `start_level` defaults to the initial interval. The interval
starts out as `init_interval`, multiplied by `interval_growth` in each step until it
reaches the `max_interval`.
"""
interval = init_interval
next_level = start_level or init_interval
while True:
yield next_level
interval = min(interval * interval_growth, max_interval)
next_level += interval | 0.008434 |
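The generator above never terminates, so callers have to slice it; a worked example (the values follow directly from the code above), assuming the generator is in scope:

from itertools import islice

# The interval starts at 1, doubles each step, and is capped at 8,
# after which the sequence grows linearly.
levels = list(islice(
    _growing_step_sequence(interval_growth=2, max_interval=8, init_interval=1), 8))
print(levels)   # [1, 3, 7, 15, 23, 31, 39, 47]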
def _set_apply_dscp_exp_map_name(self, v, load=False):
"""
Setter method for apply_dscp_exp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_dscp_exp_map_name (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_apply_dscp_exp_map_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_apply_dscp_exp_map_name() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=apply_dscp_exp_map_name.apply_dscp_exp_map_name, is_container='container', presence=False, yang_name="apply-dscp-exp-map-name", rest_name="dscp-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply dscp exp map', u'cli-sequence-commands': None, u'alt-name': u'dscp-exp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """apply_dscp_exp_map_name must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=apply_dscp_exp_map_name.apply_dscp_exp_map_name, is_container='container', presence=False, yang_name="apply-dscp-exp-map-name", rest_name="dscp-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply dscp exp map', u'cli-sequence-commands': None, u'alt-name': u'dscp-exp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""",
})
self.__apply_dscp_exp_map_name = t
if hasattr(self, '_set'):
self._set() | 0.005558 |
def send_response(self, transaction):
"""
updates the cache with the response if there was a cache miss
:param transaction:
:return:
"""
if transaction.cacheHit is False:
"""
handling response based on the code
"""
logger.debug("handling response")
self._handle_response(transaction)
return transaction | 0.004773 |
def mutate(self, p_i, func_set, term_set):  #, max_depth=2
"""point mutation, addition, removal"""
self.point_mutate(p_i,func_set,term_set) | 0.059603 |
def mainset(self):
"""Returns information regarding the set"""
if self.mainsetcache:
return self.mainsetcache
set_uri = self.get_set_uri()
for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen ?setempty WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } OPTIONAL { ?seturi fsd:empty ?setempty } FILTER NOT EXISTS { ?y skos:member ?seturi . ?y rdf:type skos:Collection } }"):
self.mainsetcache = {'uri': str(row.seturi), 'id': str(row.setid), 'label': str(row.setlabel) if row.setlabel else "", 'open': bool(row.setopen), 'empty': bool(row.setempty) }
return self.mainsetcache
raise DeepValidationError("Unable to find main set (set_uri=" + str(set_uri)+"), this should not happen") | 0.006726 |
def fetch_logs(self, lambda_name, filter_pattern='', limit=10000, start_time=0):
"""
Fetch the CloudWatch logs for a given Lambda name.
"""
log_name = '/aws/lambda/' + lambda_name
streams = self.logs_client.describe_log_streams(
logGroupName=log_name,
descending=True,
orderBy='LastEventTime'
)
all_streams = streams['logStreams']
all_names = [stream['logStreamName'] for stream in all_streams]
        events = []
        response = {}
        # Amazon uses millisecond epoch for some reason.
        # Thanks, Jeff.
        # Convert once, outside the pagination loop, so repeated iterations
        # do not multiply the timestamps again.
        start_time = start_time * 1000
        end_time = int(time.time()) * 1000
        while not response or 'nextToken' in response:
            extra_args = {}
            if 'nextToken' in response:
                extra_args['nextToken'] = response['nextToken']
response = self.logs_client.filter_log_events(
logGroupName=log_name,
logStreamNames=all_names,
startTime=start_time,
endTime=end_time,
filterPattern=filter_pattern,
limit=limit,
interleaved=True, # Does this actually improve performance?
**extra_args
)
if response and 'events' in response:
events += response['events']
return sorted(events, key=lambda k: k['timestamp']) | 0.002725 |
def _handle_get(self, method, remainder, request=None):
'''
Routes ``GET`` actions to the appropriate controller.
'''
if request is None:
self._raise_method_deprecation_warning(self._handle_get)
# route to a get_all or get if no additional parts are available
if not remainder or remainder == ['']:
remainder = list(six.moves.filter(bool, remainder))
controller = self._find_controller('get_all', 'get')
if controller:
self._handle_bad_rest_arguments(controller, remainder, request)
return controller, []
abort(405)
method_name = remainder[-1]
# check for new/edit/delete GET requests
if method_name in ('new', 'edit', 'delete'):
if method_name == 'delete':
method_name = 'get_delete'
controller = self._find_controller(method_name)
if controller:
return controller, remainder[:-1]
match = self._handle_custom_action(method, remainder, request)
if match:
return match
controller = self._lookup_child(remainder[0])
if controller and not ismethod(controller):
return lookup_controller(controller, remainder[1:], request)
# finally, check for the regular get_one/get requests
controller = self._find_controller('get_one', 'get')
if controller:
self._handle_bad_rest_arguments(controller, remainder, request)
return controller, remainder
abort(405) | 0.001255 |
def termLons(TERMS):
""" Returns a list with the absolute longitude
of all terms.
"""
res = []
for i, sign in enumerate(SIGN_LIST):
termList = TERMS[sign]
res.extend([
ID,
sign,
start + 30 * i,
] for (ID, start, end) in termList)
return res | 0.009091 |
def quote_split(sep, string):
    """
    Splits the string into pieces divided by sep, when sep is not inside quotes.
"""
if len(sep) != 1: raise Exception("Separation string must be one character long")
retlist = []
squote = False
dquote = False
left = 0
i = 0
while i < len(string):
if string[i] == '"' and not dquote:
if not squote:
squote = True
elif (i+1) < len(string) and string[i+1] == '"':
i += 1
else:
squote = False
elif string[i] == "'" and not squote:
if not dquote:
dquote = True
elif (i+1) < len(string) and string[i+1] == "'":
i += 1
else:
dquote = False
elif string[i] == sep and not dquote and not squote:
retlist.append(string[left:i])
left = i + 1
i += 1
retlist.append(string[left:])
return retlist | 0.006024 |
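Two quick calls showing the behaviour, assuming the function above is in scope; note that the quote characters are kept in the returned pieces.

print(quote_split(',', 'a,"b,c",d'))          # ['a', '"b,c"', 'd']
print(quote_split(' ', "say 'hello world'"))  # ['say', "'hello world'"]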
def list_abundance_cartesian_expansion(graph: BELGraph) -> None:
"""Expand all list abundances to simple subject-predicate-object networks."""
for u, v, k, d in list(graph.edges(keys=True, data=True)):
if CITATION not in d:
continue
if isinstance(u, ListAbundance) and isinstance(v, ListAbundance):
for u_member, v_member in itt.product(u.members, v.members):
graph.add_qualified_edge(
u_member, v_member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(u, ListAbundance):
for member in u.members:
graph.add_qualified_edge(
member, v,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(v, ListAbundance):
for member in v.members:
graph.add_qualified_edge(
u, member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
_remove_list_abundance_nodes(graph) | 0.001378 |
def Email(v):
"""Verify that the value is an Email or not.
>>> s = Schema(Email())
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("a.com")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("[email protected]")
>>> with raises(MultipleInvalid, 'expected an Email'):
... s("[email protected]")
>>> s('[email protected]')
'[email protected]'
"""
try:
if not v or "@" not in v:
raise EmailInvalid("Invalid Email")
user_part, domain_part = v.rsplit('@', 1)
if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
raise EmailInvalid("Invalid Email")
return v
except:
raise ValueError | 0.004274 |
def receive_one_ping(mySocket, myID, timeout):
"""
    Receive the ping from the socket. The timeout is given in milliseconds.
"""
timeLeft = timeout/1000
while True: # Loop while waiting for packet or timeout
startedSelect = default_timer()
whatReady = select.select([mySocket], [], [], timeLeft)
howLongInSelect = (default_timer() - startedSelect)
if whatReady[0] == []: # Timeout
return None, 0, 0, 0, 0
timeReceived = default_timer()
recPacket, addr = mySocket.recvfrom(ICMP_MAX_RECV)
ipHeader = recPacket[:20]
iphVersion, iphTypeOfSvc, iphLength, \
iphID, iphFlags, iphTTL, iphProtocol, \
iphChecksum, iphSrcIP, iphDestIP = struct.unpack(
"!BBHHHBBHII", ipHeader
)
icmpHeader = recPacket[20:28]
icmpType, icmpCode, icmpChecksum, \
icmpPacketID, icmpSeqNumber = struct.unpack(
"!BBHHH", icmpHeader
)
if icmpPacketID == myID: # Our packet
dataSize = len(recPacket) - 28
#print (len(recPacket.encode()))
return timeReceived, (dataSize+8), iphSrcIP, icmpSeqNumber, iphTTL
timeLeft = timeLeft - howLongInSelect
if timeLeft <= 0:
return None, 0, 0, 0, 0 | 0.006245 |
def add_file(self, name, filename, compress_hint=True):
"""Saves the actual file in the store.
``compress_hint`` suggests whether the file should be compressed
before transfer
Works like :meth:`add_stream`, but ``filename`` is the name of
an existing file in the filesystem.
"""
return self.add_stream(name, open(filename, 'rb')) | 0.005013 |
def serialize_tag(tag, *, indent=None, compact=False, quote=None):
"""Serialize an nbt tag to its literal representation."""
serializer = Serializer(indent=indent, compact=compact, quote=quote)
return serializer.serialize(tag) | 0.004202 |
def validateDocumentFinal(self, doc):
"""Does the final step for the document validation once all
the incremental validation steps have been completed
basically it does the following checks described by the XML
Rec Check all the IDREF/IDREFS attributes definition for
validity """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidateDocumentFinal(self._o, doc__o)
return ret | 0.008264 |
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params) | 0.008955 |
def update_reward(self, new_reward):
"""
Updates reward value for policy.
:param new_reward: New reward to save.
"""
self.sess.run(self.model.update_reward,
feed_dict={self.model.new_reward: new_reward}) | 0.007547 |
def _StrftimeGm(value, unused_context, args):
"""Convert a timestamp in seconds to a string based on the format string.
Returns GM time.
"""
time_tuple = time.gmtime(value)
return _StrftimeHelper(args, time_tuple) | 0.004348 |
def call(obj, method, *args, **kwargs):
"""
Allows to call any method of any object with parameters.
Because come on! It's bloody stupid that Django's templating engine doesn't
allow that.
Usage::
{% call myobj 'mymethod' myvar foobar=myvar2 as result %}
{% call myobj 'mydict' 'mykey' as result %}
{% call myobj 'myattribute' as result %}
:param obj: The object which has the method that you would like to call
:param method: A string representing the attribute on the object that
should be called.
"""
function_or_dict_or_member = getattr(obj, method)
if callable(function_or_dict_or_member):
# If it is a function, let's call it
return function_or_dict_or_member(*args, **kwargs)
if not len(args):
# If it is a member, lets return it
return function_or_dict_or_member
# If it is a dict, let's access one of it's keys
return function_or_dict_or_member[args[0]] | 0.001019 |
def read_int64(self, little_endian=True):
"""
Read 8 bytes as a signed integer value from the stream.
Args:
little_endian (bool): specify the endianness. (Default) Little endian.
Returns:
int:
"""
if little_endian:
endian = "<"
else:
endian = ">"
return self.unpack('%sq' % endian, 8) | 0.007538 |
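The call above boils down to `struct.unpack` with a `q` (signed 64-bit) format code; a standalone illustration of the two byte orders:

import struct

# -1 encodes as eight 0xff bytes in either byte order.
print(struct.unpack("<q", b"\xff" * 8)[0])   # -1
print(struct.unpack(">q", b"\xff" * 8)[0])   # -1

# Byte order matters for anything asymmetric.
raw = struct.pack("<q", 1)                   # b'\x01' followed by seven zero bytes
print(struct.unpack("<q", raw)[0])           # 1
print(struct.unpack(">q", raw)[0])           # 72057594037927936 (2 ** 56)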
def unpacktar(tarfile, destdir):
""" Unpack given tarball into the specified dir """
nullfd = open(os.devnull, "w")
tarfile = cygpath(os.path.abspath(tarfile))
log.debug("unpack tar %s into %s", tarfile, destdir)
try:
check_call([TAR, '-xzf', tarfile], cwd=destdir,
stdout=nullfd, preexec_fn=_noumask)
except Exception:
log.exception("Error unpacking tar file %s to %s", tarfile, destdir)
raise
nullfd.close() | 0.002083 |
def _cryptography_cipher(key, iv):
"""Build a cryptography AES Cipher object.
:param bytes key: Encryption key
:param bytes iv: Initialization vector
:returns: AES Cipher instance
:rtype: cryptography.hazmat.primitives.ciphers.Cipher
"""
return Cipher(
algorithm=algorithms.AES(key),
mode=modes.CFB(iv),
backend=default_backend()
) | 0.002577 |
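A round-trip sketch using the helper above, assuming it and its `cryptography` imports are in scope; the key and IV sizes are the usual AES requirements (16/24/32-byte key, 16-byte IV) and the plaintext is arbitrary.

import os

key = os.urandom(32)    # AES-256 key
iv = os.urandom(16)     # CFB IV must be one AES block (16 bytes)

encryptor = _cryptography_cipher(key, iv).encryptor()
ciphertext = encryptor.update(b"attack at dawn") + encryptor.finalize()

decryptor = _cryptography_cipher(key, iv).decryptor()
assert decryptor.update(ciphertext) + decryptor.finalize() == b"attack at dawn"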
def set_naxis(self, idx, n):
"""Change the slice shown in the channel viewer.
`idx` is the slice index (0-based); `n` is the axis (0-based)
"""
self.play_idx = idx
self.logger.debug("naxis %d index is %d" % (n + 1, idx + 1))
image = self.fitsimage.get_image()
slidername = 'choose_naxis%d' % (n + 1)
try:
if image is None:
raise ValueError("Please load an image cube")
m = n - 2
self.naxispath[m] = idx
self.logger.debug("m=%d naxispath=%s" % (m, str(self.naxispath)))
image.set_naxispath(self.naxispath)
self.logger.debug("NAXIS%d slice %d loaded." % (n + 1, idx + 1))
if self.play_indices:
# save play index
self.play_indices[m] = idx
text = [i + 1 for i in self.naxispath]
if slidername in self.w:
self.w[slidername].set_value(text[m])
else:
text = idx + 1
if slidername in self.w:
self.w[slidername].set_value(text)
self.w.slice.set_text(str(text))
# schedule a redraw
self.fitsimage.redraw(whence=0)
except Exception as e:
errmsg = "Error loading NAXIS%d slice %d: %s" % (
n + 1, idx + 1, str(e))
self.logger.error(errmsg)
self.fv.error(errmsg) | 0.001366 |
def _remove_mapper_from_plotter(plotter, actor, reset_camera):
"""removes this actor's mapper from the given plotter's _scalar_bar_mappers"""
try:
mapper = actor.GetMapper()
except AttributeError:
return
for name in list(plotter._scalar_bar_mappers.keys()):
try:
plotter._scalar_bar_mappers[name].remove(mapper)
except ValueError:
pass
if len(plotter._scalar_bar_mappers[name]) < 1:
slot = plotter._scalar_bar_slot_lookup.pop(name)
plotter._scalar_bar_mappers.pop(name)
plotter._scalar_bar_ranges.pop(name)
plotter.remove_actor(plotter._scalar_bar_actors.pop(name), reset_camera=reset_camera)
plotter._scalar_bar_slots.add(slot)
return | 0.003851 |
def _find_port(self, host): # pylint: disable=no-self-use
"""
Finds port number from host. Defaults to 3000 if not found
:param host: host as string
:return: (host, port)
"""
ind = host.rfind(":")
if ind != -1:
try:
port = int(host[ind + 1:])
host = host[:ind]
except ValueError:
port = 3000
else:
port = 3000
return host, port | 0.004132 |
def iterate(self):
"""
Reads and handles data from the microcontroller over the serial port.
This method should be called in a main loop or in an :class:`Iterator`
instance to keep this boards pin values up to date.
"""
byte = self.sp.read()
if not byte:
return
data = ord(byte)
received_data = []
handler = None
if data < START_SYSEX:
            # These commands can have 'channel data' like a pin number appended.
try:
handler = self._command_handlers[data & 0xF0]
except KeyError:
return
received_data.append(data & 0x0F)
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
elif data == START_SYSEX:
data = ord(self.sp.read())
handler = self._command_handlers.get(data)
if not handler:
return
data = ord(self.sp.read())
while data != END_SYSEX:
received_data.append(data)
data = ord(self.sp.read())
else:
try:
handler = self._command_handlers[data]
except KeyError:
return
while len(received_data) < handler.bytes_needed:
received_data.append(ord(self.sp.read()))
# Handle the data
try:
handler(*received_data)
except ValueError:
pass | 0.001961 |
def create_archive(
self,
archive_name,
authority_name,
archive_path,
versioned,
raise_on_err=True,
metadata=None,
user_config=None,
tags=None,
helper=False):
'''
Create a new data archive
Returns
-------
archive : object
new :py:class:`~datafs.core.data_archive.DataArchive` object
'''
archive_metadata = self._create_archive_metadata(
archive_name=archive_name,
authority_name=authority_name,
archive_path=archive_path,
versioned=versioned,
raise_on_err=raise_on_err,
metadata=metadata,
user_config=user_config,
tags=tags,
helper=helper)
if raise_on_err:
self._create_archive(
archive_name,
archive_metadata)
else:
self._create_if_not_exists(
archive_name,
archive_metadata)
return self.get_archive(archive_name) | 0.001775 |
def posterior_covariance_between_points(self, X1, X2):
"""
Computes the posterior covariance between points.
:param X1: some input observations
:param X2: other input observations
"""
return self.posterior.covariance_between_points(self.kern, self.X, X1, X2) | 0.009772 |
def extract_gcc_binaries():
"""Try to find GCC on OSX for OpenMP support."""
patterns = ['/opt/local/bin/g++-mp-[0-9].[0-9]',
'/opt/local/bin/g++-mp-[0-9]',
'/usr/local/bin/g++-[0-9].[0-9]',
'/usr/local/bin/g++-[0-9]']
if 'darwin' in platform.platform().lower():
gcc_binaries = []
for pattern in patterns:
gcc_binaries += glob.glob(pattern)
gcc_binaries.sort()
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
return gcc
else:
return None
else:
return None | 0.0016 |
def _evalWeekday(self, datetimeString, sourceTime):
"""
Evaluate text passed by L{_partialParseWeekday()}
"""
s = datetimeString.strip()
sourceTime = self._evalDT(datetimeString, sourceTime)
# Given string is a weekday
yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
wkdy = self.ptc.WeekdayOffsets[s]
if wkdy > wd:
qty = self._CalculateDOWDelta(wd, wkdy, 2,
self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
else:
qty = self._CalculateDOWDelta(wd, wkdy, 2,
self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
target = start + datetime.timedelta(days=qty)
return target.timetuple() | 0.001984 |
def language(self):
"""The language of this file"""
try:
return self._language
except AttributeError:
self._language = find_language(self, getattr(self, 'exts', None))
return self._language | 0.008163 |
def get_absolute_url(self):
"""Determine where I am coming from and where I am going"""
# Determine if this configuration is on a stage
if self.stage:
# Stage specific configurations go back to the stage view
url = reverse('projects_stage_view', args=(self.project.pk, self.stage.pk))
else:
# Project specific configurations go back to the project page
url = self.project.get_absolute_url()
return url | 0.00611 |
def gen_otu_dict(nex_obj, nexson_version=None):
"""Takes a NexSON object and returns a dict of
otu_id -> otu_obj
"""
if nexson_version is None:
nexson_version = detect_nexson_version(nex_obj)
if _is_by_id_hbf(nexson_version):
otus = nex_obj['nexml']['otusById']
if len(otus) > 1:
d = {}
for v in otus.values():
d.update(v['otuById'])
return d
else:
return otus.values()[0]['otuById']
o_dict = {}
for ob in nex_obj.get('otus', []):
for o in ob.get('otu', []):
oid = o['@id']
o_dict[oid] = o
return o_dict | 0.001508 |
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw) | 0.008174 |
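The ``fields`` handling reduces to URL-encoding a mapping and appending it after a `?`; a standalone illustration with an invented URL and fields:

from urllib.parse import urlencode

url = "https://api.example.com/search"
fields = {"q": "tea", "page": 2}
print(url + "?" + urlencode(fields))   # https://api.example.com/search?q=tea&page=2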
async def anon_decrypt(wallet_handle: int,
recipient_vk: str,
encrypted_msg: bytes) -> bytes:
"""
Decrypts a message by anonymous-encryption scheme.
Sealed boxes are designed to anonymously send messages to a Recipient given its public key.
Only the Recipient can decrypt these messages, using its private key.
While the Recipient can verify the integrity of the message, it cannot verify the identity of the Sender.
Note to use DID keys with this function you can call key_for_did to get key id (verkey)
for specific DID.
Note: use unpack_message function for A2A goals.
:param wallet_handle: wallet handler (created by open_wallet).
:param recipient_vk: id (verkey) of my key. The key must be created by calling indy_create_key or create_and_store_my_did
:param encrypted_msg: encrypted message
:return: decrypted message as an array of bytes
"""
logger = logging.getLogger(__name__)
logger.debug("anon_decrypt: >>> wallet_handle: %r, recipient_vk: %r, encrypted_msg: %r",
wallet_handle,
recipient_vk,
encrypted_msg)
def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
return bytes(arr_ptr[:arr_len]),
if not hasattr(anon_decrypt, "cb"):
logger.debug("anon_decrypt: Creating callback")
anon_decrypt.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)
c_wallet_handle = c_int32(wallet_handle)
c_recipient_vk = c_char_p(recipient_vk.encode('utf-8'))
c_encrypted_msg_len = c_uint32(len(encrypted_msg))
decrypted_message = await do_call('indy_crypto_anon_decrypt',
c_wallet_handle,
c_recipient_vk,
encrypted_msg,
c_encrypted_msg_len,
anon_decrypt.cb)
logger.debug("crypto_box_seal_open: <<< res: %r", decrypted_message)
return decrypted_message | 0.003324 |
def listDataTypes(self, datatype="", dataset=""):
"""
API to list data types known to dbs (when no parameter supplied).
:param dataset: Returns data type (of primary dataset) of the dataset (Optional)
:type dataset: str
:param datatype: List specific data type
:type datatype: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
"""
try:
return self.dbsDataType.listDataType(dataType=datatype, dataset=dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDataTypes. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | 0.008946 |
def read_mesh(fname):
"""Read mesh data from file.
Parameters
----------
fname : str
File name to read. Format will be inferred from the filename.
Currently only '.obj' and '.obj.gz' are supported.
Returns
-------
vertices : array
Vertices.
faces : array | None
Triangle face definitions.
normals : array
Normals for the mesh.
texcoords : array | None
Texture coordinates.
"""
# Check format
fmt = op.splitext(fname)[1].lower()
if fmt == '.gz':
fmt = op.splitext(op.splitext(fname)[0])[1].lower()
if fmt in ('.obj'):
return WavefrontReader.read(fname)
    elif not fmt:
        raise ValueError('read_mesh could not determine the format.')
else:
raise ValueError('read_mesh does not understand format %s.' % fmt) | 0.001168 |
def update(self, properties=None):
"""Updates the properties being held for this instance.
:param properties: The list of properties to update.
:type properties: list or None (default). If None, update all
currently cached properties.
"""
if properties is None:
try:
self.update_view_data(properties=list(self._cache.keys()))
except AttributeError:
# We end up here and ignore it self._cache doesn't exist
pass
else:
self.update_view_data(properties=properties) | 0.003317 |
def qname(self):
"""
Get the B{fully} qualified name of this element
@return: The fully qualified name.
@rtype: basestring
"""
if self.prefix is None:
return self.name
else:
return '%s:%s' % (self.prefix, self.name) | 0.00678 |
def distance_correlation_sqr(x, y, **kwargs):
"""
distance_correlation_sqr(x, y, *, exponent=1)
Computes the usual (biased) estimator for the squared distance correlation
between two random vectors.
Parameters
----------
x: array_like
First random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
y: array_like
Second random vector. The columns correspond with the individual random
variables while the rows are individual instances of the random vector.
exponent: float
Exponent of the Euclidean distance, in the range :math:`(0, 2)`.
Equivalently, it is twice the Hurst parameter of fractional Brownian
motion.
Returns
-------
numpy scalar
Value of the biased estimator of the squared distance correlation.
See Also
--------
distance_correlation
u_distance_correlation_sqr
Notes
-----
The algorithm uses the fast distance covariance algorithm proposed in
:cite:`b-fast_distance_correlation` when possible.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[1], [0], [0], [1]])
>>> dcor.distance_correlation_sqr(a, a)
1.0
>>> dcor.distance_correlation_sqr(a, b) # doctest: +ELLIPSIS
0.2773500...
>>> dcor.distance_correlation_sqr(b, b)
1.0
>>> dcor.distance_correlation_sqr(a, b, exponent=0.5) # doctest: +ELLIPSIS
0.4493308...
"""
if _can_use_fast_algorithm(x, y, **kwargs):
return _distance_correlation_sqr_fast(x, y)
else:
return _distance_correlation_sqr_naive(x, y, **kwargs) | 0.000535 |
def get_posts(self, count=10, offset=0, recent=True, tag=None,
user_id=None, include_draft=False):
"""
Get posts given by filter criteria
:param count: The number of posts to retrieve (default 10). If count
is ``None``, all posts are returned.
:type count: int
:param offset: The number of posts to offset (default 0)
:type offset: int
:param recent: Order by recent posts or not
:type recent: bool
:param tag: Filter by a specific tag
:type tag: str
:param user_id: Filter by a specific user
:type user_id: str
:param include_draft: Whether to include posts marked as draft or not
:type include_draft: bool
:return: A list of posts, with each element a dict containing values
for the following keys: (title, text, draft, post_date,
last_modified_date). If count is ``None``, then all the posts are
returned.
"""
raise NotImplementedError("This method needs to be implemented by the "
"inheriting class") | 0.002653 |
async def set_volume(self, volume: int, *, device: Optional[SomeDevice] = None):
"""Set the volume for the user’s current playback device.
Parameters
----------
volume : int
The volume to set. Must be a value from 0 to 100 inclusive.
device : Optional[:obj:`SomeDevice`]
The Device object or id of the device this command is targeting.
If not supplied, the user’s currently active device is the target.
"""
await self._user.http.set_playback_volume(volume, device_id=str(device)) | 0.006993 |
def _move_el_inside_block(el, tag):
""" helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
and moves them inside any block-level tags. """
for child in el:
if _contains_block_level_tag(child):
break
else:
import sys
# No block-level tags in any child
children_tag = etree.Element(tag)
children_tag.text = el.text
el.text = None
children_tag.extend(list(el))
el[:] = [children_tag]
return
for child in list(el):
if _contains_block_level_tag(child):
_move_el_inside_block(child, tag)
if child.tail:
tail_tag = etree.Element(tag)
tail_tag.text = child.tail
child.tail = None
el.insert(el.index(child)+1, tail_tag)
else:
child_tag = etree.Element(tag)
el.replace(child, child_tag)
child_tag.append(child)
if el.text:
text_tag = etree.Element(tag)
text_tag.text = el.text
el.text = None
el.insert(0, text_tag) | 0.000909 |
def on_en_passant_valid_location(self):
"""
Finds out if pawn is on enemy center rank.
:rtype: bool
"""
return (self.color == color.white and self.location.rank == 4) or \
(self.color == color.black and self.location.rank == 3) | 0.007067 |
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super().copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result | 0.007435 |
def add_url(self, *args, **kwargs):
"""Add a new url to the sitemap.
This function can either be called with a :class:`UrlEntry`
or some keyword and positional arguments that are forwarded to
the :class:`UrlEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], UrlEntry):
self.urls.append(args[0])
else:
self.urls.append(UrlEntry(*args, **kwargs)) | 0.004396 |
def load_json_string(data):
"""
<Purpose>
Deserialize 'data' (JSON string) to a Python object.
<Arguments>
data:
A JSON string.
<Exceptions>
securesystemslib.exceptions.Error, if 'data' cannot be deserialized to a
Python object.
<Side Effects>
None.
<Returns>
Deserialized object. For example, a dictionary.
"""
deserialized_object = None
try:
deserialized_object = json.loads(data)
except TypeError:
message = 'Invalid JSON string: ' + repr(data)
raise securesystemslib.exceptions.Error(message)
except ValueError:
message = 'Cannot deserialize to a Python object: ' + repr(data)
raise securesystemslib.exceptions.Error(message)
else:
return deserialized_object | 0.009333 |
def read_url(url):
"""Reads given URL as JSON and returns data as loaded python object."""
logging.debug('reading {url} ...'.format(url=url))
token = os.environ.get("BOKEH_GITHUB_API_TOKEN")
headers = {}
if token:
headers['Authorization'] = 'token %s' % token
request = Request(url, headers=headers)
response = urlopen(request).read()
return json.loads(response.decode("UTF-8")) | 0.002392 |
def score_for_percentile_in(self, leaderboard_name, percentile):
'''
Calculate the score for a given percentile value in the leaderboard.
@param leaderboard_name [String] Name of the leaderboard.
@param percentile [float] Percentile value (0.0 to 100.0 inclusive).
@return the score corresponding to the percentile argument. Return +None+ for arguments outside 0-100 inclusive and for leaderboards with no members.
'''
if not 0 <= percentile <= 100:
return None
total_members = self.total_members_in(leaderboard_name)
if total_members < 1:
return None
if self.order == self.ASC:
percentile = 100 - percentile
index = (total_members - 1) * (percentile / 100.0)
scores = [
pair[1] for pair in self.redis_connection.zrange(
leaderboard_name, int(
math.floor(index)), int(
math.ceil(index)), withscores=True)]
if index == math.floor(index):
return scores[0]
else:
interpolate_fraction = index - math.floor(index)
return scores[0] + interpolate_fraction * (scores[1] - scores[0]) | 0.002435 |
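A worked instance of the interpolation at the end of the method, using invented numbers and no Redis; `index` falls between two ranks, so the result is a linear blend of their scores.

import math

total_members = 5
percentile = 62.5                                      # value after the optional ASC flip above
index = (total_members - 1) * (percentile / 100.0)     # 2.5
scores = [10.0, 20.0]                                  # scores at ranks floor(2.5) and ceil(2.5)
fraction = index - math.floor(index)                   # 0.5
print(scores[0] + fraction * (scores[1] - scores[0]))  # 15.0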
def get_sockinfo (host, port=None):
"""Return socket.getaddrinfo for given host and port."""
family, socktype = socket.AF_INET, socket.SOCK_STREAM
return socket.getaddrinfo(host, port, family, socktype) | 0.009346 |
def visit_NameConstant(self, node):
"""Tangent of e.g.
x = None
Lines of this type are sometimes auto-generated by reverse-mode,
and thus handling them is important for higher-order autodiff
We will shunt NameConstant tangents off to visit_Name, to prevent
code duplication.
"""
constant_val = {
True: 'True',
False: 'False',
None: 'None',
}[node.value]
new_node = gast.Name(id=constant_val,ctx=gast.Load(),annotation=None)
return self.visit_Name(new_node) | 0.005714 |
def add_data_point_xy(self, x, y):
"""Add a new data point to the data set to be smoothed."""
self.x.append(x)
self.y.append(y) | 0.013245 |
def as4_capability(self, **kwargs):
"""Set Spanning Tree state.
Args:
enabled (bool): Is AS4 Capability enabled? (True, False)
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
ValueError: if `enabled` are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.bgp.local_asn(local_as='65535',
... rbridge_id='225')
... output = dev.bgp.as4_capability(
... rbridge_id='225', enabled=True)
... output = dev.bgp.as4_capability(
... rbridge_id='225', enabled=False)
"""
enabled = kwargs.pop('enabled', True)
callback = kwargs.pop('callback', self._callback)
if not isinstance(enabled, bool):
raise ValueError('%s must be `True` or `False`.' % repr(enabled))
as4_capability_args = dict(vrf_name=kwargs.pop('vrf', 'default'),
rbridge_id=kwargs.pop('rbridge_id', '1'))
as4_capability = getattr(self._rbridge,
'rbridge_id_router_router_bgp_router_bgp'
'_attributes_capability_as4_enable')
config = as4_capability(**as4_capability_args)
if not enabled:
capability = config.find('.//*capability')
capability.set('operation', 'delete')
# shutdown = capability.find('.//*as4-enable')
# shutdown.set('operation', 'delete')
return callback(config) | 0.000979 |
def all_blocks(self):
status = OrderedDict.fromkeys(parameters.BLOCKS.keys())
status['13AE'] = ['discovery complete', '50', '24.05']
status['13AO'] = ['discovery complete', '36', '24.40']
status['13BL'] = ['discovery complete', '79', '24.48']
status['14BH'] = ['discovery running', '-', '-']
status['15AP'] = ['discovery running', '-', '-']
status['15AM'] = ['discovery running', '-', '-']
        '''Overview table is expecting:
ID observations processing status discoveries m_r 40%
'''
bks = []
for block in status.iterkeys():
bk = [block, self.num_block_images(block)] # if set in the .fromkeys(), doesn't give a unique list
if status[block] is not None:
bk = bk + status[block]
else:
bk = bk + ['awaiting triplets', '-', '-']
bks.append(bk)
retval = {'blocks': bks, 'status': status}
return retval | 0.002997 |
def has_next(self):
"""Return True if there are more values present"""
if self._result_cache:
return self._result_cache.has_next
return self.all().has_next | 0.010417 |
def _places(client, url_part, query=None, location=None, radius=None,
keyword=None, language=None, min_price=0, max_price=4, name=None,
open_now=False, rank_by=None, type=None, region=None, page_token=None):
"""
Internal handler for ``places``, ``places_nearby``, and ``places_radar``.
See each method's docs for arg details.
"""
params = {"minprice": min_price, "maxprice": max_price}
if query:
params["query"] = query
if location:
params["location"] = convert.latlng(location)
if radius:
params["radius"] = radius
if keyword:
params["keyword"] = keyword
if language:
params["language"] = language
if name:
params["name"] = convert.join_list(" ", name)
if open_now:
params["opennow"] = "true"
if rank_by:
params["rankby"] = rank_by
if type:
params["type"] = type
if region:
params["region"] = region
if page_token:
params["pagetoken"] = page_token
url = "/maps/api/place/%ssearch/json" % url_part
return client._request(url, params) | 0.001786 |
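A hedged sketch of how the internal handler assembles its request; the stub client below merely echoes what would be sent and is an assumption made for illustration, not part of the real client class. Only parameters that need no coordinate conversion are passed, so the sketch stands alone.
# Stub client that records the request instead of calling the web service.
class _StubClient(object):
    def _request(self, url, params):
        return {"url": url, "params": params}

probe = _places(_StubClient(), "text", query="pizza in Berlin",
                language="en", open_now=True)
print(probe["url"])      # /maps/api/place/textsearch/json
print(probe["params"])   # minprice/maxprice defaults plus query, language, opennow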
def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph(AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format) | 0.003155 |
def init_from_datastore(self):
"""Initializes batches by reading from the datastore."""
self._data = {}
for entity in self._datastore_client.query_fetch(
kind=self._entity_kind_batches):
batch_id = entity.key.flat_path[-1]
self._data[batch_id] = dict(entity)
self._data[batch_id]['images'] = {}
for entity in self._datastore_client.query_fetch(
kind=self._entity_kind_images):
batch_id = entity.key.flat_path[-3]
image_id = entity.key.flat_path[-1]
self._data[batch_id]['images'][image_id] = dict(entity) | 0.015734 |
def get_boards(self):
"""
Get the list of boards to pull cards from. If the user gave a value to
trello.include_boards use that, otherwise ask the Trello API for the
user's boards.
"""
if 'include_boards' in self.config:
for boardid in self.config.get('include_boards', to_type=aslist):
# Get the board name
yield self.api_request(
"/1/boards/{id}".format(id=boardid), fields='name')
else:
boards = self.api_request("/1/members/me/boards", fields='name')
for board in boards:
yield board | 0.003086 |
def _finish_connection_action(self, action):
"""Finish a connection attempt
Args:
action (ConnectionAction): the action object describing what we are
connecting to and what the result of the operation was
"""
success = action.data['success']
conn_key = action.data['id']
if self._get_connection_state(conn_key) != self.Connecting:
print("Invalid finish_connection action on a connection whose state is not Connecting, conn_key=%s" % str(conn_key))
return
# Cannot be None since we checked above to make sure it exists
data = self._get_connection(conn_key)
callback = data['callback']
conn_id = data['conn_id']
int_id = data['int_id']
if success is False:
reason = action.data['reason']
if reason is None:
reason = "No reason was given"
del self._connections[conn_id]
del self._int_connections[int_id]
callback(conn_id, self.id, False, reason)
else:
data['state'] = self.Idle
data['microstate'] = None
data['callback'] = None
callback(conn_id, self.id, True, None) | 0.0024 |
def collections(self):
"""获取用户收藏夹.
:return: 用户收藏夹,返回生成器
:rtype: Collection.Iterable
"""
from .collection import Collection
if self.url is None or self.collection_num == 0:
return
else:
collection_num = self.collection_num
for page_index in range(1, (collection_num - 1) // 20 + 2):
html = self._session.get(
self.url + 'collections?page=' + str(page_index)).text
soup = BeautifulSoup(html)
collections_names = soup.find_all(
'a', class_='zm-profile-fav-item-title')
collection_follower_nums = soup.find_all(
'div', class_='zm-profile-fav-bio')
for c, f in zip(collections_names, collection_follower_nums):
c_url = Zhihu_URL + c['href']
c_name = c.text
c_fn = int(re_get_number.match(f.contents[2]).group(1))
yield Collection(c_url, self, c_name, c_fn,
session=self._session) | 0.001778 |
def read(self, vals):
"""Read values.
Args:
vals (list): list of strings representing values
"""
i = 0
if len(vals[i]) == 0:
self.title_of_design_condition = None
else:
self.title_of_design_condition = vals[i]
i += 1
if len(vals[i]) == 0:
self.unkown_field = None
else:
self.unkown_field = vals[i]
i += 1
if len(vals[i]) == 0:
self.design_stat_heating = None
else:
self.design_stat_heating = vals[i]
i += 1
if len(vals[i]) == 0:
self.coldestmonth = None
else:
self.coldestmonth = vals[i]
i += 1
if len(vals[i]) == 0:
self.db996 = None
else:
self.db996 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db990 = None
else:
self.db990 = vals[i]
i += 1
if len(vals[i]) == 0:
self.dp996 = None
else:
self.dp996 = vals[i]
i += 1
if len(vals[i]) == 0:
self.hr_dp996 = None
else:
self.hr_dp996 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_dp996 = None
else:
self.db_dp996 = vals[i]
i += 1
if len(vals[i]) == 0:
self.dp990 = None
else:
self.dp990 = vals[i]
i += 1
if len(vals[i]) == 0:
self.hr_dp990 = None
else:
self.hr_dp990 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_dp990 = None
else:
self.db_dp990 = vals[i]
i += 1
if len(vals[i]) == 0:
self.ws004c = None
else:
self.ws004c = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_ws004c = None
else:
self.db_ws004c = vals[i]
i += 1
if len(vals[i]) == 0:
self.ws010c = None
else:
self.ws010c = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_ws010c = None
else:
self.db_ws010c = vals[i]
i += 1
if len(vals[i]) == 0:
self.ws_db996 = None
else:
self.ws_db996 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wd_db996 = None
else:
self.wd_db996 = vals[i]
i += 1
if len(vals[i]) == 0:
self.design_stat_cooling = None
else:
self.design_stat_cooling = vals[i]
i += 1
if len(vals[i]) == 0:
self.hottestmonth = None
else:
self.hottestmonth = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbr = None
else:
self.dbr = vals[i]
i += 1
if len(vals[i]) == 0:
self.db004 = None
else:
self.db004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wb_db004 = None
else:
self.wb_db004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db010 = None
else:
self.db010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wb_db010 = None
else:
self.wb_db010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db020 = None
else:
self.db020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wb_db020 = None
else:
self.wb_db020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wb004 = None
else:
self.wb004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_wb004 = None
else:
self.db_wb004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wb010 = None
else:
self.wb010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_wb010 = None
else:
self.db_wb010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wb020 = None
else:
self.wb020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_wb020 = None
else:
self.db_wb020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.ws_db004 = None
else:
self.ws_db004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wd_db004 = None
else:
self.wd_db004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.dp004 = None
else:
self.dp004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.hr_dp004 = None
else:
self.hr_dp004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_dp004 = None
else:
self.db_dp004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.dp010 = None
else:
self.dp010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.hr_dp010 = None
else:
self.hr_dp010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_dp010 = None
else:
self.db_dp010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.dp020 = None
else:
self.dp020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.hr_dp020 = None
else:
self.hr_dp020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_dp020 = None
else:
self.db_dp020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.en004 = None
else:
self.en004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_en004 = None
else:
self.db_en004 = vals[i]
i += 1
if len(vals[i]) == 0:
self.en010 = None
else:
self.en010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_en010 = None
else:
self.db_en010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.en020 = None
else:
self.en020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.db_en020 = None
else:
self.db_en020 = vals[i]
i += 1
if len(vals[i]) == 0:
self.hrs_84_and_db12_8_or_20_6 = None
else:
self.hrs_84_and_db12_8_or_20_6 = vals[i]
i += 1
if len(vals[i]) == 0:
self.design_stat_extremes = None
else:
self.design_stat_extremes = vals[i]
i += 1
if len(vals[i]) == 0:
self.ws010 = None
else:
self.ws010 = vals[i]
i += 1
if len(vals[i]) == 0:
self.ws025 = None
else:
self.ws025 = vals[i]
i += 1
if len(vals[i]) == 0:
self.ws050 = None
else:
self.ws050 = vals[i]
i += 1
if len(vals[i]) == 0:
self.wbmax = None
else:
self.wbmax = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmin_mean = None
else:
self.dbmin_mean = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmax_mean = None
else:
self.dbmax_mean = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmin_stddev = None
else:
self.dbmin_stddev = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmax_stddev = None
else:
self.dbmax_stddev = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmin05years = None
else:
self.dbmin05years = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmax05years = None
else:
self.dbmax05years = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmin10years = None
else:
self.dbmin10years = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmax10years = None
else:
self.dbmax10years = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmin20years = None
else:
self.dbmin20years = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmax20years = None
else:
self.dbmax20years = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmin50years = None
else:
self.dbmin50years = vals[i]
i += 1
if len(vals[i]) == 0:
self.dbmax50years = None
else:
self.dbmax50years = vals[i]
i += 1 | 0.000223 |
def run(self, daemon=False):
"""Launch the process with the given inputs, by default running in the current interpreter.
:param daemon: boolean, if True, will submit the process instead of running it.
"""
from aiida.engine import launch
# If daemon is True, submit the process and return
if daemon:
node = launch.submit(self.process, **self.inputs)
echo.echo_info('Submitted {}<{}>'.format(self.process_name, node.pk))
return
# Otherwise we run locally and wait for the process to finish
echo.echo_info('Running {}'.format(self.process_name))
try:
_, node = launch.run_get_node(self.process, **self.inputs)
except Exception as exception: # pylint: disable=broad-except
echo.echo_critical('an exception occurred during execution: {}'.format(str(exception)))
if node.is_killed:
echo.echo_critical('{}<{}> was killed'.format(self.process_name, node.pk))
elif not node.is_finished_ok:
arguments = [self.process_name, node.pk, node.exit_status, node.exit_message]
echo.echo_warning('{}<{}> failed with exit status {}: {}'.format(*arguments))
else:
output = []
echo.echo_success('{}<{}> finished successfully\n'.format(self.process_name, node.pk))
for triple in sorted(node.get_outgoing().all(), key=lambda triple: triple.link_label):
output.append([triple.link_label, '{}<{}>'.format(triple.node.__class__.__name__, triple.node.pk)])
echo.echo(tabulate.tabulate(output, headers=['Output label', 'Node'])) | 0.007794 |
def transform_file(ELEMS, ofname, EPO, TREE, opt):
"transform/map the elements of this file and dump the output on 'ofname'"
BED4_FRM = "%s\t%d\t%d\t%s\n"
log.info("%s (%d) elements ..." % (opt.screen and "screening" or "transforming", ELEMS.shape[0]))
with open(ofname, 'w') as out_fd:
if opt.screen:
for elem in ELEMS.flat:
matching_blocks = [attrgetter("value")(_) for _ in TREE.find(elem['chrom'], elem['start'], elem['end'])]
assert set( matching_blocks ) <= set( EPO.keys() )
if matching_blocks:
out_fd.write(BED4_FRM % elem)
else:
for chrom in set( ELEMS['chrom'] ):
transform_by_chrom(EPO,
ELEMS[ELEMS['chrom'] == chrom],
TREE, chrom, opt, out_fd)
log.info("DONE!") | 0.011547 |
def _read_ent(ent_file):
"""Read notes stored in .ent file.
This is a basic implementation, that relies on turning the information in
the string in the dict format, and then evaluate it. It's not very flexible
and it might not read some notes, but it's fast. I could not implement a
nice, recursive approach.
Returns
-------
allnote : a list of dict
where each dict contains keys such as:
- type
- length : length of the note in B,
- prev_length : length of the previous note in B,
- unused,
- value : the actual content of the note.
Notes
-----
The notes are stored in a format called 'Excel list' but could not find
more information. It's based on "(" and "(.", and I found it very hard to
parse. With some basic regex and substitution, it can be evaluated into
a dict, with sub dictionaries. However, the note containing the name of the
electrodes (I think they called it "montage") cannot be parsed, because
it's too complicated. If it cannot be converted into a dict, the whole
string is passed as value.
"""
with ent_file.open('rb') as f:
f.seek(352) # end of header
note_hdr_length = 16
allnote = []
while True:
note = {}
note['type'], = unpack('<i', f.read(4))
note['length'], = unpack('<i', f.read(4))
note['prev_length'], = unpack('<i', f.read(4))
note['unused'], = unpack('<i', f.read(4))
if not note['type']:
break
s = f.read(note['length'] - note_hdr_length)
s = s[:-2] # it ends with one empty byte
s = s.decode('utf-8', errors='replace')
s1 = s.replace('\n', ' ')
s1 = s1.replace('\\xd ', '')
s1 = s1.replace('(.', '{')
s1 = sub(r'\(([A-Za-z0-9," ]*)\)', r'[\1]', s1)
s1 = s1.replace(')', '}')
# s1 = s1.replace('",', '" :')
s1 = sub(r'(\{[\w"]*),', r'\1 :', s1)
s1 = s1.replace('{"', '"')
s1 = s1.replace('},', ',')
s1 = s1.replace('}}', '}')
s1 = sub(r'\(([0-9 ,-\.]*)\}', r'[\1]', s1)
try:
note['value'] = eval(s1)
except:
note['value'] = s
allnote.append(note)
return allnote | 0.000835 |
def vertical_scroll(self, image, padding=True):
"""Returns a list of images which appear to scroll from top to bottom
down the input image when displayed on the LED matrix in order.
The input image is not limited to being 8x16. If the input image is
        larger than this, then all rows will be scrolled through but only the
left-most 8 columns of pixels will be displayed.
Keyword arguments:
image -- The image to scroll down.
padding -- If True, the animation will begin with a blank screen and the
input image will scroll into the blank screen one pixel row at a
time. Similarly, after scrolling down the whole input image, the end
of the image will scroll out of a blank screen one row at a time.
If this is not True, then only the input image will be scroll down
without beginning or ending with "whitespace." (Default = True)
"""
image_list = list()
height = image.size[1]
# Scroll into the blank image.
if padding:
for y in range(16):
section = image.crop((0, 0, 8, y))
display_section = self.create_blank_image()
display_section.paste(section, (0, 8 - y, 8, 16))
image_list.append(display_section)
#Scroll across the input image.
for y in range(16, height + 1):
section = image.crop((0, y - 16, 8, y))
display_section = self.create_blank_image()
display_section.paste(section, (0, 0, 8, 16))
image_list.append(display_section)
#Scroll out, leaving the blank image.
if padding:
for y in range(height - 15, height + 1):
section = image.crop((0, y, 8, height))
display_section = self.create_blank_image()
display_section.paste(section, (0, 0, 8, 7 - (y - (height - 15))))
image_list.append(display_section)
#Return the list of images created
return image_list | 0.003854 |
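A hedged sketch of consuming the frame list, assuming `matrix` is an instance of the class this method belongs to, that Pillow is installed, and that some per-frame display call exists (the name used below is hypothetical); the image size and delay are arbitrary.
from PIL import Image
import time

tall_image = Image.new("1", (8, 48))           # 8 wide, 48 tall; blank for illustration
frames = matrix.vertical_scroll(tall_image, padding=True)
for frame in frames:
    matrix.display_image(frame)                # hypothetical "show one frame" method
    time.sleep(0.05)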
def as_tuple(self):
"""Return all of the matrix’s components.
:returns: A ``(xx, yx, xy, yy, x0, y0)`` tuple of floats.
"""
ptr = self._pointer
return (ptr.xx, ptr.yx, ptr.xy, ptr.yy, ptr.x0, ptr.y0) | 0.008299 |
def semilocal_linear_trend_transition_matrix(autoregressive_coef):
"""Build the transition matrix for a semi-local linear trend model."""
# We want to write the following 2 x 2 matrix:
# [[1., 1., ], # level(t+1) = level(t) + slope(t)
# [0., ar_coef], # slope(t+1) = ar_coef * slope(t)
# but it's slightly tricky to properly incorporate the batch shape of
# autoregressive_coef. E.g., if autoregressive_coef has shape [4,6], we want
# to return shape [4, 6, 2, 2]. We do this by breaking the matrix into its
# fixed entries, written explicitly, and then the autoregressive_coef part
# which we add in after using a mask to broadcast to the correct matrix shape.
fixed_entries = tf.constant(
[[1., 1.],
[0., 0.]],
dtype=autoregressive_coef.dtype)
autoregressive_coef_mask = tf.constant([[0., 0.],
[0., 1.]],
dtype=autoregressive_coef.dtype)
bottom_right_entry = (autoregressive_coef[..., tf.newaxis, tf.newaxis] *
autoregressive_coef_mask)
return tf.linalg.LinearOperatorFullMatrix(
fixed_entries + bottom_right_entry) | 0.01268 |
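A short shape check illustrating how the coefficient's batch dimension carries through to the returned operator; the coefficient values are arbitrary.
# Hedged shape check for the transition-matrix builder above (TF2 eager mode).
import tensorflow as tf

coef = tf.constant([0.2, 0.5, 0.9])                  # batch of 3 AR coefficients
op = semilocal_linear_trend_transition_matrix(coef)
print(op.to_dense().shape)                           # (3, 2, 2)
print(op.to_dense()[1].numpy())                      # [[1., 1.], [0., 0.5]]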
def _build_pcollection(self, pipeline, filepaths, language):
"""Build PCollection of examples in the raw (text) form."""
beam = tfds.core.lazy_imports.apache_beam
def _extract_content(filepath):
"""Extracts article content from a single WikiMedia XML file."""
logging.info("generating examples from = %s", filepath)
with tf.io.gfile.GFile(filepath) as f:
for _, elem in etree.iterparse(f, events=("end",)):
if not elem.tag.endswith("page"):
continue
namespace = elem.tag[:-4]
title = elem.find("./{0}title".format(namespace)).text
ns = elem.find("./{0}ns".format(namespace)).text
# Filter pages that are not in the "main" namespace.
if ns != "0":
continue
raw_content = elem.find(
"./{0}revision/{0}text".format(namespace)).text
elem.clear()
# Filter redirects.
if raw_content is None or raw_content.lower().startswith("#redirect"):
beam.metrics.Metrics.counter(language, "filtered-redirects").inc()
continue
beam.metrics.Metrics.counter(language, "extracted-examples").inc()
yield (title, raw_content)
def _clean_content(inputs):
"""Cleans raw wikicode to extract text."""
title, raw_content = inputs
try:
text = _parse_and_clean_wikicode(raw_content)
except (
tfds.core.lazy_imports.mwparserfromhell.parser.ParserError) as e:
beam.metrics.Metrics.counter(language, "parser-error").inc()
logging.error("mwparserfromhell ParseError: %s", e)
return
beam.metrics.Metrics.counter(language, "cleaned-examples").inc()
yield {
"title": title,
"text": text
}
return (
pipeline
| beam.Create(filepaths)
| beam.FlatMap(_extract_content)
| beam.FlatMap(_clean_content)
) | 0.012384 |
def documentation(self, base_url=None, api_version=None, prefix=""):
"""Generates and returns documentation for this API endpoint"""
documentation = OrderedDict()
base_url = self.base_url if base_url is None else base_url
overview = self.api.doc
if overview:
documentation['overview'] = overview
version_dict = OrderedDict()
versions = self.versions
versions_list = list(versions)
if None in versions_list:
versions_list.remove(None)
if False in versions_list:
versions_list.remove(False)
if api_version is None and len(versions_list) > 0:
api_version = max(versions_list)
documentation['version'] = api_version
elif api_version is not None:
documentation['version'] = api_version
if versions_list:
documentation['versions'] = versions_list
for router_base_url, routes in self.routes.items():
for url, methods in routes.items():
for method, method_versions in methods.items():
for version, handler in method_versions.items():
if getattr(handler, 'private', False):
continue
if version is None:
applies_to = versions
else:
applies_to = (version, )
for version in applies_to:
if api_version and version != api_version:
continue
if base_url and router_base_url != base_url:
continue
doc = version_dict.setdefault(url, OrderedDict())
doc[method] = handler.documentation(doc.get(method, None), version=version, prefix=prefix,
base_url=router_base_url, url=url)
documentation['handlers'] = version_dict
return documentation | 0.001907 |
def interactive(self, bConfirmQuit = True, bShowBanner = True):
"""
Start an interactive debugging session.
@type bConfirmQuit: bool
@param bConfirmQuit: Set to C{True} to ask the user for confirmation
before closing the session, C{False} otherwise.
@type bShowBanner: bool
@param bShowBanner: Set to C{True} to show a banner before entering
the session and after leaving it, C{False} otherwise.
@warn: This will temporarily disable the user-defined event handler!
This method returns when the user closes the session.
"""
print('')
print("-" * 79)
print("Interactive debugging session started.")
print("Use the \"help\" command to list all available commands.")
print("Use the \"quit\" command to close this session.")
print("-" * 79)
if self.lastEvent is None:
print('')
console = ConsoleDebugger()
console.confirm_quit = bConfirmQuit
console.load_history()
try:
console.start_using_debugger(self)
console.loop()
finally:
console.stop_using_debugger()
console.save_history()
print('')
print("-" * 79)
print("Interactive debugging session closed.")
print("-" * 79)
print('') | 0.00437 |
def timedelta_seconds(td):
'''
Return the offset stored by a :class:`datetime.timedelta` object as an
integer number of seconds. Microseconds, if present, are rounded to
the nearest second.
Delegates to
:meth:`timedelta.total_seconds() <datetime.timedelta.total_seconds()>`
if available.
>>> timedelta_seconds(timedelta(hours=1))
3600
>>> timedelta_seconds(timedelta(hours=-1))
-3600
>>> timedelta_seconds(timedelta(hours=1, minutes=30))
5400
>>> timedelta_seconds(timedelta(hours=1, minutes=30,
... microseconds=300000))
5400
>>> timedelta_seconds(timedelta(hours=1, minutes=30,
... microseconds=900000))
5401
'''
try:
return int(round(td.total_seconds()))
except AttributeError:
days = td.days
seconds = td.seconds
microseconds = td.microseconds
return int(round((days * 86400) + seconds + (microseconds / 1000000))) | 0.00105 |