text (string, lengths 78 to 104k) | score (float64, 0 to 0.18)
---|---|
def _index_loopbacks(self):
"""Finds all loopbacks and stores them in :attr:`loopbacks`"""
self.loopbacks = {}
try:
result = _util.check_output_(['losetup', '-a'])
for line in result.splitlines():
m = re.match(r'(.+): (.+) \((.+)\).*', line)
if m:
self.loopbacks[m.group(1)] = m.group(3)
except Exception:
pass | 0.00464 |
def condition_input(args, kwargs):
'''
Return a single arg structure for the publisher to safely use
'''
ret = []
for arg in args:
if (six.PY3 and isinstance(arg, six.integer_types) and salt.utils.jid.is_jid(six.text_type(arg))) or \
(six.PY2 and isinstance(arg, long)): # pylint: disable=incompatible-py3-code,undefined-variable
ret.append(six.text_type(arg))
else:
ret.append(arg)
if isinstance(kwargs, dict) and kwargs:
kw_ = {'__kwarg__': True}
for key, val in six.iteritems(kwargs):
kw_[key] = val
return ret + [kw_]
return ret | 0.006202 |
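A small illustration of the structure this helper produces may help: positional arguments pass through (with JID-like big integers coerced to strings), and keyword arguments are folded into a trailing dict flagged with `__kwarg__`. The values below are made up for the sketch.

```python
# Hypothetical inputs for the sketch; not taken from a real Salt call.
args = ['web01', 20]
kwargs = {'timeout': 5, 'refresh': True}

# condition_input(args, kwargs) is expected to return:
expected = ['web01', 20, {'__kwarg__': True, 'timeout': 5, 'refresh': True}]
```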
def summary(self, sortOn=None):
"""
Summarize all the alignments for this title.
@param sortOn: A C{str} attribute to sort titles on. One of 'length',
'maxScore', 'medianScore', 'readCount', or 'title'.
@raise ValueError: If an unknown C{sortOn} value is given.
@return: A generator that yields C{dict} instances as produced by
C{TitleAlignments} (see class earlier in this file), sorted by
C{sortOn}.
"""
titles = self if sortOn is None else self.sortTitles(sortOn)
for title in titles:
yield self[title].summary() | 0.00318 |
def get_img_info(image):
"""Return the header and affine matrix from a Nifti file.
Parameters
----------
image: img-like object or str
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
        If image is a string, it is treated as a path to a Nifti image and
        nibabel.load is called on it. If it is an object, the get_data()
        and get_affine() methods must be present; TypeError is raised otherwise.
Returns
-------
hdr, aff
"""
try:
img = check_img(image)
except Exception as exc:
raise Exception('Error reading file {0}.'.format(repr_imgs(image))) from exc
else:
return img.get_header(), img.get_affine() | 0.003846 |
def _process_state(cls, unprocessed, processed, state):
"""Preprocess a single state definition."""
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokens = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokens.extend(cls._process_state(unprocessed, processed,
str(tdef)))
continue
if isinstance(tdef, _inherit):
                # should have been processed already, but may not be in the case of:
# 1. the state has no counterpart in any parent
# 2. the state includes more than one 'inherit'
continue
if isinstance(tdef, default):
new_state = cls._process_new_state(tdef.state, unprocessed, processed)
tokens.append((re.compile('').match, None, new_state))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = cls._process_regex(tdef[0], rflags, state)
except Exception as err:
raise ValueError("uncompilable regex %r in state %r of %r: %s" %
(tdef[0], state, cls, err))
token = cls._process_token(tdef[1])
if len(tdef) == 2:
new_state = None
else:
new_state = cls._process_new_state(tdef[2],
unprocessed, processed)
tokens.append((rex, token, new_state))
return tokens | 0.002112 |
def modify_agent_property(self, agent_id, key, value):
'''
modify_agent_property(self, agent_id, key, value)
        Modifies a single property of an agent. If the property does not exist, it is created as a custom property.
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
* *key* (`string`) -- Key of a property to change
* *value* (`string`) -- New Value of the property to change
:Example:
.. code-block:: python
opereto_client.modify_agent_property('my_agent_id', 'agent_new_property', 'agent value')
'''
return self._call_rest_api('post', '/agents/'+agent_id+'/properties', data={key: value}, error='Failed to modify agent [%s] property [%s]'%(agent_id,key)) | 0.008805 |
def next(self):
"""Goes one item ahead and returns it."""
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv | 0.012422 |
def _CallPlugin(self, cmd, input_json):
"""Calls the plugin and validates the response."""
# Calculate length of input
input_length = len(input_json)
length_bytes_le = struct.pack('<I', input_length)
request = length_bytes_le + input_json.encode()
# Call plugin
sign_process = subprocess.Popen(cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout = sign_process.communicate(request)[0]
exit_status = sign_process.wait()
# Parse and validate response size
response_len_le = stdout[:4]
response_len = struct.unpack('<I', response_len_le)[0]
response = stdout[4:]
if response_len != len(response):
raise errors.PluginError(
'Plugin response length {} does not match data {} (exit_status={})'
.format(response_len, len(response), exit_status))
# Ensure valid json
try:
json_response = json.loads(response.decode())
except ValueError:
raise errors.PluginError('Plugin returned invalid output (exit_status={})'
.format(exit_status))
# Ensure response type
if json_response.get('type') != 'sign_helper_reply':
raise errors.PluginError('Plugin returned invalid response type '
'(exit_status={})'
.format(exit_status))
# Parse response codes
result_code = json_response.get('code')
if result_code is None:
raise errors.PluginError('Plugin missing result code (exit_status={})'
.format(exit_status))
# Handle errors
if result_code == SK_SIGNING_PLUGIN_TOUCH_REQUIRED:
raise errors.U2FError(errors.U2FError.TIMEOUT)
elif result_code == SK_SIGNING_PLUGIN_WRONG_DATA:
raise errors.U2FError(errors.U2FError.DEVICE_INELIGIBLE)
elif result_code != SK_SIGNING_PLUGIN_NO_ERROR:
raise errors.PluginError(
'Plugin failed with error {} - {} (exit_status={})'
.format(result_code,
json_response.get('errorDetail'),
exit_status))
# Ensure response data is present
response_data = json_response.get('responseData')
if response_data is None:
      raise errors.PluginError(
'Plugin returned output with missing responseData (exit_status={})'
.format(exit_status))
return response_data | 0.004517 |
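The request/response framing above is a 4-byte little-endian length prefix followed by a JSON body. A minimal standalone sketch of that framing, assuming the same `<I` prefix (the helper names here are illustrative, not part of the plugin API):

```python
import json
import struct

def frame(payload):
    """Length-prefix a JSON payload the way the plugin request is built."""
    body = json.dumps(payload).encode()
    return struct.pack('<I', len(body)) + body

def unframe(data):
    """Split a length-prefixed reply and verify the declared size."""
    declared = struct.unpack('<I', data[:4])[0]
    body = data[4:]
    if declared != len(body):
        raise ValueError('length prefix does not match body size')
    return json.loads(body.decode())

# Round-trip check of the framing helpers.
assert unframe(frame({'type': 'sign_helper_reply', 'code': 0}))['code'] == 0
```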
def del_Unnamed(df):
"""
Deletes all the unnamed columns
:param df: pandas dataframe
"""
cols_del=[c for c in df.columns if 'Unnamed' in c]
return df.drop(cols_del,axis=1) | 0.015306 |
def _get_file(self, share_name, directory_name, file_name,
start_range=None, end_range=None, validate_content=False,
timeout=None, _context=None):
'''
Downloads a file's content, metadata, and properties. You can specify a
range if you don't need to download the file in its entirety. If no range
is specified, the full file will be downloaded.
See get_file_to_* for high level functions that handle the download
of large files with automatic chunking and progress notifications.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
When this is set to True and specified together with the Range header,
the service returns the MD5 hash for the range, as long as the range
is less than or equal to 4 MB in size.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: A File with content, properties, and metadata.
:rtype: :class:`~azure.storage.file.models.File`
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
request.query = { 'timeout': _int_to_str(timeout)}
_validate_and_format_range_headers(
request,
start_range,
end_range,
start_range_required=False,
end_range_required=False,
check_content_md5=validate_content)
return self._perform_request(request, _parse_file,
[file_name, validate_content],
operation_context=_context) | 0.005549 |
def _make_list(predictions, targets):
"""Helper: make predictions and targets lists, check they match on length."""
# Our models sometimes return predictions in lists, make it a list always.
# TODO(lukaszkaiser): make abstractions for nested structures and refactor.
if not isinstance(predictions, (list, tuple)):
if isinstance(targets, (list, tuple)):
raise ValueError("Targets are a list or tuple but predictions are not.")
predictions, targets = [predictions], [targets]
if len(predictions) != len(targets):
raise ValueError("Predictions and targets have different lengths.")
return list(predictions), list(targets) | 0.012327 |
def reservations(self):
"""
Access the reservations
:returns: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
:rtype: twilio.rest.taskrouter.v1.workspace.task.reservation.ReservationList
"""
if self._reservations is None:
self._reservations = ReservationList(
self._version,
workspace_sid=self._solution['workspace_sid'],
task_sid=self._solution['sid'],
)
return self._reservations | 0.007533 |
async def close(self):
"""
Cleans up after the connection to the SMTP server has been closed
(voluntarily or not).
"""
if self.writer is not None:
# Close the transport:
try:
self.writer.close()
except OSError as exc:
if exc.errno != errno.ENOTCONN:
raise
self.reset_state() | 0.004866 |
def idle_task(self):
'''handle missing parameters'''
self.pstate.vehicle_name = self.vehicle_name
self.pstate.fetch_check(self.master) | 0.012658 |
def currency_to_protocol(amount):
"""
Convert a string of 'currency units' to 'protocol units'. For instance
converts 19.1 bitcoin to 1910000000 satoshis.
Input is a float, output is an integer that is 1e8 times larger.
    The conversion goes through string formatting because multiplying
    floats introduces rounding errors which would mess up the transaction
    creation process.
examples:
19.1 -> 1910000000
0.001 -> 100000
"""
if type(amount) in [float, int]:
amount = "%.8f" % amount
return int(amount.replace(".", '')) | 0.00177 |
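A quick numeric illustration (not part of the original module) of why the string route is used: multiplying the float directly can land just below the intended integer on IEEE-754 doubles, while formatting to 8 decimal places and stripping the point stays exact.

```python
# Naive float multiplication can truncate low, because 2.675 is stored
# slightly below 2.675 as a double:
print(int(2.675 * 10 ** 8))                        # 267499999 on typical builds
# The string-formatting route used above rounds correctly:
print(int(("%.8f" % 2.675).replace(".", "")))      # 267500000
```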
def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept)) | 0.009063 |
def get_pages_for_display(self):
"""Return all pages needed for rendering all sub-levels for the current
menu"""
parent_page = self.parent_page_for_menu_items
pages = self.get_base_page_queryset().filter(
depth__gt=parent_page.depth,
depth__lte=parent_page.depth + self.max_levels,
path__startswith=parent_page.path,
)
# Return 'specific' page instances if required
if(self.use_specific == constants.USE_SPECIFIC_ALWAYS):
return pages.specific()
return pages | 0.003509 |
def load_selected_bot(self):
"""
Loads all the values belonging to the new selected agent into the bot_config_groupbox
:return:
"""
# prevent processing from itself (clearing the other one processes this)
if not self.sender().selectedItems():
return
blue = True if self.sender() is self.blue_listwidget else False
if blue: # deselect the other listbox
self.orange_listwidget.clearSelection()
else:
self.blue_listwidget.clearSelection()
item_name = self.sender().selectedItems()[0].text()
agent = self.bot_names_to_agent_dict[item_name]
if agent is None: # something went wrong if agent is None
return
self.current_bot = agent
self.bot_config_groupbox.setEnabled(True) # Make sure that you can edit the bot
# enable [-] for right listwidget
if blue:
self.blue_minus_toolbutton.setDisabled(False)
self.orange_minus_toolbutton.setDisabled(True)
else:
self.orange_minus_toolbutton.setDisabled(False)
self.blue_minus_toolbutton.setDisabled(True)
# load the bot parameters into the edit frame
agent_type = agent.get_participant_type()
known_types = ['human', 'psyonix', 'rlbot', 'party_member_bot']
assert agent_type in known_types, 'Bot has unknown type: %s' % agent_type
self.bot_type_combobox.setCurrentIndex(known_types.index(agent_type))
if blue:
self.blue_radiobutton.setChecked(True)
else:
self.orange_radiobutton.setChecked(True)
self.ign_lineedit.setText(agent.ingame_name)
loadout_index = index_of_config_path_in_combobox(
self.loadout_preset_combobox, agent.get_loadout_preset().config_path)
self.loadout_preset_combobox.setCurrentIndex(loadout_index or 0)
self.agent_preset_combobox.blockSignals(True)
self.agent_preset_combobox.setCurrentText(agent.get_agent_preset().get_name())
self.agent_preset_combobox.blockSignals(False)
self.bot_level_slider.setValue(int(agent.get_bot_skill() * 100)) | 0.003653 |
def pause(self):
"""Set the execution mode to paused
"""
if self.state_machine_manager.active_state_machine_id is None:
logger.info("'Pause' is not a valid action to initiate state machine execution.")
return
if self.state_machine_manager.get_active_state_machine() is not None:
self.state_machine_manager.get_active_state_machine().root_state.recursively_pause_states()
logger.debug("Pause execution ...")
self.set_execution_mode(StateMachineExecutionStatus.PAUSED) | 0.00726 |
def emit(self, event: str, *args, **kwargs) -> None:
""" Emit an event and run the subscribed functions.
:param event: Name of the event.
:type event: str
        .. note::
Passing in threads=True as a kwarg allows to run emitted events
as separate threads. This can significantly speed up code execution
depending on the code being executed.
"""
threads = kwargs.pop('threads', None)
if threads:
events = [
Thread(target=f, args=args, kwargs=kwargs) for f in
self._event_funcs(event)
]
for event in events:
event.start()
else:
for func in self._event_funcs(event):
func(*args, **kwargs) | 0.002509 |
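The `threads=True` path simply wraps each subscribed function in its own `threading.Thread`. A tiny standalone illustration of that mechanic, outside the event class (the handler and payload are made up):

```python
from threading import Thread

def slow_handler(payload):
    # Stand-in for a subscribed function that would otherwise block the caller.
    print('handled', payload)

# Equivalent of emit('saved', payload, threads=True) for a single subscriber:
worker = Thread(target=slow_handler, args=({'id': 1},))
worker.start()
worker.join()
```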
def p_annotation_type_1(self, p):
"""annotation_type : ANNOTATION_TYPE LINE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.add_annotation_type(self.document, value)
except CardinalityError:
self.more_than_one_error('AnnotationType', p.lineno(1))
except SPDXValueError:
self.error = True
msg = ERROR_MESSAGES['ANNOTATION_TYPE_VALUE'].format(p.lineno(1))
self.logger.log(msg)
except OrderError:
self.order_error('AnnotationType', 'Annotator', p.lineno(1)) | 0.003012 |
def fidx(right, left, left_fk=None):
"""
Re-indexes a series or data frame (right) to align with
another (left) series or data frame via foreign key relationship.
The index of the right must be unique.
This is similar to misc.reindex, but allows for data frame
re-indexes and supports re-indexing data frames or
series with a multi-index.
Parameters:
-----------
right: pandas.DataFrame or pandas.Series
Series or data frame to re-index from.
left: pandas.Series or pandas.DataFrame
Series or data frame to re-index to.
If a series is provided, its values serve as the foreign keys.
If a data frame is provided, one or more columns may be used
as foreign keys, must specify the ``left_fk`` argument to
specify which column(s) will serve as keys.
left_fk: optional, str or list of str
Used when the left is a data frame, specifies the column(s) in
the left to serve as foreign keys. The specified columns' ordering
must match the order of the multi-index in the right.
Returns:
--------
pandas.Series or pandas.DataFrame with column(s) from
right aligned with the left.
"""
# ensure that we can align correctly
if not right.index.is_unique:
raise ValueError("The right's index must be unique!")
# simpler case:
# if the left (target) is a single series then just re-index to it
if isinstance(left_fk, str):
left = left[left_fk]
if isinstance(left, pd.Series):
a = right.reindex(left)
a.index = left.index
return a
# when reindexing using multiple columns (composite foreign key)
    # i.e. the right has a multi-index
# if a series for the right provided, convert to a data frame
if isinstance(right, pd.Series):
right = right.to_frame('right')
right_cols = 'right'
else:
right_cols = right.columns
# do the merge
return pd.merge(
left=left,
right=right,
left_on=left_fk,
right_index=True,
how='left'
)[right_cols] | 0.000473 |
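A short, self-contained example of the simple single-key path (a Series on the left), with made-up data; the composite-key path goes through the `pd.merge` branch instead.

```python
import pandas as pd

# right: values indexed by a unique key
right = pd.Series({'a': 10, 'b': 20}, name='size')
# left: foreign keys pointing into right's index
left = pd.Series(['b', 'a', 'b'], index=[101, 102, 103])

aligned = fidx(right, left)
# aligned is indexed like left: 101 -> 20, 102 -> 10, 103 -> 20
print(aligned)
```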
def get_logger(name, level=None):
""" Return a setup logger for the given name
:param name: The name for the logger. It is advised to use __name__. The logger name will be prepended by \"jb.\".
:type name: str
:param level: the logging level, e.g. logging.DEBUG, logging.INFO etc
:type level: int
:returns: Logger
:rtype: logging.Logger
:raises: None
The logger default level is defined in the constants :data:`jukeboxcore.constants.DEFAULT_LOGGING_LEVEL` but can be overwritten by the environment variable \"JUKEBOX_LOG_LEVEL\"
"""
log = logging.getLogger("jb.%s" % name)
if level is not None:
log.setLevel(level)
return log | 0.00436 |
def set_gateway(self, gateway):
'''
:param crabpy.gateway.capakey.CapakeyGateway gateway: Gateway to use.
'''
self.gateway = gateway
self.sectie.set_gateway(gateway) | 0.009756 |
def lookup_tf(self, h):
'''Get stream IDs and term frequencies for a single hash.
This yields pairs of strings that can be retrieved using
:func:`streamcorpus_pipeline._kvlayer.get_kvlayer_stream_item`
and the corresponding term frequency.
        .. seealso:: :meth:`lookup`
'''
for ((_, k1, k2), v) in self.client.scan(HASH_TF_INDEX_TABLE,
((h,), (h,))):
yield (kvlayer_key_to_stream_id((k1, k2)), v) | 0.003922 |
def get_bytes(self):
"""set_client_DH_params#f5045f1f nonce:int128 server_nonce:int128 encrypted_data:bytes = Set_client_DH_params_answer"""
ret = struct.pack("<I16s16s", set_client_DH_params.constructor, self.nonce, self.server_nonce)
bytes_io = BytesIO()
bytes_io.write(ret)
serialize_string(bytes_io, self.encrypted_data)
return bytes_io.getvalue() | 0.00995 |
def map(self, msg):
"""
Apply key function to ``msg`` to obtain a key. Return the routing table entry.
"""
k = self.key_function(msg)
key = k[0] if isinstance(k, (tuple, list)) else k
return self.routing_table[key] | 0.01145 |
def connect_get_namespaced_pod_attach(self, name, namespace, **kwargs): # noqa: E501
"""connect_get_namespaced_pod_attach # noqa: E501
connect GET requests to attach of Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.connect_get_namespaced_pod_attach(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the PodAttachOptions (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str container: The container in which to execute the command. Defaults to only container if there is only one container in the pod.
:param bool stderr: Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.
:param bool stdin: Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.
:param bool stdout: Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.
:param bool tty: TTY if true indicates that a tty will be allocated for the attach call. This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.connect_get_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.connect_get_namespaced_pod_attach_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | 0.001053 |
def save(self, insert=False, changed=None, saved=None,
send_dispatch=True, version=False, version_fieldname=None,
version_exception=True):
"""
        If insert=True, insert() will be used instead of update().
        changed is a callback function invoked only when the non-ManyToMany
        properties are saved; its signature is:
        def changed(obj, created, old_data, diff_data):
            created is True when the record is being created.
            You can modify diff_data, and the modified data will be saved to
            the database.
        version enables Optimistic Concurrency Control.
        version_fieldname defaults to 'version'.
        If check_many is set, it automatically checks whether ManyToMany values
        need to be saved; this is only available on UPDATE.
"""
_saved = False
created = False
version_fieldname = version_fieldname or 'version'
d = self._get_data()
#fix when d is empty, orm will not insert record bug 2013/04/07
if d or not self._saved or insert:
_id = d.get(self._primary_field, None)
if insert or not self._saved or not _id:
created = True
old = d.copy()
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_save', instance=self, created=True, data=d, old_data=self._old_values, signal=self.tablename)
#process auto_now_add
_manytomany = {}
for k, v in self.properties.items():
if v.property_type == 'compound':
continue
if not isinstance(v, ManyToMany):
if isinstance(v, DateTimeProperty) and v.auto_now_add and k not in d:
d[k] = v.now()
elif (not k in d) and v.auto_add:
d[k] = v.default_value()
else:
if k in d:
_manytomany[k] = d.pop(k)
if d:
if callable(changed):
changed(self, created, self._old_values, d)
old.update(d)
obj = do_(self.table.insert().values(**d), self.get_session())
_saved = True
if obj.inserted_primary_key and self._primary_field:
setattr(self, self._primary_field, obj.inserted_primary_key[0])
if _manytomany:
for k, v in _manytomany.items():
if v:
_saved = getattr(self, k).update(v) or _saved
else:
_id = d.pop(self._primary_field)
if d:
old = d.copy()
if get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'pre_save', instance=self, created=False, data=d, old_data=self._old_values, signal=self.tablename)
#process auto_now
_manytomany = {}
for k, v in self.properties.items():
if v.property_type == 'compound' or k == self._primary_field:
continue
if not isinstance(v, ManyToMany):
if isinstance(v, DateTimeProperty) and v.auto_now and k not in d:
d[k] = v.now()
elif (not k in d) and v.auto:
d[k] = v.default_value()
else:
if k in d:
_manytomany[k] = d.pop(k)
if d:
_cond = self.table.c[self._primary_field] == self._key
if version:
version_field = self.table.c.get(version_fieldname)
if version_field is None:
raise KindError("version_fieldname %s is not existed in Model %s" % (version_fieldname, self.__class__.__name__))
_version_value = getattr(self, version_fieldname, 0)
# setattr(self, version_fieldname, _version_value+1)
d[version_fieldname] = _version_value+1
_cond = (version_field == _version_value) & _cond
if callable(changed):
changed(self, created, self._old_values, d)
old.update(d)
result = do_(self.table.update(_cond).values(**d), self.get_session())
_saved = True
if version:
if result.rowcount == 0:
_saved = False
if version_exception:
raise SaveError("The record {0}:{1} has been saved by others, current version is {2}".format(
self.tablename, self._key, _version_value))
else:
setattr(self, version_fieldname, d[version_fieldname])
elif result.rowcount == 0:
_saved = False
# raise NotFound("The record can't be found!", self.tablename, self._key)
if _manytomany:
for k, v in _manytomany.items():
if v is not None:
_saved = getattr(self, k).update(v) or _saved
# else:
# #check if the field is primary_key, true will raise Exception
# #2015/11/20 limodou
# raise ValueError("You can't only change primary key '{}'.".format(self._primary_field))
if _saved:
for k, v in d.items():
x = self.properties[k].get_value_for_datastore(self)
if self.field_str(x) != self.field_str(v):
setattr(self, k, v)
if send_dispatch and get_dispatch_send() and self.__dispatch_enabled__:
dispatch.call(self.__class__, 'post_save', instance=self, created=created, data=old, old_data=self._old_values, signal=self.tablename)
self.set_saved()
if callable(saved):
saved(self, created, self._old_values, old)
return _saved | 0.006208 |
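The `version=True` branch above is a standard optimistic-concurrency pattern: the UPDATE is guarded by the version value read earlier, and a zero rowcount means a concurrent writer got there first. A small self-contained sketch of the same idea with SQLAlchemy Core and an in-memory SQLite database (table and column names are invented for the example):

```python
import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
meta = sa.MetaData()
articles = sa.Table('articles', meta,
                    sa.Column('id', sa.Integer, primary_key=True),
                    sa.Column('title', sa.String),
                    sa.Column('version', sa.Integer))
meta.create_all(engine)

with engine.begin() as conn:
    conn.execute(articles.insert().values(id=1, title='draft', version=0))
    read_version = 0  # version seen when the row was loaded
    result = conn.execute(
        articles.update()
                .where((articles.c.id == 1) & (articles.c.version == read_version))
                .values(title='final', version=read_version + 1))
    # rowcount == 0 would mean another writer bumped the version first.
    assert result.rowcount == 1
```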
def _yield_subpatches(patch, splits, name='split'):
"""
Iterator for subtables defined by a splits string
Parameters
----------
patch : obj
Patch object containing data to subset
splits : str
Specifies how a column of a dataset should be split. See Notes.
Yields
------
tuple
First element is subset string, second is subtable dataframe
Notes
-----
{0}
"""
if splits:
subset_list = _parse_splits(patch, splits)
for subset in subset_list:
logging.info('Analyzing subset %s: %s' % (name, subset))
subpatch = copy.copy(patch)
subpatch.table = _subset_table(patch.table, subset)
subpatch.meta, subpatch.incremented = _subset_meta(patch.meta,
subset, incremented=True)
yield subset, subpatch
else:
yield '', patch | 0.002151 |
def solve(self, A, F, N, b):
"""
Solve linear system ``Ax = b`` using numeric factorization ``N`` and symbolic factorization ``F``.
Store the solution in ``b``.
Parameters
----------
A
Sparse matrix
F
Symbolic factorization
N
Numeric factorization
b
RHS of the equation
Returns
-------
None
"""
if self.sparselib == 'umfpack':
umfpack.solve(A, N, b)
elif self.sparselib == 'klu':
klu.solve(A, F, N, b) | 0.005034 |
def processCommit(self, commit: Commit, sender: str) -> None:
"""
Validate and process the COMMIT specified.
If validation is successful, return the message to the node.
:param commit: an incoming COMMIT message
:param sender: name of the node that sent the COMMIT
"""
self.logger.debug("{} received COMMIT{} from {}".format(
self, (commit.viewNo, commit.ppSeqNo), sender))
if self.validateCommit(commit, sender):
self.stats.inc(TPCStat.CommitRcvd)
self.addToCommits(commit, sender)
self.logger.debug("{} processed incoming COMMIT{}".format(
self, (commit.viewNo, commit.ppSeqNo))) | 0.002813 |
def rc(self):
"""Return the reverse complemented motif.
Returns
-------
m : Motif instance
New Motif instance with the reverse complement of the input motif.
"""
m = Motif()
m.pfm = [row[::-1] for row in self.pfm[::-1]]
m.pwm = [row[::-1] for row in self.pwm[::-1]]
m.id = self.id + "_revcomp"
return m | 0.005063 |
def perform_request(self, request_type, *args, **kwargs):
"""
Create and send a request.
`request_type` is the request type (string). This is used to look up a
plugin, whose request class is instantiated and passed the remaining
arguments passed to this function.
"""
# create a request
req = api_request(request_type, *args, **kwargs)
# send it
res = self.send_request(req)
return res | 0.004202 |
def surface_brightness(self, x, y, kwargs_list, k=None):
"""
:param x: coordinate in units of arcsec relative to the center of the image
:type x: set or single 1d numpy array
"""
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
flux = np.zeros_like(x)
for i, func in enumerate(self.func_list):
if k is None or k == i:
out = np.array(func.function(x, y, **kwargs_list[i]), dtype=float)
flux += out
return flux | 0.007505 |
def ts(self, data, lon_cyclic=True, lon_str=LON_STR, lat_str=LAT_STR,
land_mask_str=LAND_MASK_STR, sfc_area_str=SFC_AREA_STR):
"""Create yearly time-series of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to create the regional timeseries of
lon_cyclic : { None, True, False }, optional (default True)
Whether or not the longitudes of ``data`` span the whole globe,
meaning that they should be wrapped around as necessary to cover
the Region's full width.
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
xarray.DataArray
The timeseries of values averaged within the region and within each
year, one value per year.
"""
data_masked = self.mask_var(data, lon_cyclic=lon_cyclic,
lon_str=lon_str, lat_str=lat_str)
sfc_area = data[sfc_area_str]
sfc_area_masked = self.mask_var(sfc_area, lon_cyclic=lon_cyclic,
lon_str=lon_str, lat_str=lat_str)
land_mask = _get_land_mask(data, self.do_land_mask,
land_mask_str=land_mask_str)
weights = sfc_area_masked * land_mask
# Mask weights where data values are initially invalid in addition
# to applying the region mask.
weights = weights.where(np.isfinite(data))
weights_reg_sum = weights.sum(lon_str).sum(lat_str)
data_reg_sum = (data_masked * sfc_area_masked *
land_mask).sum(lat_str).sum(lon_str)
return data_reg_sum / weights_reg_sum | 0.001552 |
def waiters(self, path=None):
"""Iterate over all waiters.
This method will return the waiters in unspecified order
including the future or callback object that will be invoked
and a list containing the keys/value that are being matched.
Yields:
list, future or callable
"""
context = self._waiters
if path is None:
path = []
for key in path:
context = context[key]
if self._LEAF in context:
for future in context[self._LEAF]:
yield (path, future)
for key in context:
if key is self._LEAF:
continue
yield from self.waiters(path=path + [key]) | 0.002699 |
def before_request(self) -> Optional[Response]:
"""Determine if a user is allowed to view this route."""
auth = request.authorization
if not auth or not self._check_auth(auth.username, auth.password):
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'}
)
session['logged_in'] = auth.username
# pylint wants this return statement
return None | 0.003442 |
def compile_schema(self, schema):
""" Compile the current schema into a callable validator
:return: Callable validator
:rtype: callable
:raises SchemaError: Schema compilation error
"""
compiler = self.get_schema_compiler(schema)
if compiler is None:
raise SchemaError(_(u'Unsupported schema data type {!r}').format(type(schema).__name__))
return compiler(schema) | 0.006787 |
def seektime(self, disk):
"""
        Measures seek latency on a disk, which is a good indication of the disk type.
        It is a reliable way to verify whether the underlying disk is an SSD or an HDD.
        :param disk: disk path or name (/dev/sda, or sda)
        :return: a dict of the form {'device': '<device-path>', 'elapsed': <seek time in us>, 'type': '<SSD or HDD>'}
"""
args = {
'disk': disk,
}
self._seektime_chk.check(args)
return self._client.json("disk.seektime", args) | 0.009091 |
def regexNamer(regex, usePageUrl=False):
"""Get name from regular expression."""
@classmethod
def _namer(cls, imageUrl, pageUrl):
"""Get first regular expression group."""
url = pageUrl if usePageUrl else imageUrl
mo = regex.search(url)
if mo:
return mo.group(1)
return _namer | 0.002976 |
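A hedged usage sketch: the returned callable is wrapped in `classmethod`, so it is meant to be assigned as a class attribute of a scraper class. The class, regex, and URLs below are invented for illustration.

```python
import re

class ExampleComic:
    # Hypothetical scraper: extract the strip name from the image URL.
    namer = regexNamer(re.compile(r'/strips/(\w+)\.png'))

name = ExampleComic.namer('https://example.com/strips/ep42.png',
                          'https://example.com/42')
print(name)   # 'ep42'
```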
def run(self, galaxy_data, results=None, mask=None):
"""
Run this phase.
Parameters
----------
galaxy_data
mask: Mask
The default masks passed in by the pipeline
results: autofit.tools.pipeline.ResultsCollection
An object describing the results of the last phase or None if no phase has been executed
Returns
-------
result: AbstractPhase.Result
A result object comprising the best fit model and other hyper.
"""
analysis = self.make_analysis(galaxy_data=galaxy_data, results=results, mask=mask)
result = self.run_analysis(analysis)
return self.make_result(result, analysis) | 0.005517 |
def h6_mahe(simulated_array, observed_array, k=1, replace_nan=None, replace_inf=None,
remove_neg=False,
remove_zero=False):
"""Compute the H6 mean absolute error.
.. image:: /pictures/H6.png
.. image:: /pictures/AHE.png
**Range:**
**Notes:**
Parameters
----------
simulated_array: one dimensional ndarray
An array of simulated data from the time series.
observed_array: one dimensional ndarray
An array of observed data from the time series.
k: int or float
If given, sets the value of k. If None, k=1.
replace_nan: float, optional
If given, indicates which value to replace NaN values with in the two arrays. If None, when
a NaN value is found at the i-th position in the observed OR simulated array, the i-th value
of the observed and simulated array are removed before the computation.
replace_inf: float, optional
If given, indicates which value to replace Inf values with in the two arrays. If None, when
an inf value is found at the i-th position in the observed OR simulated array, the i-th
value of the observed and simulated array are removed before the computation.
remove_neg: boolean, optional
If True, when a negative value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
remove_zero: boolean, optional
If true, when a zero value is found at the i-th position in the observed OR simulated
array, the i-th value of the observed AND simulated array are removed before the
computation.
Returns
-------
float
The mean absolute H6 error.
Examples
--------
>>> import HydroErr as he
>>> import numpy as np
>>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
>>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
>>> he.h6_mahe(sim, obs)
0.11743831388794852
References
----------
- Tornquist, L., Vartia, P., Vartia, Y.O., 1985. How Should Relative Changes be Measured?
The American Statistician 43-46.
"""
# Treats data
simulated_array, observed_array = treat_values(
simulated_array,
observed_array,
replace_nan=replace_nan,
replace_inf=replace_inf,
remove_neg=remove_neg,
remove_zero=remove_zero
)
top = (simulated_array / observed_array - 1)
bot = np.power(0.5 * (1 + np.power(simulated_array / observed_array, k)), 1 / k)
h = top / bot
return np.mean(np.abs(h)) | 0.00495 |
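Written out from the function body (a transcription from the code, so treat the notation as a sketch rather than the original figures), the quantity being averaged is:

```latex
h_i = \frac{S_i/O_i - 1}{\left[\tfrac{1}{2}\left(1 + (S_i/O_i)^{k}\right)\right]^{1/k}},
\qquad
\mathrm{H6\ MAHE} = \frac{1}{n}\sum_{i=1}^{n} \lvert h_i \rvert
```

where S_i and O_i are the simulated and observed values after the optional NaN/Inf/negative/zero treatment.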
def default_rotations(*qubits):
"""
Generates the Quil programs for the tomographic pre- and post-rotations of any number of qubits.
:param list qubits: A list of qubits to perform tomography on.
"""
for gates in cartesian_product(TOMOGRAPHY_GATES.keys(), repeat=len(qubits)):
tomography_program = Program()
for qubit, gate in izip(qubits, gates):
tomography_program.inst(gate(qubit))
yield tomography_program | 0.006438 |
def _get_first_aggregate_text(node_list):
'''
Extract text from the first occurred DOM aggregate.
'''
if not node_list:
return ''
out = []
for node in node_list[0].childNodes:
if node.nodeType == dom.Document.TEXT_NODE:
out.append(node.nodeValue)
return '\n'.join(out) | 0.003077 |
def encrypt(self, msg):
"""encrypts a message"""
iv = self.random_bytes(AES.block_size)
ctr = Counter.new(AES.block_size * 8, initial_value=self.bin2long(iv))
cipher = AES.AESCipher(self._cipherkey, AES.MODE_CTR, counter=ctr)
cipher_text = cipher.encrypt(msg)
intermediate = iv + cipher_text
signature = self.sign(intermediate)
return signature + intermediate | 0.004728 |
def pull_image(self, image, progress_callback=None):
"""
Pull image from docker repository
:params image: Image name
:params progress_callback: A function that receive a log message about image download progress
"""
try:
yield from self.query("GET", "images/{}/json".format(image))
return # We already have the image skip the download
except DockerHttp404Error:
pass
if progress_callback:
progress_callback("Pull {} from docker hub".format(image))
response = yield from self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
# The pull api will stream status via an HTTP JSON stream
content = ""
while True:
try:
chunk = yield from response.content.read(1024)
except aiohttp.ServerDisconnectedError:
break
if not chunk:
break
content += chunk.decode("utf-8")
try:
while True:
content = content.lstrip(" \r\n\t")
answer, index = json.JSONDecoder().raw_decode(content)
if "progress" in answer and progress_callback:
progress_callback("Pulling image {}:{}: {}".format(image, answer["id"], answer["progress"]))
content = content[index:]
except ValueError: # Partial JSON
pass
response.close()
if progress_callback:
progress_callback("Success pulling image {}".format(image)) | 0.003062 |
def rlecode_hqx(s):
"""
Run length encoding for binhex4.
The CPython implementation does not do run length encoding
of \x90 characters. This implementation does.
"""
if not s:
return ''
result = []
prev = s[0]
count = 1
# Add a dummy character to get the loop to go one extra round.
# The dummy must be different from the last character of s.
# In the same step we remove the first character, which has
# already been stored in prev.
if s[-1] == '!':
s = s[1:] + '?'
else:
s = s[1:] + '!'
for c in s:
if c == prev and count < 255:
count += 1
else:
if count == 1:
if prev != '\x90':
result.append(prev)
else:
result += ['\x90', '\x00']
elif count < 4:
if prev != '\x90':
result += [prev] * count
else:
result += ['\x90', '\x00'] * count
else:
if prev != '\x90':
result += [prev, '\x90', chr(count)]
else:
result += ['\x90', '\x00', '\x90', chr(count)]
count = 1
prev = c
return ''.join(result) | 0.000775 |
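A quick sanity check of the encoder (a small example traced by hand against the loop above): a run of five 'a' characters collapses to the three-byte form 'a', '\x90', count, and the trailing 'b' passes through literally.

```python
encoded = rlecode_hqx('aaaaab')
assert encoded == 'a\x90\x05b'
```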
def multiget_cached(object_key, argument_key=None, default_result=None,
result_fields=None, join_table_name=None, coerce_args_to_strings=False):
"""
:param object_key: the names of the attributes on the result object that are meant to match the function parameters
:param argument_key: the function parameter names you wish to match with the `object_key`s.
By default, this will be all of your wrapped function's arguments, in order.
So, you'd really only use this when you want to ignore a given function argument.
:param default_result: The result to put into the cache if nothing is matched.
:param result_fields: The attribute on your result object you wish to return the value of.
By default, the whole object is returned.
:param join_table_name: A temporary shortcut until we allow dot.path traversal for object_key.
Will call getattr(getattr(result, join_table_name), object_key)
:param coerce_args_to_strings: force coerce all arguments to the inner function to strings.
        Useful for SQL, where a mix of ints and strings in `WHERE x IN (list)` clauses causes poor performance.
:return: A wrapper that allows you to queue many O(1) calls and flush the queue all at once,
rather than executing the inner function body N times.
"""
def create_wrapper(inner_f):
return MultigetCacheWrapper(
inner_f, object_key, argument_key, default_result, result_fields,
join_table_name, coerce_args_to_strings=coerce_args_to_strings
)
return create_wrapper | 0.007624 |
def serialize_rules(self, rules):
"""Creates a payload for the redis server."""
# TODO(mdietz): If/when we support other rule types, this comment
# will have to be revised.
# Action and direction are static, for now. The implementation may
# support 'deny' and 'egress' respectively in the future. We allow
# the direction to be set to something else, technically, but current
# plugin level call actually raises. It's supported here for unit
# test purposes at this time
serialized = []
for rule in rules:
direction = rule["direction"]
source = ''
destination = ''
if rule.get("remote_ip_prefix"):
prefix = rule["remote_ip_prefix"]
if direction == "ingress":
source = self._convert_remote_network(prefix)
else:
if (Capabilities.EGRESS not in
CONF.QUARK.environment_capabilities):
raise q_exc.EgressSecurityGroupRulesNotEnabled()
else:
destination = self._convert_remote_network(prefix)
optional_fields = {}
# NOTE(mdietz): this will expand as we add more protocols
protocol_map = protocols.PROTOCOL_MAP[rule["ethertype"]]
if rule["protocol"] == protocol_map["icmp"]:
optional_fields["icmp type"] = rule["port_range_min"]
optional_fields["icmp code"] = rule["port_range_max"]
else:
optional_fields["port start"] = rule["port_range_min"]
optional_fields["port end"] = rule["port_range_max"]
payload = {"ethertype": rule["ethertype"],
"protocol": rule["protocol"],
"source network": source,
"destination network": destination,
"action": "allow",
"direction": direction}
payload.update(optional_fields)
serialized.append(payload)
return serialized | 0.000928 |
def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):
"""
Returns True if sites are periodic images of each other.
Args:
other (PeriodicSite): Other site
tolerance (float): Tolerance to compare fractional coordinates
check_lattice (bool): Whether to check if the two sites have the
same lattice.
Returns:
bool: True if sites are periodic images of each other.
"""
if check_lattice and self.lattice != other.lattice:
return False
if self.species != other.species:
return False
frac_diff = pbc_diff(self.frac_coords, other.frac_coords)
return np.allclose(frac_diff, [0, 0, 0], atol=tolerance) | 0.002594 |
def apply(self, parent_environ=None):
"""Apply the context to the current python session.
Note that this updates os.environ and possibly sys.path, if
`parent_environ` is not provided.
Args:
parent_environ: Environment to interpret the context within,
defaults to os.environ if None.
"""
interpreter = Python(target_environ=os.environ)
executor = self._create_executor(interpreter, parent_environ)
self._execute(executor)
interpreter.apply_environ() | 0.003636 |
def connect(self, protocol=None):
"""! @brief Initialize DAP IO pins for JTAG or SWD"""
self._link.enter_debug(STLink.Protocol.SWD)
self._is_connected = True | 0.01105 |
def expand_user(path):
"""Expand '~'-style usernames in strings.
This is similar to :func:`os.path.expanduser`, but it computes and returns
extra information that will be useful if the input was being used in
computing completions, and you wish to return the completions with the
original '~' instead of its expanded value.
Parameters
----------
path : str
String to be expanded. If no ~ is present, the output is the same as the
input.
Returns
-------
newpath : str
Result of ~ expansion in the input path.
tilde_expand : bool
Whether any expansion was performed or not.
tilde_val : str
The value that ~ was replaced with.
"""
# Default values
tilde_expand = False
tilde_val = ''
newpath = path
if path.startswith('~'):
tilde_expand = True
rest = len(path)-1
newpath = os.path.expanduser(path)
if rest:
tilde_val = newpath[:-rest]
else:
tilde_val = newpath
return newpath, tilde_expand, tilde_val | 0.000929 |
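A short usage example of the three return values, assuming a home directory of /home/alice (the paths are illustrative; the actual expansion depends on the current user):

```python
newpath, expanded, tilde_val = expand_user('~/notebooks/demo.ipynb')
# newpath   -> '/home/alice/notebooks/demo.ipynb'
# expanded  -> True
# tilde_val -> '/home/alice'

print(expand_user('data/demo.csv'))   # ('data/demo.csv', False, '')
```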
def build_url_name(cls, name, name_prefix=None):
"""
Given a ``name`` & an optional ``name_prefix``, this generates a name
for a URL.
:param name: The name for the URL (ex. 'detail')
:type name: string
:param name_prefix: (Optional) A prefix for the URL's name (for
resolving). The default is ``None``, which will autocreate a prefix
based on the class name. Ex: ``BlogPostResource`` ->
``api_blog_post_list``
:type name_prefix: string
:returns: The final name
:rtype: string
"""
if name_prefix is None:
name_prefix = 'api_{}'.format(
cls.__name__.replace('Resource', '').lower()
)
name_prefix = name_prefix.rstrip('_')
return '_'.join([name_prefix, name]) | 0.002381 |
def url(self):
"""The URL as a string of the resource."""
if not self._url[2].endswith('/'):
self._url[2] += '/'
return RestURL.url.__get__(self) | 0.01105 |
def as_cql_query(self, formatted=False):
"""
Returns a CQL query that can be used to recreate this type.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.
"""
ret = "CREATE TYPE %s.%s (%s" % (
protect_name(self.keyspace),
protect_name(self.name),
"\n" if formatted else "")
if formatted:
field_join = ",\n"
padding = " "
else:
field_join = ", "
padding = ""
fields = []
for field_name, field_type in zip(self.field_names, self.field_types):
fields.append("%s %s" % (protect_name(field_name), field_type))
ret += field_join.join("%s%s" % (padding, field) for field in fields)
ret += "\n)" if formatted else ")"
return ret | 0.002273 |
def delete(self, request, bot_id, id, format=None):
"""
Delete existing Telegram Bot
---
responseMessages:
- code: 401
message: Not authenticated
"""
return super(TelegramBotDetail, self).delete(request, bot_id, id, format) | 0.010101 |
def data_available(dataset_name=None):
"""Check if the data set is available on the local machine already."""
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
dr = data_resources[dataset_name]
zip_urls = (dr['files'], )
if 'save_names' in dr: zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
for file_list, save_list in zip_longest(*zip_urls, fillvalue=[]):
for f, s in zip_longest(file_list, save_list, fillvalue=None):
if s is not None: f=s # If there is a save_name given, use that one
if not os.path.exists(os.path.join(data_path, dataset_name, f)):
return False
return True | 0.008032 |
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m:
return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
return _parse_date_w3dtf(w3dtfdate) | 0.006006 |
def on_assert(self, node): # ('test', 'msg')
"""Assert statement."""
if not self.run(node.test):
self.raise_exception(node, exc=AssertionError, msg=node.msg)
return True | 0.009615 |
def _resizeCurrentColumnToContents(self, new_index, old_index):
"""Resize the current column to its contents."""
if new_index.column() not in self._autosized_cols:
# Ensure the requested column is fully into view after resizing
self._resizeVisibleColumnsToContents()
self.dataTable.scrollTo(new_index) | 0.005587 |
def vm_deploy(name, kwargs=None, call=None):
'''
Initiates the instance of the given VM on the target host.
.. versionadded:: 2016.3.0
name
The name of the VM to deploy.
host_id
The ID of the target host where the VM will be deployed. Can be used instead
of ``host_name``.
host_name
The name of the target host where the VM will be deployed. Can be used instead
of ``host_id``.
capacity_maintained
True to enforce the Host capacity is not over-committed. This parameter is only
acknowledged for users in the ``oneadmin`` group. Host capacity will be always
enforced for regular users.
datastore_id
The ID of the target system data-store where the VM will be deployed. Optional
and can be used instead of ``datastore_name``. If neither ``datastore_id`` nor
``datastore_name`` are set, OpenNebula will choose the data-store.
datastore_name
The name of the target system data-store where the VM will be deployed. Optional,
and can be used instead of ``datastore_id``. If neither ``datastore_id`` nor
``datastore_name`` are set, OpenNebula will choose the data-store.
CLI Example:
.. code-block:: bash
salt-cloud -a vm_deploy my-vm host_id=0
salt-cloud -a vm_deploy my-vm host_id=1 capacity_maintained=False
salt-cloud -a vm_deploy my-vm host_name=host01 datastore_id=1
salt-cloud -a vm_deploy my-vm host_name=host01 datastore_name=default
'''
if call != 'action':
raise SaltCloudSystemExit(
'The vm_deploy action must be called with -a or --action.'
)
if kwargs is None:
kwargs = {}
host_id = kwargs.get('host_id', None)
host_name = kwargs.get('host_name', None)
capacity_maintained = kwargs.get('capacity_maintained', True)
datastore_id = kwargs.get('datastore_id', None)
datastore_name = kwargs.get('datastore_name', None)
if host_id:
if host_name:
log.warning(
'Both the \'host_id\' and \'host_name\' arguments were provided. '
'\'host_id\' will take precedence.'
)
elif host_name:
host_id = get_host_id(kwargs={'name': host_name})
else:
raise SaltCloudSystemExit(
'The vm_deploy function requires a \'host_id\' or a \'host_name\' '
'to be provided.'
)
if datastore_id:
if datastore_name:
log.warning(
'Both the \'datastore_id\' and \'datastore_name\' arguments were provided. '
'\'datastore_id\' will take precedence.'
)
elif datastore_name:
datastore_id = get_datastore_id(kwargs={'name': datastore_name})
else:
datastore_id = '-1'
server, user, password = _get_xml_rpc()
auth = ':'.join([user, password])
vm_id = get_vm_id(kwargs={'name': name})
response = server.one.vm.deploy(auth,
int(vm_id),
int(host_id),
salt.utils.data.is_true(capacity_maintained),
int(datastore_id))
data = {
'action': 'vm.deploy',
'deployed': response[0],
'vm_id': response[1],
'error_code': response[2],
}
return data | 0.00354 |
def unregister_language(self, name):
"""
Unregisters language with given name from the :obj:`LanguagesModel.languages` class property.
:param name: Language to unregister.
:type name: unicode
:return: Method success.
:rtype: bool
"""
if not self.get_language(name):
raise foundations.exceptions.ProgrammingError("{0} | '{1}' language isn't registered!".format(
self.__class__.__name__, name))
LOGGER.debug("> Unregistering '{0}' language.".format(name))
for i, language in enumerate(self.__languages):
if not language.name == name:
continue
del (self.__languages[i])
self.sort_languages()
return True | 0.005148 |
def add_user_jobs(session, job_ids):
"""
Add a list of jobs to the currently authenticated user
"""
jobs_data = {
'jobs[]': job_ids
}
response = make_post_request(session, 'self/jobs', json_data=jobs_data)
json_data = response.json()
if response.status_code == 200:
return json_data['status']
else:
raise UserJobsNotAddedException(
message=json_data['message'],
error_code=json_data['error_code'],
request_id=json_data['request_id']) | 0.00189 |
def copy(source, dest):
"""
    Use the VOSpace service to copy a file from source to dest.
@param source:
@param dest:
@return:
"""
logger.info("copying {} -> {}".format(source, dest))
return client.copy(source, dest) | 0.004405 |
def from_jwt(cls, jwt, key=''):
"""
Decode a JWT string into a Jwt object
:param str jwt: JWT string
:param Optional[str] key: key used to verify JWT signature, if not provided then validation
is skipped.
:raises JwtDecodeError if decoding JWT fails for any reason.
:return: A DecodedJwt object containing the jwt information.
"""
verify = True if key else False
try:
payload = jwt_lib.decode(bytes(jwt), key, options={
'verify_signature': verify,
'verify_exp': True,
'verify_nbf': True,
})
headers = jwt_lib.get_unverified_header(jwt)
except Exception as e:
raise JwtDecodeError(getattr(e, 'message', str(e)))
return cls._from_jwt(headers, payload, key) | 0.003432 |
def update_group(self, group, process_id, wit_ref_name, page_id, section_id, group_id):
"""UpdateGroup.
[Preview API] Updates a group in the work item form.
:param :class:`<Group> <azure.devops.v5_0.work_item_tracking_process.models.Group>` group: The updated group.
:param str process_id: The ID of the process.
:param str wit_ref_name: The reference name of the work item type.
:param str page_id: The ID of the page the group is in.
:param str section_id: The ID of the section the group is in.
:param str group_id: The ID of the group.
:rtype: :class:`<Group> <azure.devops.v5_0.work_item_tracking_process.models.Group>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
if page_id is not None:
route_values['pageId'] = self._serialize.url('page_id', page_id, 'str')
if section_id is not None:
route_values['sectionId'] = self._serialize.url('section_id', section_id, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'str')
content = self._serialize.body(group, 'Group')
response = self._send(http_method='PATCH',
location_id='766e44e1-36a8-41d7-9050-c343ff02f7a5',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('Group', response) | 0.00625 |
def fit(self, X, y, **kwargs):
"""
Fits the estimator to calculate feature correlation to
dependent variable.
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
kwargs : dict
Keyword arguments passed to the fit method of the estimator.
Returns
-------
self : visualizer
The fit method must always return self to support pipelines.
"""
self._create_labels_for_features(X)
self._select_features_to_plot(X)
# Calculate Features correlation with target variable
if self.method == "pearson":
self.scores_ = np.array(
[pearsonr(x, y, **kwargs)[0] for x in np.asarray(X).T]
)
else:
self.scores_ = np.array(
self.correlation_methods[self.method](X, y, **kwargs)
)
# If feature indices are given, plot only the given features
if self.feature_index:
self.scores_ = self.scores_[self.feature_index]
self.features_ = self.features_[self.feature_index]
# Sort features by correlation
if self.sort:
sort_idx = np.argsort(self.scores_)
self.scores_ = self.scores_[sort_idx]
self.features_ = self.features_[sort_idx]
self.draw()
return self | 0.00196 |
def source_path(self):
"""The name in a form suitable for use in a filesystem.
Excludes the revision
"""
# Need to do this to ensure the function produces the
# bundle path when called from subclasses
names = [k for k, _, _ in self._name_parts]
parts = [self.source]
if self.bspace:
parts.append(self.bspace)
parts.append(
self._path_join(names=names, excludes=['source', 'version', 'bspace'], sep=self.NAME_PART_SEP))
return os.path.join(*parts) | 0.005396 |
def get_json_encoders_for_type(self, type_to_encode: type) -> Optional[Iterable[JSONEncoder]]:
"""
Gets the registered JSON encoder for the given type.
:param type_to_encode: the type of object that is to be encoded
:return: the encoder for the given object else `None` if unknown
"""
if type_to_encode not in self._json_encoders:
return None
return self._json_encoders[type_to_encode] | 0.006623 |
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in list(self._store.items())
) | 0.009091 |
def get_task(self, task=None):
"""
Returns a (task, description) tuple for a given task
"""
# Iterate over the grindstone tasks
for t in self.grindstone['tasks']:
# if they key matches the task
if key_of(t) == task:
# Return this task
return t
# Otherwise return nothing
return None | 0.005076 |
def export_html(html, filename, image_tag = None, inline = True):
""" Export the contents of the ConsoleWidget as HTML.
Parameters:
-----------
html : str,
A utf-8 encoded Python string containing the Qt HTML to export.
filename : str
The file to be saved.
image_tag : callable, optional (default None)
Used to convert images. See ``default_image_tag()`` for information.
inline : bool, optional [default True]
If True, include images as inline PNGs. Otherwise, include them as
links to external PNG files, mimicking web browsers' "Web Page,
Complete" behavior.
"""
if image_tag is None:
image_tag = default_image_tag
else:
image_tag = ensure_utf8(image_tag)
if inline:
path = None
else:
root,ext = os.path.splitext(filename)
path = root + "_files"
if os.path.isfile(path):
raise OSError("%s exists, but is not a directory." % path)
with open(filename, 'w') as f:
html = fix_html(html)
f.write(IMG_RE.sub(lambda x: image_tag(x, path = path, format = "png"),
html)) | 0.008518 |
def admin_tools_render_dashboard_css(
context, location='index', dashboard=None):
"""
Template tag that renders the dashboard css files, it takes two optional
arguments:
``location``
The location of the dashboard, it can be 'index' (for the admin index
dashboard) or 'app_index' (for the app index dashboard), the default
value is 'index'.
``dashboard``
An instance of ``Dashboard``, if not given, the dashboard is retrieved
with the ``get_index_dashboard`` or ``get_app_index_dashboard``
functions, depending on the ``location`` argument.
"""
if dashboard is None:
dashboard = get_dashboard(context, location)
context.update({
'template': 'admin_tools/dashboard/css.html',
'css_files': dashboard.Media.css,
})
return context | 0.001179 |
def _selectLines(self, startBlockNumber, endBlockNumber):
"""Select whole lines
"""
startBlock = self.document().findBlockByNumber(startBlockNumber)
endBlock = self.document().findBlockByNumber(endBlockNumber)
cursor = QTextCursor(startBlock)
cursor.setPosition(endBlock.position(), QTextCursor.KeepAnchor)
cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
self.setTextCursor(cursor) | 0.004301 |
def request(self, endpoint, data=None, json=None, filename=None, save_to=None):
"""
Perform a REST API request to the backend H2O server.
:param endpoint: (str) The endpoint's URL, for example "GET /4/schemas/KeyV4"
:param data: data payload for POST (and sometimes GET) requests. This should be a dictionary of simple
key/value pairs (values can also be arrays), which will be sent over in x-www-form-encoded format.
:param json: also data payload, but it will be sent as a JSON body. Cannot be used together with `data`.
:param filename: file to upload to the server. Cannot be used with `data` or `json`.
:param save_to: if provided, will write the response to that file (additionally, the response will be
streamed, so large files can be downloaded seamlessly). This parameter can be either a file name,
or a folder name. If the folder doesn't exist, it will be created automatically.
:returns: an H2OResponse object representing the server's response (unless ``save_to`` parameter is
provided, in which case the output file's name will be returned).
:raises H2OConnectionError: if the H2O server cannot be reached (or connection is not initialized)
:raises H2OServerError: if there was a server error (http 500), or server returned malformed JSON
:raises H2OResponseError: if the server returned an H2OErrorV3 response (e.g. if the parameters were invalid)
"""
if self._stage == 0: raise H2OConnectionError("Connection not initialized; run .connect() first.")
if self._stage == -1: raise H2OConnectionError("Connection was closed, and can no longer be used.")
# Prepare URL
assert_is_type(endpoint, str)
match = assert_matches(str(endpoint), r"^(GET|POST|PUT|DELETE|PATCH|HEAD) (/.*)$")
method = match.group(1)
urltail = match.group(2)
url = self._base_url + urltail
# Prepare data
if filename is not None:
assert_is_type(filename, str)
assert_is_type(json, None, "Argument `json` should be None when `filename` is used.")
assert_is_type(data, None, "Argument `data` should be None when `filename` is used.")
assert_satisfies(method, method == "POST",
"File uploads can only be done via POST method, got %s" % method)
elif data is not None:
assert_is_type(data, dict)
assert_is_type(json, None, "Argument `json` should be None when `data` is used.")
elif json is not None:
assert_is_type(json, dict)
data = self._prepare_data_payload(data)
files = self._prepare_file_payload(filename)
params = None
if method == "GET" and data:
params = data
data = None
stream = False
if save_to is not None:
assert_is_type(save_to, str)
stream = True
if self._cookies is not None and isinstance(self._cookies, list):
self._cookies = ";".join(self._cookies)
# Make the request
start_time = time.time()
try:
self._log_start_transaction(endpoint, data, json, files, params)
headers = {"User-Agent": "H2O Python client/" + sys.version.replace("\n", ""),
"X-Cluster": self._cluster_id,
"Cookie": self._cookies}
resp = requests.request(method=method, url=url, data=data, json=json, files=files, params=params,
headers=headers, timeout=self._timeout, stream=stream,
auth=self._auth, verify=self._verify_ssl_cert, proxies=self._proxies)
self._log_end_transaction(start_time, resp)
return self._process_response(resp, save_to)
except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError) as e:
if self._local_server and not self._local_server.is_running():
self._log_end_exception("Local server has died.")
raise H2OConnectionError("Local server has died unexpectedly. RIP.")
else:
self._log_end_exception(e)
raise H2OConnectionError("Unexpected HTTP error: %s" % e)
except requests.exceptions.Timeout as e:
self._log_end_exception(e)
elapsed_time = time.time() - start_time
raise H2OConnectionError("Timeout after %.3fs" % elapsed_time)
except H2OResponseError as e:
err = e.args[0]
err.endpoint = endpoint
err.payload = (data, json, files, params)
raise | 0.006134 |
def get_uuid(length=32, version=1):
"""
Returns a unique ID of a given length.
        Use `version=2` for cross-system uniqueness.
"""
if version == 1:
return uuid.uuid1().hex[:length]
else:
return uuid.uuid4().hex[:length] | 0.003876 |
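A self-contained sketch of the helper above with its import and a couple of calls (only the standard-library uuid module is needed):
import uuid

def get_uuid(length=32, version=1):
    """Return a unique hex ID of the given length (uuid1 is time-based, uuid4 is random)."""
    if version == 1:
        return uuid.uuid1().hex[:length]
    return uuid.uuid4().hex[:length]

print(get_uuid(8))             # e.g. '1a2b3c4d' (time/MAC based)
print(get_uuid(8, version=2))  # random, better for cross-system uniqueness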
def hessian_component(self, index1, index2):
"""Compute the hessian of the energy for one atom pair"""
result = np.zeros((3, 3), float)
if index1 == index2:
for index3 in range(self.numc):
if self.scaling[index1, index3] > 0:
d_1 = 1/self.distances[index1, index3]
for (se, ve), (sg, vg), (sh, vh) in zip(
self.yield_pair_energies(index1, index3),
self.yield_pair_gradients(index1, index3),
self.yield_pair_hessians(index1, index3)
):
result += (
+sh*self.dirouters[index1, index3]*ve
+sg*(np.identity(3, float) - self.dirouters[index1, index3])*ve*d_1
+sg*np.outer(self.directions[index1, index3], vg)
+sg*np.outer(vg, self.directions[index1, index3])
+se*vh
)*self.scaling[index1, index3]
elif self.scaling[index1, index2] > 0:
d_1 = 1/self.distances[index1, index2]
for (se, ve), (sg, vg), (sh, vh) in zip(
self.yield_pair_energies(index1, index2),
self.yield_pair_gradients(index1, index2),
self.yield_pair_hessians(index1, index2)
):
result -= (
+sh*self.dirouters[index1, index2]*ve
+sg*(np.identity(3, float) - self.dirouters[index1, index2])*ve*d_1
+sg*np.outer(self.directions[index1, index2], vg)
+sg*np.outer(vg, self.directions[index1, index2])
+se*vh
)*self.scaling[index1, index2]
return result | 0.006615 |
def query(self, query):
'''Returns an iterable of objects matching criteria expressed in `query`.
LoggingDatastore logs the access.
'''
self.logger.info('%s: query %s' % (self, query))
return super(LoggingDatastore, self).query(query) | 0.003891 |
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items",
predictionCol="prediction", numPartitions=None):
"""
setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \
predictionCol="prediction", numPartitions=None)
"""
kwargs = self._input_kwargs
return self._set(**kwargs) | 0.007916 |
def listContents(self):
""" Return list of volumes or diffs in this Store's selected directory. """
items = list(self.extraKeys.items())
items.sort(key=lambda t: t[1])
(count, size) = (0, 0)
for (diff, path) in items:
if path.startswith("/"):
continue
yield str(diff)
count += 1
size += diff.size
yield "TOTAL: %d diffs %s" % (count, humanize(size)) | 0.006466 |
def _sub_ms_char(self, match):
"""Changes a MS smart quote character to an XML or HTML
entity, or an ASCII character."""
orig = match.group(1)
if self.smart_quotes_to == 'ascii':
sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
else:
sub = self.MS_CHARS.get(orig)
if type(sub) == tuple:
if self.smart_quotes_to == 'xml':
sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
else:
sub = '&'.encode() + sub[0].encode() + ';'.encode()
else:
sub = sub.encode()
return sub | 0.003072 |
def displayNewKey(key):
"""Use ``gnupg.GPG.list_keys()`` to display details of the new key."""
if key.keyring:
gpg.keyring = key.keyring
if key.secring:
gpg.secring = key.secring
# Using '--fingerprint' twice will display subkey fingerprints too:
gpg.options = ['--fingerprint', '--fingerprint']
keylist = gpg.list_keys(secret=True)
# `result` is a `gnupg._parsers.ListKeys`, which is list-like, so iterate
# over all the keys and display their info:
for gpgkey in keylist:
for k, v in gpgkey.items():
log.info("%s: %s" % (k.capitalize(), v))
return keylist | 0.00157 |
def print_page(text):
"""Format the text and prints it on stdout.
Text is formatted by adding a ASCII frame around it and coloring the text.
Colors can be added to text using color tags, for example:
My [FG_BLUE]blue[NORMAL] text.
My [BG_BLUE]blue background[NORMAL] text.
"""
color_re = re.compile(r"\[(?P<color>[FB]G_[A-Z_]+|NORMAL)\]")
width = max([len(strip_colors(x)) for x in text.splitlines()])
print("\n" + hbar(width))
for line in text.splitlines():
if line == "[HBAR]":
print(hbar(width))
continue
tail = width - len(strip_colors(line))
sys.stdout.write("| ")
previous = 0
end = len(line)
for match in color_re.finditer(line):
sys.stdout.write(line[previous : match.start()])
set_color(match.groupdict()["color"])
previous = match.end()
sys.stdout.write(line[previous:end])
sys.stdout.write(" " * tail + " |\n")
print(hbar(width)) | 0.001916 |
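The helpers used above (strip_colors, hbar, set_color) are defined elsewhere; a plausible sketch of the first two, which the width calculation depends on (these are assumptions, not the original implementations):
import re

_COLOR_RE = re.compile(r"\[(?:[FB]G_[A-Z_]+|NORMAL)\]")

def strip_colors(text):
    # Drop color tags so the visible width of a line can be measured.
    return _COLOR_RE.sub("", text)

def hbar(width):
    # Horizontal frame line sized to the widest visible line plus the "| " padding.
    return "+" + "-" * (width + 2) + "+"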
def schedule(cls, mapreduce_spec):
"""Schedule finalize task.
Args:
mapreduce_spec: mapreduce specification as MapreduceSpec.
"""
task_name = mapreduce_spec.mapreduce_id + "-finalize"
finalize_task = taskqueue.Task(
name=task_name,
url=(mapreduce_spec.params["base_path"] + "/finalizejob_callback/" +
mapreduce_spec.mapreduce_id),
params={"mapreduce_id": mapreduce_spec.mapreduce_id},
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
queue_name = util.get_queue_name(None)
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
finalize_task,
queue_name):
try:
finalize_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r already exists. %s: %s",
task_name, e.__class__, e) | 0.002994 |
def send(self, message):
"""
Sends message to *mod-host*.
.. note::
Uses :class:`.ProtocolParser` for a high-level management.
As example, view :class:`.Host`
:param string message: Message that will be sent for *mod-host*
"""
print(message.encode('utf-8'))
self.client.send(message.encode('utf-8'))
received = self.client.recv(1024)
return received | 0.004454 |
def location(ip=None, key=None, field=None):
''' Get geolocation data for a given IP address
If field is specified, get specific field as text
Else get complete location data as JSON
'''
if field and (field not in field_list):
return 'Invalid field'
if field:
if ip:
url = 'https://ipapi.co/{}/{}/'.format(ip, field)
else:
url = 'https://ipapi.co/{}/'.format(field)
else:
if ip:
url = 'https://ipapi.co/{}/json/'.format(ip)
else:
url = 'https://ipapi.co/json/'
if key or API_KEY:
url = '{}?key={}'.format(url, (key or API_KEY))
response = get(url, headers=headers)
if field:
return response.text
else:
return response.json() | 0.003774 |
def absolute(self):
"""
The FQDN as a string in absolute form
"""
if not self.is_valid:
raise ValueError('invalid FQDN `{0}`'.format(self.fqdn))
if self.is_valid_absolute:
return self.fqdn
return '{0}.'.format(self.fqdn) | 0.006803 |
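A minimal standalone sketch of the same rule (an FQDN is absolute once it carries the trailing root dot); the validity checks of the original class are omitted here:
def to_absolute(fqdn: str) -> str:
    # Append the root dot unless the name is already in absolute form.
    return fqdn if fqdn.endswith('.') else '{0}.'.format(fqdn)

print(to_absolute('example.com'))   # example.com.
print(to_absolute('example.com.'))  # example.com.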
def cmd_guess_labels(*args):
"""
Arguments: <document id> [-- [--apply]]
Guess the labels that should be set on the document.
Example: paperwork-shell guess_labels -- 20161207_1144_00_8 --apply
Possible JSON replies:
--
{
"status": "error", "exception": "yyy",
"reason": "xxxx", "args": "(xxxx, )"
}
--
{
"status": "ok",
"docid": "xxxx",
"current_labels": ["label_a", "label_b"],
"guessed_labels": ["label_b", "label_c"],
"applied": "yes",
}
"""
args = list(args)
apply_labels = False
if "--apply" in args:
apply_labels = True
args.remove("--apply")
docid = args[0]
dsearch = get_docsearch()
doc = dsearch.get(docid)
if doc is None:
raise Exception(
"Document {} not found. Cannot guess labels".format(
docid
)
)
verbose("Current labels: {}".format(
", ".join([label.name for label in doc.labels])
))
guessed = dsearch.guess_labels(doc)
verbose("Guessed labels: {}".format(
", ".join([label.name for label in guessed])
))
r = {
'docid': doc.docid,
'current_labels': [label.name for label in doc.labels],
'guessed_labels': [label.name for label in guessed],
'applied': "yes" if apply_labels else "no",
}
changed = False
if apply_labels:
for label in guessed:
if label not in doc.labels:
dsearch.add_label(doc, label, update_index=False)
changed = True
for label in doc.labels:
if label not in guessed:
dsearch.remove_label(doc, label, update_index=False)
changed = True
if changed:
index_updater = dsearch.get_index_updater(optimize=False)
index_updater.upd_doc(doc)
index_updater.commit()
verbose("Document {} updated".format(docid))
elif apply_labels:
verbose("Document {} unchanged".format(docid))
reply(r) | 0.000473 |
def send_request(self, job_request, message_expiry_in_seconds=None):
"""
Send a JobRequest, and return a request ID.
The context and control_extra arguments may be used to include extra values in the
context and control headers, respectively.
:param job_request: The job request object to send
:type job_request: JobRequest
:param message_expiry_in_seconds: How soon the message will expire if not received by a server (defaults to
sixty seconds unless the settings are otherwise)
:type message_expiry_in_seconds: int
:return: The request ID
:rtype: int
:raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge
"""
request_id = self.request_counter
self.request_counter += 1
meta = {}
wrapper = self._make_middleware_stack(
[m.request for m in self.middleware],
self._base_send_request,
)
try:
with self.metrics.timer('client.send.including_middleware', resolution=TimerResolution.MICROSECONDS):
wrapper(request_id, meta, job_request, message_expiry_in_seconds)
return request_id
finally:
self.metrics.commit() | 0.006061 |
def record_set(session_factory, bucket, key_prefix, start_date, specify_hour=False):
"""Retrieve all s3 records for the given policy output url
From the given start date.
"""
s3 = local_session(session_factory).client('s3')
records = []
key_count = 0
date = start_date.strftime('%Y/%m/%d')
if specify_hour:
date += "/{}".format(start_date.hour)
else:
date += "/00"
marker = "{}/{}/resources.json.gz".format(key_prefix.strip("/"), date)
p = s3.get_paginator('list_objects_v2').paginate(
Bucket=bucket,
Prefix=key_prefix.strip('/') + '/',
StartAfter=marker,
)
with ThreadPoolExecutor(max_workers=20) as w:
for key_set in p:
if 'Contents' not in key_set:
continue
keys = [k for k in key_set['Contents']
if k['Key'].endswith('resources.json.gz')]
key_count += len(keys)
futures = map(lambda k: w.submit(
get_records, bucket, k, session_factory), keys)
for f in as_completed(futures):
records.extend(f.result())
log.info("Fetched %d records across %d files" % (
len(records), key_count))
return records | 0.0016 |
def load_class_by_name(name: str):
"""Given a dotted path, returns the class"""
mod_path, _, cls_name = name.rpartition('.')
mod = importlib.import_module(mod_path)
cls = getattr(mod, cls_name)
return cls | 0.004464 |
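A quick usage sketch of the loader, resolving a standard-library class so the example stays self-contained:
import importlib

def load_class_by_name(name: str):
    """Given a dotted path, returns the class"""
    mod_path, _, cls_name = name.rpartition('.')
    return getattr(importlib.import_module(mod_path), cls_name)

OrderedDict = load_class_by_name('collections.OrderedDict')
print(OrderedDict([('a', 1)]))  # OrderedDict([('a', 1)])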
def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
"""
Send a request to the api.
If the bot is set to return the json objects, it will look like this:
```json
{
"ok": bool,
"result": {...},
# optionally present:
"description": "human-readable description of the result",
"error_code": int
}
```
:param command: The Url command parameter
:type command: str
:param request_timeout: When the request should time out. Default: `None`
:type request_timeout: int
:param files: if it needs to send files.
:param use_long_polling: if it should use long polling. Default: `False`
(see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
:type use_long_polling: bool
:param query: all the other `**kwargs` will get json encoded.
:return: The json response from the server, or, if `self.return_python_objects` is `True`, a parsed return type.
:rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable
"""
import requests
url, params = self._prepare_request(command, query)
r = requests.post(url, params=params, files=files, stream=use_long_polling,
verify=True, # No self signed certificates. Telegram should be trustworthy anyway...
timeout=request_timeout)
return self._postprocess_request(r) | 0.006258 |
def pull_blob(self, digest, size=False, chunk_size=None):
"""
Download a blob from the registry given the hash of its content.
:param digest: Hash of the blob's content (prefixed by ``sha256:``).
:type digest: str
:param size: Whether to return the size of the blob too.
:type size: bool
:param chunk_size: Number of bytes to download at a time. Defaults to 8192.
:type chunk_size: int
:rtype: iterator
:returns: If ``size`` is falsey, a byte string iterator over the blob's content. If ``size`` is truthy, a tuple containing the iterator and the blob's size.
"""
if chunk_size is None:
chunk_size = 8192
r = self._request('get', 'blobs/' + digest, stream=True)
class Chunks(object):
# pylint: disable=too-few-public-methods
def __iter__(self):
sha256 = hashlib.sha256()
for chunk in r.iter_content(chunk_size):
sha256.update(chunk)
yield chunk
dgst = 'sha256:' + sha256.hexdigest()
if dgst != digest:
raise exceptions.DXFDigestMismatchError(dgst, digest)
return (Chunks(), long(r.headers['content-length'])) if size else Chunks() | 0.004566 |
def format_sql_workload_type_update_settings(result):
'''
Formats the SqlWorkloadTypeUpdateSettings object removing arguments that are empty
'''
from collections import OrderedDict
# Only display parameters that have content
order_dict = OrderedDict()
if result.sql_workload_type is not None:
order_dict['sqlWorkloadType'] = result.sql_workload_type
return order_dict | 0.004902 |
def convert_to_wav(files):
    '''Converts files to a format that pocketsphinx can deal with (16 kHz mono 16-bit WAV)'''
converted = []
for f in files:
new_name = f + '.temp.wav'
print(new_name)
if (os.path.exists(f + '.transcription.txt') is False) and (os.path.exists(new_name) is False):
subprocess.call(['ffmpeg', '-y', '-i', f, '-acodec', 'pcm_s16le', '-ac', '1', '-ar', '16000', new_name])
converted.append(new_name)
return converted | 0.008114 |
def dict_find_keys(dict_, val_list):
r"""
Args:
dict_ (dict):
val_list (list):
Returns:
dict: found_dict
CommandLine:
python -m utool.util_dict --test-dict_find_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,
... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,
... 'lsh': 6, 'kdtree_single': 4}
>>> val_list = [1]
>>> found_dict = dict_find_keys(dict_, val_list)
>>> result = ut.repr2(ut.map_vals(sorted, found_dict))
>>> print(result)
{1: ['default', 'kdtree']}
"""
found_dict = {
search_val: [key for key, val in six.iteritems(dict_)
if val == search_val]
for search_val in val_list
}
return found_dict | 0.001047 |
def last(self, n=1):
"""
Get the last element of an array. Passing **n** will return the last N
values in the array.
The **guard** check allows it to work with `_.map`.
"""
res = self.obj[-n:]
        if len(res) == 1:
res = res[0]
return self._wrap(res) | 0.006211 |
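The slicing logic above, standalone (the original wraps the result back into an underscore-style chain via self._wrap):
def last(seq, n=1):
    # Take the last n items; unwrap the list when only one item is requested.
    res = seq[-n:]
    if len(res) == 1:
        res = res[0]
    return res

print(last([1, 2, 3]))       # 3
print(last([1, 2, 3], n=2))  # [2, 3]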
def _feature_file(self, parallel = None, index = None):
"""Returns the name of an intermediate file for storing features."""
if index is None:
index = 0 if parallel is None or "SGE_TASK_ID" not in os.environ else int(os.environ["SGE_TASK_ID"])
return os.path.join(self.feature_directory, "Features_%02d.hdf5" % index) | 0.020896 |