def mod_repo(repo, **kwargs):
'''
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as the following values are specified:
repo or alias
alias by which Zypper refers to the repo
url, mirrorlist or baseurl
the URL for Zypper to reference
enabled
Enable or disable (True or False) repository,
but do not remove if disabled.
refresh
Enable or disable (True or False) auto-refresh of the repository.
cache
Enable or disable (True or False) RPM files caching.
gpgcheck
Enable or disable (True or False) GPG check for this repository.
gpgautoimport : False
If set to True, automatically trust and import public GPG key for
the repository.
root
operate on a different root directory.
Key/Value pairs may also be removed from a repo's configuration by setting
a key to a blank value. Bear in mind that a name cannot be deleted, and a
URL can only be deleted if a ``mirrorlist`` is specified (or vice versa).
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo alias alias=new_alias
salt '*' pkg.mod_repo alias url= mirrorlist=http://host.com/
'''
root = kwargs.get('root') or None
repos_cfg = _get_configured_repos(root=root)
added = False
# An attempt to add new one?
if repo not in repos_cfg.sections():
url = kwargs.get('url', kwargs.get('mirrorlist', kwargs.get('baseurl')))
if not url:
raise CommandExecutionError(
'Repository \'{0}\' not found, and neither \'baseurl\' nor '
'\'mirrorlist\' was specified'.format(repo)
)
if not _urlparse(url).scheme:
raise CommandExecutionError(
'Repository \'{0}\' not found and URL for baseurl/mirrorlist '
'is malformed'.format(repo)
)
# Is there already such repo under different alias?
for alias in repos_cfg.sections():
repo_meta = _get_repo_info(alias, repos_cfg=repos_cfg, root=root)
# Complete user URL, in case it is not
new_url = _urlparse(url)
if not new_url.path:
new_url = _urlparse.ParseResult(scheme=new_url.scheme, # pylint: disable=E1123
netloc=new_url.netloc,
path='/',
params=new_url.params,
query=new_url.query,
fragment=new_url.fragment)
base_url = _urlparse(repo_meta['baseurl'])
if new_url == base_url:
raise CommandExecutionError(
'Repository \'{0}\' already exists as \'{1}\'.'.format(
repo,
alias
)
)
# Add new repo
__zypper__(root=root).xml.call('ar', url, repo)
# Verify the repository has been added
repos_cfg = _get_configured_repos(root=root)
if repo not in repos_cfg.sections():
raise CommandExecutionError(
'Failed to add new repository \'{0}\' for unspecified reason. '
'Please check zypper logs.'.format(repo))
added = True
repo_info = _get_repo_info(repo, root=root)
if (
not added and 'baseurl' in kwargs and
not (kwargs['baseurl'] == repo_info['baseurl'])
):
# Note: zypper does not support changing the baseurl
# we need to remove the repository and add it again with the new baseurl
repo_info.update(kwargs)
repo_info.setdefault('cache', False)
del_repo(repo, root=root)
return mod_repo(repo, root=root, **repo_info)
# Modify added or existing repo according to the options
cmd_opt = []
global_cmd_opt = []
call_refresh = False
if 'enabled' in kwargs:
cmd_opt.append(kwargs['enabled'] and '--enable' or '--disable')
if 'refresh' in kwargs:
cmd_opt.append(kwargs['refresh'] and '--refresh' or '--no-refresh')
if 'cache' in kwargs:
cmd_opt.append(
kwargs['cache'] and '--keep-packages' or '--no-keep-packages'
)
if 'gpgcheck' in kwargs:
cmd_opt.append(kwargs['gpgcheck'] and '--gpgcheck' or '--no-gpgcheck')
if 'priority' in kwargs:
cmd_opt.append("--priority={0}".format(kwargs.get('priority', DEFAULT_PRIORITY)))
if 'humanname' in kwargs:
cmd_opt.append("--name='{0}'".format(kwargs.get('humanname')))
if kwargs.get('gpgautoimport') is True:
global_cmd_opt.append('--gpg-auto-import-keys')
call_refresh = True
if cmd_opt:
cmd_opt = global_cmd_opt + ['mr'] + cmd_opt + [repo]
__zypper__(root=root).refreshable.xml.call(*cmd_opt)
comment = None
if call_refresh:
# when used with "zypper ar --refresh" or "zypper mr --refresh"
# --gpg-auto-import-keys is not doing anything
# so we need to specifically refresh here with --gpg-auto-import-keys
refresh_opts = global_cmd_opt + ['refresh'] + [repo]
__zypper__(root=root).xml.call(*refresh_opts)
elif not added and not cmd_opt:
comment = 'Specified arguments did not result in modification of repo'
repo = get_repo(repo, root=root)
if comment:
repo['comment'] = comment
return repo
def cleanup(arctic_lib, symbol, version_ids, versions_coll, shas_to_delete=None, pointers_cfgs=None):
"""
Helper method for cleaning up chunks from a version store
"""
pointers_cfgs = set(pointers_cfgs) if pointers_cfgs else set()
collection = arctic_lib.get_top_level_collection()
version_ids = list(version_ids)
# Iterate versions to check if they are created only with fw pointers, parent pointers (old), or mixed
# Keep in mind that the version is not yet inserted.
all_symbol_pointers_cfgs = _get_symbol_pointer_cfgs(symbol, versions_coll)
all_symbol_pointers_cfgs.update(pointers_cfgs)
# All the versions of the symbol have been created with old arctic or with disabled forward pointers.
# Preserves backwards compatibility and regression for old pointers implementation.
if all_symbol_pointers_cfgs == {FwPointersCfg.DISABLED} or not all_symbol_pointers_cfgs:
_cleanup_parent_pointers(collection, symbol, version_ids)
return
# All the versions of the symbol we wish to delete have been created with forward pointers
if FwPointersCfg.DISABLED not in all_symbol_pointers_cfgs:
_cleanup_fw_pointers(collection, symbol, version_ids, versions_coll,
shas_to_delete=shas_to_delete, do_clean=True)
return
# Reaching here means the symbol has versions with mixed forward pointers and legacy/parent pointer configurations
_cleanup_mixed(symbol, collection, version_ids, versions_coll)
def setDataFrame(self, dataFrame, copyDataFrame=False, filePath=None):
"""
Setter function to _dataFrame. Holds all data.
Note:
It's not implemented with python properties to keep Qt conventions.
Raises:
TypeError: if dataFrame is not of type pandas.core.frame.DataFrame.
Args:
dataFrame (pandas.core.frame.DataFrame): assign dataFrame to _dataFrame. Holds all the data displayed.
copyDataFrame (bool, optional): create a copy of dataFrame or use it as is. defaults to False.
If you use it as is, you can change it from outside otherwise you have to reset the dataFrame
after external changes.
"""
if not isinstance(dataFrame, pandas.core.frame.DataFrame):
raise TypeError("not of type pandas.core.frame.DataFrame")
self.layoutAboutToBeChanged.emit()
if copyDataFrame:
self._dataFrame = dataFrame.copy()
else:
self._dataFrame = dataFrame
self._columnDtypeModel = ColumnDtypeModel(dataFrame)
self._columnDtypeModel.dtypeChanged.connect(self.propagateDtypeChanges)
self._columnDtypeModel.changeFailed.connect(
lambda columnName, index, dtype: self.changingDtypeFailed.emit(columnName, index, dtype)
)
if filePath is not None:
self._filePath = filePath
self.layoutChanged.emit()
self.dataChanged.emit()
self.dataFrameChanged.emit()
def getAttributeNode(self, attr: str) -> Optional[Attr]:
"""Get attribute of this node as Attr format.
If this node does not have ``attr``, return None.
"""
return self.attributes.getNamedItem(attr)
def processes(self, plantuml_text):
"""Processes the plantuml text into the raw PNG image data.
:param str plantuml_text: The plantuml markup to render
:returns: the raw image data
"""
url = self.get_url(plantuml_text)
try:
response, content = self.http.request(url, **self.request_opts)
except self.HttpLib2Error as e:
raise PlantUMLConnectionError(e)
if response.status != 200:
raise PlantUMLHTTPError(response, content)
return content
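# Hedged usage sketch for processes() above: it assumes the surrounding class
# is the PlantUML client from the "plantuml" package and that the public demo
# server URL below is reachable; both are assumptions, not taken from the source.
from plantuml import PlantUML
server = PlantUML(url='http://www.plantuml.com/plantuml/img/')
png_bytes = server.processes("@startuml\nAlice -> Bob: hello\n@enduml")
with open('diagram.png', 'wb') as fh:
    fh.write(png_bytes)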
def add(name, function_name, cron):
""" Create an event """
lambder.add_event(name=name, function_name=function_name, cron=cron)
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None)
def disvecinf(self, x, y, aq=None):
'''Can be called with only one x,y value'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((2, self.nparam, aq.naq))
if aq == self.aq:
qxqy = np.zeros((2, aq.naq))
qxqy[:, :] = self.bessel.disbeslsho(float(x), float(y), self.z1, self.z2, aq.lab,
0, aq.ilap, aq.naq)
rv[0] = self.aq.coef[self.layers] * qxqy[0]
rv[1] = self.aq.coef[self.layers] * qxqy[1]
return rv
def desaturate(c, k=0):
"""
Utility function to desaturate a color c by an amount k.
"""
from matplotlib.colors import ColorConverter
c = ColorConverter().to_rgb(c)
intensity = 0.299 * c[0] + 0.587 * c[1] + 0.114 * c[2]
return [intensity * k + i * (1 - k) for i in c]
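# Minimal usage sketch for desaturate() above (requires matplotlib, which the
# function imports internally). k=0 returns the RGB triple unchanged; k=1
# collapses it to its luma-weighted gray level.
print(desaturate('red', k=0.0))  # -> [1.0, 0.0, 0.0]
print(desaturate('red', k=0.5))  # -> approximately [0.6495, 0.1495, 0.1495]
print(desaturate('red', k=1.0))  # -> [0.299, 0.299, 0.299]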
def add_marker_to_qtls(qtlfile, mapfile, outputfile='qtls_with_mk.csv'):
"""This function adds to a list of QTLs, the closest marker to the
QTL peak.
:arg qtlfile: a CSV list of all the QTLs found.
The file should be structured as follows::
Trait, Linkage group, position, other columns
The other columns will not matter as long as the first three
columns are as such.
:arg mapfile: a CSV representation of the map used for the QTL
mapping analysis.
The file should be structured as follows::
Marker, Linkage group, position
:kwarg outputfile: the name of the output file in which the list of
QTLs with their closest marker will be written.
"""
qtl_list = read_input_file(qtlfile, ',')
map_list = read_input_file(mapfile, ',')
if not qtl_list or not map_list: # pragma: no cover
return
qtl_list[0].append('Closest marker')
qtls = []
qtls.append(qtl_list[0])
for qtl in qtl_list[1:]:
qtl.append(add_marker_to_qtl(qtl, map_list))
qtls.append(qtl)
LOG.info('- %s QTLs processed in %s' % (len(qtls), qtlfile))
write_matrix(outputfile, qtls)
def initial(self, request, *args, **kwargs):
"""
Custom initial method:
* ensure node exists and store it in an instance attribute
* change queryset to return only images of current node
"""
super(NodeImageList, self).initial(request, *args, **kwargs)
# ensure node exists
self.node = get_queryset_or_404(
Node.objects.published().accessible_to(request.user),
{'slug': self.kwargs['slug']}
)
# check permissions on node (for image creation)
self.check_object_permissions(request, self.node)
def _setSampleSizeBytes(self):
"""
updates the current record of the packet size per sample and the relationship between this and the fifo reads.
"""
self.sampleSizeBytes = self.getPacketSize()
if self.sampleSizeBytes > 0:
self.maxBytesPerFifoRead = (32 // self.sampleSizeBytes)
def get_authorizations_for_resource_and_function(self, resource_id, function_id):
"""Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_for_peers
# NOTE: This implementation currently ignores plenary and effective views
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'sourceId': str(resource_id),
'destinationId': str(function_id)},
**self._view_filter())).sort('_id', ASCENDING)
return objects.AuthorizationList(result, runtime=self._runtime)
def from_seqfeature(s, **kwargs):
"""
Converts a Bio.SeqFeature object to a gffutils.Feature object.
The GFF fields `source`, `score`, `seqid`, and `frame` are assumed to be
stored as qualifiers. Any other qualifiers will be assumed to be GFF
attributes.
"""
source = s.qualifiers.get('source', '.')[0]
score = s.qualifiers.get('score', '.')[0]
seqid = s.qualifiers.get('seqid', '.')[0]
frame = s.qualifiers.get('frame', '.')[0]
strand = _feature_strand[s.strand]
# BioPython parses 1-based GenBank positions into 0-based for use within
# Python. We need to convert back to 1-based GFF format here.
start = s.location.start.position + 1
stop = s.location.end.position
featuretype = s.type
id = s.id
attributes = dict(s.qualifiers)
attributes.pop('source', '.')
attributes.pop('score', '.')
attributes.pop('seqid', '.')
attributes.pop('frame', '.')
return Feature(seqid, source, featuretype, start, stop, score, strand,
frame, attributes, id=id, **kwargs)
def remove_straddlers(events, time, s_freq, toler=0.1):
"""Reject an event if it straddles a stitch, by comparing its
duration to its timespan.
Parameters
----------
events : ndarray (dtype='int')
N x M matrix with start, ..., end samples
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
toler : float, def=0.1
maximum tolerated difference between event duration and timespan
Returns
-------
ndarray (dtype='int')
N x M matrix with start , ..., end samples
"""
dur = (events[:, -1] - 1 - events[:, 0]) / s_freq
continuous = time[events[:, -1] - 1] - time[events[:, 0]] - dur < toler
return events[continuous, :]
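# Worked example for remove_straddlers() above: two recording segments were
# stitched together, so the time vector jumps from 0.9 s to 5.0 s between
# samples 9 and 10 (s_freq = 10 Hz in this sketch).
import numpy as np
s_freq = 10.0
time = np.concatenate([np.arange(0, 1, 0.1), np.arange(5, 6, 0.1)])
events = np.array([[2, 6],    # lies inside the first segment -> kept
                   [8, 13]])  # straddles the stitch -> rejected
print(remove_straddlers(events, time, s_freq))  # -> [[2 6]]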
def ekopr(fname):
"""
Open an existing E-kernel file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.ekopr_c(fname, ctypes.byref(handle))
return handle.value
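# Hedged usage sketch for ekopr() above; "events.ek" is a hypothetical EK file
# path, and ekcls (the matching CSPICE close routine, wrapped elsewhere in
# this module) is assumed to be available for releasing the handle.
handle = ekopr("events.ek")
# ... run EK queries against `handle` ...
ekcls(handle)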
def build_machine(system_info,
core_resource=Cores,
sdram_resource=SDRAM,
sram_resource=SRAM):
"""Build a :py:class:`~rig.place_and_route.Machine` object from a
:py:class:`~rig.machine_control.machine_controller.SystemInfo` object.
.. note::
Links are tested by sending a 'PEEK' command down the link which
checks to see if the remote device responds correctly. If the link
is dead, no response will be received and the link will be assumed
dead. Since peripherals do not generally respond to 'PEEK'
commands, working links attached to peripherals will also be marked
as dead.
.. note::
The returned object does not report how much memory is free, nor
how many cores are idle but rather the total number of working cores
and the size of the heap. See :py:func:`.build_resource_constraints`
for a function which can generate a set of
:py:class:`~rig.place_and_route.constraints` which prevent the use of
already in-use cores and memory.
.. note::
This method replaces the deprecated
:py:meth:`rig.machine_control.MachineController.get_machine` method.
Its functionality may be recreated using
:py:meth:`rig.machine_control.MachineController.get_system_info` along
with this function like so::
>> sys_info = mc.get_system_info()
>> machine = build_machine(sys_info)
Parameters
----------
system_info : :py:class:`rig.machine_control.machine_controller.SystemInfo`
The resource availability information for a SpiNNaker machine,
typically produced by
:py:meth:`rig.machine_control.MachineController.get_system_info`.
core_resource : resource (default: :py:class:`rig.place_and_route.Cores`)
The resource type to use to represent the number of working cores on a
chip, including the monitor, those already in use and all idle cores.
sdram_resource : resource (default: :py:class:`rig.place_and_route.SDRAM`)
The resource type to use to represent SDRAM on a chip. This resource
will be set to the number of bytes in the largest free block in the
SDRAM heap. This gives a conservative estimate of the amount of free
SDRAM on the chip which will be an underestimate in the presence of
memory fragmentation.
sram_resource : resource (default: :py:class:`rig.place_and_route.SRAM`)
The resource type to use to represent SRAM (a.k.a. system RAM) on a
chip. This resource will be set to the number of bytes in the largest
free block in the SRAM heap. This gives a conservative estimate of the
amount of free SRAM on the chip which will be an underestimate in the
presence of memory fragmentation.
Returns
-------
:py:class:`rig.place_and_route.Machine`
A :py:class:`~rig.place_and_route.Machine` object representing the
resources available within a SpiNNaker machine in the form used by the
place-and-route infrastructure.
"""
try:
max_cores = max(c.num_cores for c in itervalues(system_info))
except ValueError:
max_cores = 0
try:
max_sdram = max(c.largest_free_sdram_block
for c in itervalues(system_info))
except ValueError:
max_sdram = 0
try:
max_sram = max(c.largest_free_sram_block
for c in itervalues(system_info))
except ValueError:
max_sram = 0
return Machine(width=system_info.width,
height=system_info.height,
chip_resources={
core_resource: max_cores,
sdram_resource: max_sdram,
sram_resource: max_sram,
},
chip_resource_exceptions={
chip: {
core_resource: info.num_cores,
sdram_resource: info.largest_free_sdram_block,
sram_resource: info.largest_free_sram_block,
}
for chip, info in iteritems(system_info)
if (info.num_cores != max_cores or
info.largest_free_sdram_block != max_sdram or
info.largest_free_sram_block != max_sram)
},
dead_chips=set(system_info.dead_chips()),
dead_links=set(system_info.dead_links()))
def calibrate(self, data, key):
"""Data calibration."""
# logger.debug('Calibration: %s' % key.calibration)
logger.warning('Calibration disabled!')
if key.calibration == 'brightness_temperature':
# self._ir_calibrate(data, key)
pass
elif key.calibration == 'reflectance':
# self._vis_calibrate(data, key)
pass
else:
pass
return data
def get_app_name(self):
"""
Return the appname of the APK
This name is read from the AndroidManifest.xml
using the application android:label.
If no label exists, the android:label of the main activity is used.
If there is also no main activity label, an empty string is returned.
:rtype: :class:`str`
"""
app_name = self.get_attribute_value('application', 'label')
if app_name is None:
activities = self.get_main_activities()
main_activity_name = None
if len(activities) > 0:
main_activity_name = activities.pop()
# FIXME: would need to use _format_value inside get_attribute_value for each returned name!
# For example, as the activity name might be foobar.foo.bar but inside the activity it is only .bar
app_name = self.get_attribute_value('activity', 'label', name=main_activity_name)
if app_name is None:
# No App name set
# TODO return packagename instead?
log.warning("It looks like that no app name is set for the main activity!")
return ""
if app_name.startswith("@"):
res_parser = self.get_android_resources()
if not res_parser:
# TODO: What should be the correct return value here?
return app_name
res_id, package = res_parser.parse_id(app_name)
# If the package name is the same as the APK package,
# we should be able to resolve the ID.
if package and package != self.get_package():
if package == 'android':
# TODO: we can not resolve this, as we lack framework-res.apk
# one exception would be when parsing framework-res.apk directly.
log.warning("Resource ID with android package name encountered! "
"Will not resolve, framework-res.apk would be required.")
return app_name
else:
# TODO should look this up, might be in the resources
log.warning("Resource ID with Package name '{}' encountered! Will not resolve".format(package))
return app_name
try:
app_name = res_parser.get_resolved_res_configs(
res_id,
ARSCResTableConfig.default_config())[0][1]
except Exception as e:
log.warning("Exception selecting app name: %s" % e)
return app_name
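# Hedged usage sketch for get_app_name() above: assumes the surrounding class
# is androguard's APK (androguard < 4.x import path) and that "example.apk"
# is a hypothetical file on disk.
from androguard.core.bytecodes.apk import APK
apk = APK("example.apk")
print(apk.get_app_name())  # e.g. "My Application"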
def gather_votes(self, candidates):
"""Gather votes for the given candidates from the agents in the
environment.
Returned votes are anonymous, i.e. they cannot be tracked to any
individual agent afterwards.
:returns:
A list of votes. Each vote is a list of ``(artifact, preference)``
-tuples sorted in a preference order of a single agent.
"""
votes = []
for a in self.get_agents(addr=False):
vote = a.vote(candidates)
votes.append(vote)
return votes
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
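# Hedged usage sketch: _do_link_patterns() above is what the "link-patterns"
# extra of markdown2 runs; the issue-tracker URL below is a hypothetical
# example, not part of the source.
import re
import markdown2
link_patterns = [(re.compile(r'issue #(\d+)', re.I),
                  r'https://example.com/issues/\1')]
html = markdown2.markdown("See issue #42 for details.",
                          extras=["link-patterns"],
                          link_patterns=link_patterns)
print(html)  # the matched text is wrapped in an <a href="..."> link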
def _prompt_choice(var_name, options):
'''
Prompt the user to choose between a list of options, index each one by adding an enumerator
based on https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/prompt.py#L51
:param var_name: The question to ask the user
:type var_name: ``str``
:param options: A list of options
:type options: ``list`` of ``tuple``
:rtype: ``tuple``
:returns: The selected option
'''
choice_map = OrderedDict(
('{0}'.format(i), value) for i, value in enumerate(options, 1) if value[0] != 'test'
)
choices = choice_map.keys()
default = '1'
choice_lines = ['{0} - {1} - {2}'.format(c[0], c[1][0], c[1][1]) for c in choice_map.items()]
prompt = '\n'.join((
'Select {0}:'.format(var_name),
'\n'.join(choice_lines),
'Choose from {0}'.format(', '.join(choices))
))
user_choice = click.prompt(
prompt, type=click.Choice(choices), default=default
)
return choice_map[user_choice]
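# Hedged usage sketch for _prompt_choice() above; the option tuples below are
# hypothetical (name, description) pairs. Entries whose first element is
# 'test' are filtered out before the numbered menu is shown.
platform = _prompt_choice('platform', [
    ('linux', 'Build on Linux'),
    ('osx', 'Build on macOS'),
    ('test', 'Internal option, never offered'),
])
# Returns the chosen tuple, e.g. ('linux', 'Build on Linux').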
def find_by_id(cls, id):
"""
Finds a single document by its ID. Throws a
NotFoundException if the document does not exist (the
assumption being if you've got an id you should be
pretty certain the thing exists)
"""
obj = cls.find_one(cls._id_spec(id))
if not obj:
raise NotFoundException(cls.collection, id)
return obj
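# Hedged usage sketch for find_by_id() above; "User" and "user_id" are
# hypothetical stand-ins for a concrete model class built on this mixin and
# an ObjectId-like value.
try:
    user = User.find_by_id(user_id)
except NotFoundException:
    user = None  # caller decides how to treat a genuinely missing document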
def get_commands(self, command_name, **kwargs):
"""get fe_command from command name and keyword arguments
wrapper for build_commands()
implements FEI4 specific behavior
"""
chip_id = kwargs.pop("ChipID", self.chip_id_bitarray)
commands = []
if command_name == "zeros":
bv = bitarray(endian='little')
if "length" in kwargs:
bv += bitarray(kwargs["length"], endian='little') # initialized from int, bits may be random
elif kwargs:
raise ValueError("Unknown parameter(s): %s" % ", ".join(kwargs.iterkeys()))
bv.setall(0) # all bits to zero
commands.append(bv)
elif command_name == "ones":
bv = bitarray(endian='little')
if "length" in kwargs:
bv += bitarray(kwargs["length"], endian='little') # initialized from int, bits may be random
elif kwargs:
raise ValueError("Unknown parameter(s): %s" % ", ".join(kwargs.iterkeys()))
bv.setall(1) # all bits to one
commands.append(bv)
elif command_name == "WrRegister":
register_addresses = self.get_global_register_attributes("addresses", **kwargs)
register_bitsets = self.get_global_register_bitsets(register_addresses)
commands.extend([self.build_command(command_name, Address=register_address, GlobalData=register_bitset, ChipID=chip_id, **kwargs) for register_address, register_bitset in zip(register_addresses, register_bitsets)])
elif command_name == "RdRegister":
register_addresses = self.get_global_register_attributes('addresses', **kwargs)
commands.extend([self.build_command(command_name, Address=register_address, ChipID=chip_id) for register_address in register_addresses])
elif command_name == "WrFrontEnd":
registers = ["S0", "S1", "SR_Clr", "CalEn", "DIGHITIN_SEL", "GateHitOr", "ReadErrorReq", "StopClkPulse", "SR_Clock", "Efuse_Sense", "HITLD_IN", "Colpr_Mode", "Colpr_Addr"]
if self.fei4a:
registers.append("ReadSkipped")
elif self.fei4b:
registers.append("SR_Read")
self.create_restore_point()
dcs = kwargs.pop("dcs", range(40)) # set the double columns to latch
# in case of empty list
if not dcs:
dcs = range(40)
joint_write = kwargs.pop("joint_write", False)
same_mask_for_all_dc = kwargs.pop("same_mask_for_all_dc", False)
register_objects = self.get_pixel_register_objects(do_sort=['pxstrobe'], **kwargs)
# prepare for writing pixel registers
if not self.broadcast:
self.set_global_register_value("Colpr_Mode", 0) # write only to the addressed double-column
self.set_global_register_value("Colpr_Addr", 40) # ivalid address, grounded
# Broadcasting ConfMode not necessary, writing registers is also possible in RunMode
# commands.extend(self.get_commands("ConfMode", ChipID=8)) # set all chips to conf mode to receive commands#
# set all other chips to invalid addresses, to make broadcasting of WrRegister command possible
commands.extend(self.get_commands("WrRegister", name=["Colpr_Mode", "Colpr_Addr"], ChipID=8)) # braodcast
self.set_global_register_value("S0", 0)
self.set_global_register_value("S1", 0)
self.set_global_register_value("SR_Clr", 0)
self.set_global_register_value("CalEn", 0)
self.set_global_register_value("DIGHITIN_SEL", 0)
self.set_global_register_value("GateHitOr", 0)
self.set_global_register_value("ReadErrorReq", 0)
self.set_global_register_value("StopClkPulse", 0)
self.set_global_register_value("SR_Clock", 0)
self.set_global_register_value("Efuse_Sense", 0)
self.set_global_register_value("HITLD_IN", 0)
self.set_global_register_value("Colpr_Mode", 3 if same_mask_for_all_dc else 0) # write only the addressed double-column
self.set_global_register_value("Colpr_Addr", 0)
if self.fei4a:
self.set_global_register_value("ReadSkipped", 0)
elif self.fei4b:
self.set_global_register_value("SR_Read", 0)
commands.extend(self.get_commands("WrRegister", name=registers))
if joint_write:
pxstrobes = 0
first_read = True
do_latch = False
for register_object in register_objects:
if register_object['bitlength'] != 1:
raise ValueError('Pixel register %s: joint write not supported for pixel DACs' % register_object['name'])
pxstrobe = register_object['pxstrobe']
if not isinstance(pxstrobe, basestring):
do_latch = True
pxstrobes += 2 ** register_object['pxstrobe']
if first_read:
pixel_reg_value = register_object['value']
first_read = False
else:
if np.array_equal(pixel_reg_value, register_object['value']):
pixel_reg_value = register_object['value']
else:
raise ValueError('Pixel register %s: joint write not supported, pixel register values must be equal' % register_object['name'])
if do_latch:
self.set_global_register_value("Latch_En", 1)
else:
self.set_global_register_value("Latch_En", 0)
self.set_global_register_value("Pixel_Strobes", pxstrobes)
commands.extend(self.get_commands("WrRegister", name=["Pixel_Strobes", "Latch_En"]))
for dc_no in (dcs[:1] if same_mask_for_all_dc else dcs):
self.set_global_register_value("Colpr_Addr", dc_no)
commands.extend(self.get_commands("WrRegister", name=["Colpr_Addr"]))
register_bitset = self.get_pixel_register_bitset(register_objects[0], 0, dc_no)
commands.extend([self.build_command(command_name, PixelData=register_bitset, ChipID=8, **kwargs)]) # broadcast
if do_latch:
commands.extend(self.get_commands("GlobalPulse", Width=0))
else:
for register_object in register_objects:
pxstrobe = register_object['pxstrobe']
if isinstance(pxstrobe, basestring):
do_latch = False
self.set_global_register_value("Pixel_Strobes", 0) # no latch
self.set_global_register_value("Latch_En", 0)
commands.extend(self.get_commands("WrRegister", name=["Pixel_Strobes", "Latch_En"]))
else:
do_latch = True
self.set_global_register_value("Latch_En", 1)
commands.extend(self.get_commands("WrRegister", name=["Latch_En"]))
bitlength = register_object['bitlength']
for bit_no, pxstrobe_bit_no in (enumerate(range(bitlength)) if (register_object['littleendian'] is False) else enumerate(reversed(range(bitlength)))):
if do_latch:
self.set_global_register_value("Pixel_Strobes", 2 ** (pxstrobe + bit_no))
commands.extend(self.get_commands("WrRegister", name=["Pixel_Strobes"]))
for dc_no in (dcs[:1] if same_mask_for_all_dc else dcs):
self.set_global_register_value("Colpr_Addr", dc_no)
commands.extend(self.get_commands("WrRegister", name=["Colpr_Addr"]))
register_bitset = self.get_pixel_register_bitset(register_object, pxstrobe_bit_no, dc_no)
commands.extend([self.build_command(command_name, PixelData=register_bitset, ChipID=8, **kwargs)]) # broadcast
if do_latch:
commands.extend(self.get_commands("GlobalPulse", Width=0))
self.restore(pixel_register=False)
commands.extend(self.get_commands("WrRegister", name=registers))
elif command_name == "RdFrontEnd":
registers = ["Conf_AddrEnable", "S0", "S1", "SR_Clr", "CalEn", "DIGHITIN_SEL", "GateHitOr", "ReadErrorReq", "StopClkPulse", "SR_Clock", "Efuse_Sense", "HITLD_IN", "Colpr_Mode", "Colpr_Addr", "Pixel_Strobes", "Latch_En"]
if self.fei4a:
registers.append("ReadSkipped")
elif self.fei4b:
registers.append("SR_Read")
self.create_restore_point()
dcs = kwargs.pop("dcs", range(40)) # set the double columns to latch
# in case of empty list
if not dcs:
dcs = range(40)
register_objects = self.get_pixel_register_objects(**kwargs)
self.set_global_register_value('Conf_AddrEnable', 1)
self.set_global_register_value("S0", 0)
self.set_global_register_value("S1", 0)
self.set_global_register_value("SR_Clr", 0)
if self.fei4b:
self.set_global_register_value("SR_Read", 0)
self.set_global_register_value("CalEn", 0)
self.set_global_register_value("DIGHITIN_SEL", 0)
self.set_global_register_value("GateHitOr", 0)
if self.fei4a:
self.set_global_register_value("ReadSkipped", 0)
self.set_global_register_value("ReadErrorReq", 0)
self.set_global_register_value("StopClkPulse", 0)
self.set_global_register_value("SR_Clock", 0)
self.set_global_register_value("Efuse_Sense", 0)
self.set_global_register_value("HITLD_IN", 0)
self.set_global_register_value("Colpr_Mode", 0) # write only the addressed double-column
self.set_global_register_value("Colpr_Addr", 0)
self.set_global_register_value("Latch_En", 0)
self.set_global_register_value("Pixel_Strobes", 0)
commands.extend(self.get_commands("WrRegister", name=registers))
for index, register_object in enumerate(register_objects): # make sure that EnableDigInj is first read back, because it is not latched
if register_object['name'] == 'EnableDigInj':
register_objects[0], register_objects[index] = register_objects[index], register_objects[0]
break
for register_object in register_objects:
pxstrobe = register_object['pxstrobe']
bitlength = register_object['bitlength']
for pxstrobe_bit_no in range(bitlength):
logging.debug('Pixel Register %s Bit %d', register_object['name'], pxstrobe_bit_no)
do_latch = True
try:
self.set_global_register_value("Pixel_Strobes", 2 ** (pxstrobe + pxstrobe_bit_no))
except TypeError: # thrown for not latched digInjection
self.set_global_register_value("Pixel_Strobes", 0) # do not latch
do_latch = False
commands.extend(self.get_commands("WrRegister", name=["Pixel_Strobes"]))
for dc_no in dcs:
self.set_global_register_value("Colpr_Addr", dc_no)
commands.extend(self.get_commands("WrRegister", name=["Colpr_Addr"]))
if do_latch is True:
self.set_global_register_value("S0", 1)
self.set_global_register_value("S1", 1)
self.set_global_register_value("SR_Clock", 1)
commands.extend(self.get_commands("WrRegister", name=["S0", "S1", "SR_Clock"]))
commands.extend(self.get_commands("GlobalPulse", Width=0))
self.set_global_register_value("S0", 0)
self.set_global_register_value("S1", 0)
self.set_global_register_value("SR_Clock", 0)
commands.extend(self.get_commands("WrRegister", name=["S0", "S1", "SR_Clock"]))
register_bitset = self.get_pixel_register_bitset(register_object, pxstrobe_bit_no if (register_object['littleendian'] is False) else register_object['bitlength'] - pxstrobe_bit_no - 1, dc_no)
if self.fei4b:
self.set_global_register_value("SR_Read", 1)
commands.extend(self.get_commands("WrRegister", name=["SR_Read"]))
commands.extend([self.build_command("WrFrontEnd", PixelData=register_bitset, ChipID=chip_id)])
if self.fei4b:
self.set_global_register_value("SR_Read", 0)
commands.extend(self.get_commands("WrRegister", name=["SR_Read"]))
self.restore(pixel_register=False)
commands.extend(self.get_commands("WrRegister", name=registers))
else:
commands.append(self.build_command(command_name, ChipID=chip_id, **kwargs))
return commands
def sortByColumn(self, column, order=QtCore.Qt.AscendingOrder):
"""
Overloads the default sortByColumn to record the order for later \
reference.
:param column | <int>
order | <QtCore.Qt.SortOrder>
"""
super(XTreeWidget, self).sortByColumn(column, order)
self._sortOrder = order
def get_service_health(service_id: str) -> str:
"""Get the health of a service using service_id.
Args:
service_id
Returns:
str, health status
"""
# Check if the current and actual replica levels are the same
if DC.get_replicas(service_id) != DC.get_actual_replica(service_id):
health_status = "Unhealthy"
else:
health_status = "Healthy"
return health_status
def set_repo_permission(self, repo, permission):
"""
:calls: `PUT /teams/:id/repos/:org/:repo <http://developer.github.com/v3/orgs/teams>`_
:param repo: :class:`github.Repository.Repository`
:param permission: string
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
put_parameters = {
"permission": permission,
}
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/repos/" + repo._identity,
input=put_parameters
)
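# Hedged usage sketch for set_repo_permission() above via PyGithub's public
# objects; the token, organization, team id and repository names are
# hypothetical placeholders.
from github import Github
gh = Github("<access-token>")
org = gh.get_organization("my-org")
team = org.get_team(1234567)
repo = org.get_repo("my-repo")
team.set_repo_permission(repo, "push")  # "pull", "push" or "admin"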
def colorbar(ax, im, fig=None, loc="right", size="5%", pad="3%"):
"""
Adds a polite colorbar that steals space so :func:`matplotlib.pyplot.tight_layout` works nicely.
.. versionadded:: 1.3
Parameters
----------
ax : :class:`matplotlib.axis.Axis`
The axis to plot to.
im : :class:`matplotlib.image.AxesImage`
The plotted image to use for the colorbar.
fig : :class:`matplotlib.figure.Figure`, optional
The figure to plot to.
loc : str, optional
The location to place the axes.
size : str, optional
The size to allocate for the colorbar.
pad : str, optional
The amount to pad the colorbar.
"""
if fig is None:
fig = ax.get_figure()
# _pdb.set_trace()
if loc == "left" or loc == "right":
width = fig.get_figwidth()
new = width * (1 + _pc2f(size) + _pc2f(pad))
_logger.debug('Setting new figure width: {}'.format(new))
# fig.set_size_inches(new, fig.get_figheight(), forward=True)
elif loc == "top" or loc == "bottom":
height = fig.get_figheight()
new = height * (1 + _pc2f(size) + _pc2f(pad))
_logger.debug('Setting new figure height: {}'.format(new))
# fig.set_figheight(fig.get_figwidth(), new, forward=True)
divider = _ag1.make_axes_locatable(ax)
cax = divider.append_axes(loc, size=size, pad=pad)
return cax, _plt.colorbar(im, cax=cax)
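# Minimal usage sketch for colorbar() above (matplotlib only; the image data
# are hypothetical). The helper appends a colorbar axis that cooperates with
# tight_layout instead of overlapping the image.
import numpy as np
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
im = ax.imshow(np.random.rand(32, 32))
cax, cbar = colorbar(ax, im, loc="right", size="5%", pad="3%")
fig.tight_layout()
plt.show()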
def init_layout(self):
""" Set the checked state after all children have
been populated.
"""
super(AndroidRadioGroup, self).init_layout()
d = self.declaration
w = self.widget
if d.checked:
self.set_checked(d.checked)
else:
#: Check if any of the children have "checked = True"
for c in d.children:
if c.checked:
d.checked = c
w.setOnCheckedChangeListener(w.getId())
w.onCheckedChanged.connect(self.on_checked_changed)
def detailxy(self, canvas, button, data_x, data_y):
"""Motion event in the pick fits window. Show the pointing
information under the cursor.
"""
if button == 0:
# TODO: we could track the focus changes to make this check
# more efficient
chviewer = self.fv.getfocus_viewer()
# Don't update global information if our chviewer isn't focused
if chviewer != self.fitsimage:
return True
# Add offsets from cutout
data_x = data_x + self.pick_x1
data_y = data_y + self.pick_y1
return self.fv.showxy(chviewer, data_x, data_y)
def make_back_author_contributions(self, body):
"""
Though this goes in the back of the document with the rest of the back
matter, it is not an element found under <back>.
I don't expect to see more than one of these. Compare this method to
make_article_info_competing_interests()
"""
cont_expr = "./front/article-meta/author-notes/fn[@fn-type='con']"
contribution = self.article.root.xpath(cont_expr)
if contribution:
author_contrib = deepcopy(contribution[0])
remove_all_attributes(author_contrib)
author_contrib.tag = 'div'
author_contrib.attrib['id'] = 'author-contributions'
#This title element will be parsed later
title = etree.Element('title')
title.text = 'Author Contributions'
author_contrib.insert(0, title)
body.append(author_contrib)
def get_usedby_aql(self, params):
"""
Returns the AQL query (without the repository) from the configuration file.
:param params:
:return:
"""
if self._usedby is None:
return None
_result = {}
params = self.merge_valued(params)
for k, v in self._usedby['AQL'].items():
if isinstance(v, str):
k = k.format(**params)
v = v.format(**params)
_result[k] = v
return _result
def _multiplyThroughputs(self):
''' Overrides base class in order to deal with opaque components.
'''
index = 0
for component in self.components:
if component.throughput != None:
break
index += 1
return BaseObservationMode._multiplyThroughputs(self, index)
def _set_LED(self, status):
"""
_set_LED: boolean -> None
Sets the status of the remote LED
"""
# DIO pin 1 (LED), active low
self.hw.remote_at(
dest_addr=self.remote_addr,
command='D1',
parameter='\x04' if status else '\x05')
def remove_unweighted_sources(graph: BELGraph, key: Optional[str] = None) -> None:
"""Prune unannotated nodes on the periphery of the sub-graph.
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
"""
nodes = list(get_unweighted_sources(graph, key=key))
graph.remove_nodes_from(nodes)
def open(self, url):
"""
Open a WSDL at the specified I{url}.
First, an attempt is made to retrieve the WSDL from
the I{object cache}. After it is unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
cache = self.cache()
id = self.mangle(url, 'wsdl')
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d
def weld_standard_deviation(array, weld_type):
"""Returns the *sample* standard deviation of the array.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of each element in the input array.
Returns
-------
WeldObject
Representation of this computation.
"""
weld_obj_var = weld_variance(array, weld_type)
obj_id, weld_obj = create_weld_object(weld_obj_var)
weld_obj_var_id = get_weld_obj_id(weld_obj, weld_obj_var)
weld_template = _weld_std_code
weld_obj.weld_code = weld_template.format(var=weld_obj_var_id)
return weld_obj
def same(*values):
"""
Check if all values in a sequence are equal.
Returns True on empty sequences.
Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
"""
if not values:
return True
first, rest = values[0], values[1:]
return all(value == first for value in rest)
def update_path(self, path):
"""
There are EXTENDED messages which don't include any routers at
all, and any of the EXTENDED messages may have some arbitrary
flags in them. So far, they're all upper-case and none start
with $ luckily. The routers in the path should all be
LongName-style router names (this depends on them starting
with $).
For further complication, it's possible to extend a circuit to
a router which isn't in the consensus. nickm via #tor thought
this might happen in the case of hidden services choosing a
rendezvous point not in the current consensus.
"""
oldpath = self.path
self.path = []
for p in path:
if p[0] != '$':
break
# this will create a Router if we give it a router
# LongName that doesn't yet exist
router = self.router_container.router_from_id(p)
self.path.append(router)
# if the path grew, notify listeners
if len(self.path) > len(oldpath):
for x in self.listeners:
x.circuit_extend(self, router)
oldpath = self.path
def _attributeLinesToDict(attributeLines):
"""Converts a list of obo 'Term' lines to a dictionary.
:param attributeLines: a list of obo 'Term' lines. Each line contains a key
and a value part which are separated by a ':'.
:return: a dictionary containing the attributes of an obo 'Term' entry.
NOTE: Some attributes can occur multiple times in one single term, for
example 'is_a' or 'relationship'. However, currently only the last
occurrence is stored.
"""
attributes = dict()
for line in attributeLines:
attributeId, attributeValue = line.split(':', 1)
attributes[attributeId.strip()] = attributeValue.strip()
return attributes
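# Worked example for _attributeLinesToDict() above, using a typical obo
# 'Term' stanza; note that a repeated key such as 'is_a' keeps only its
# last occurrence, as the docstring warns.
term_lines = [
    'id: GO:0008150',
    'name: biological_process',
    'is_a: GO:0000001',
    'is_a: GO:0000002',
]
print(_attributeLinesToDict(term_lines))
# -> {'id': 'GO:0008150', 'name': 'biological_process', 'is_a': 'GO:0000002'}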
def is_cozy_registered():
'''
Check if a Cozy is registered
'''
req = curl_couchdb('/cozy/_design/user/_view/all')
users = req.json()['rows']
if len(users) > 0:
return True
else:
return False
def update(self, membershipId, isModerator=None, **request_parameters):
"""Update a team membership, by ID.
Args:
membershipId(basestring): The team membership ID.
isModerator(bool): Set to True to make the person a team moderator.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
TeamMembership: A TeamMembership object with the updated Webex
Teams team-membership details.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
"""
check_type(membershipId, basestring, may_be_none=False)
check_type(isModerator, bool)
put_data = dict_from_items_with_values(
request_parameters,
isModerator=isModerator,
)
# API request
json_data = self._session.put(API_ENDPOINT + '/' + membershipId,
json=put_data)
# Return a team membership object created from the response JSON data
return self._object_factory(OBJECT_TYPE, json_data)
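# Hedged usage sketch for update() above, assuming the webexteamssdk client
# layout where this class is exposed as api.team_memberships; the membership
# id is a hypothetical placeholder.
from webexteamssdk import WebexTeamsAPI
api = WebexTeamsAPI()  # reads WEBEX_TEAMS_ACCESS_TOKEN from the environment
membership = api.team_memberships.update("<membership-id>", isModerator=True)
print(membership.isModerator)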
def get_asset_form_for_update(self, asset_id=None):
"""Gets the asset form for updating an existing asset.
A new asset form should be requested for each update
transaction.
:param asset_id: the ``Id`` of the ``Asset``
:type asset_id: ``osid.id.Id``
:return: the asset form
:rtype: ``osid.repository.AssetForm``
:raise: ``NotFound`` -- ``asset_id`` is not found
:raise: ``NullArgument`` -- ``asset_id`` is null
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
if asset_id is None:
raise NullArgument()
try:
url_path = construct_url('assets',
bank_id=self._catalog_idstr,
asset_id=asset_id)
asset = objects.Asset(self._get_request(url_path))
except Exception:
raise
asset_form = objects.AssetForm(asset._my_map)
self._forms[asset_form.get_id().get_identifier()] = not UPDATED
return asset_form
def create_initial_tree(channel):
""" create_initial_tree: Create initial tree structure
Args:
channel (Channel): channel to construct
Returns: tree manager to run rest of steps
"""
# Create channel manager with channel data
config.LOGGER.info(" Setting up initial channel structure... ")
tree = ChannelManager(channel)
# Make sure channel structure is valid
config.LOGGER.info(" Validating channel structure...")
channel.print_tree()
tree.validate()
config.LOGGER.info(" Tree is valid\n")
return tree | create_initial_tree: Create initial tree structure
Args:
channel (Channel): channel to construct
Returns: tree manager to run rest of steps |
def _open_interface(self, client, uuid, iface, key):
"""Open an interface on a connected device.
Args:
client (string): The client id who is requesting this operation
uuid (int): The id of the device we're opening the interface on
iface (string): The name of the interface that we're opening
key (string): The key to authenticate the caller
"""
conn_id = self._validate_connection('open_interface', uuid, key)
if conn_id is None:
return
conn_data = self._connections[uuid]
conn_data['last_touch'] = monotonic()
slug = self._build_device_slug(uuid)
try:
resp = yield self._manager.open_interface(conn_id, iface)
except Exception as exc:
self._logger.exception("Error in manager open interface")
resp = {'success': False, 'reason': "Internal error: %s" % str(exc)}
message = {'type': 'response', 'operation': 'open_interface', 'client': client}
message['success'] = resp['success']
if not message['success']:
message['failure_reason'] = resp['reason']
self._publish_response(slug, message) | Open an interface on a connected device.
Args:
client (string): The client id who is requesting this operation
uuid (int): The id of the device we're opening the interface on
iface (string): The name of the interface that we're opening
key (string): The key to authenticate the caller |
async def handle_exception(self, exc: Exception, action: str, request_id):
"""
Handle any exception that occurs, by sending an appropriate message
"""
if isinstance(exc, APIException):
await self.reply(
action=action,
errors=self._format_errors(exc.detail),
status=exc.status_code,
request_id=request_id
)
elif exc == Http404 or isinstance(exc, Http404):
await self.reply(
action=action,
errors=self._format_errors('Not found'),
status=404,
request_id=request_id
)
else:
raise exc | Handle any exception that occurs, by sending an appropriate message |
def _get_2d_plot(self, label_stable=True, label_unstable=True,
ordering=None, energy_colormap=None, vmin_mev=-60.0,
vmax_mev=60.0, show_colorbar=True,
process_attributes=False, plt=None):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
but since plotting is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way.
"""
if plt is None:
plt = pretty_plot(8, 6)
from matplotlib.font_manager import FontProperties
if ordering is None:
(lines, labels, unstable) = self.pd_plot_data
else:
(_lines, _labels, _unstable) = self.pd_plot_data
(lines, labels, unstable) = order_phase_diagram(
_lines, _labels, _unstable, ordering)
if energy_colormap is None:
if process_attributes:
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
# One should think about a clever way to have "complex"
# attributes with complex processing options but with a clear
# logic. At this moment, I just use the attributes to know
# whether an entry is a new compound or an existing (from the
# ICSD or from the MP) one.
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "ko", **self.plotkwargs)
else:
plt.plot(x, y, "k*", **self.plotkwargs)
else:
for x, y in lines:
plt.plot(x, y, "ko-", **self.plotkwargs)
else:
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
for x, y in lines:
plt.plot(x, y, "k-", markeredgecolor="k")
vmin = vmin_mev / 1000.0
vmax = vmax_mev / 1000.0
if energy_colormap == 'default':
mid = - vmin / (vmax - vmin)
cmap = LinearSegmentedColormap.from_list(
'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
(mid, '#FFAAAA'), (1.0, '#FF0000')])
else:
cmap = energy_colormap
norm = Normalize(vmin=vmin, vmax=vmax)
_map = ScalarMappable(norm=norm, cmap=cmap)
_energies = [self._pd.get_equilibrium_reaction_energy(entry)
for coord, entry in labels.items()]
energies = [en if en < 0.0 else -0.00000001 for en in _energies]
vals_stable = _map.to_rgba(energies)
ii = 0
if process_attributes:
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=12)
else:
plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
markersize=18)
ii += 1
else:
for x, y in labels.keys():
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=15)
ii += 1
font = FontProperties()
font.set_weight("bold")
font.set_size(24)
# Sets a nice layout depending on the type of PD. Also defines a
# "center" for the PD, which then allows the annotations to be spread
# out in a nice manner.
if len(self._pd.elements) == 3:
plt.axis("equal")
plt.xlim((-0.1, 1.2))
plt.ylim((-0.1, 1.0))
plt.axis("off")
center = (0.5, math.sqrt(3) / 6)
else:
all_coords = labels.keys()
miny = min([c[1] for c in all_coords])
ybuffer = max(abs(miny) * 0.1, 0.1)
plt.xlim((-0.1, 1.1))
plt.ylim((miny - ybuffer, ybuffer))
center = (0.5, miny / 2)
plt.xlabel("Fraction", fontsize=28, fontweight='bold')
plt.ylabel("Formation energy (eV/fu)", fontsize=28,
fontweight='bold')
for coords in sorted(labels.keys(), key=lambda x: -x[1]):
entry = labels[coords]
label = entry.name
# The following defines an offset for the annotation text emanating
# from the center of the PD. Results in fairly nice layouts for the
# most part.
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
else vec
valign = "bottom" if vec[1] > 0 else "top"
if vec[0] < -0.01:
halign = "right"
elif vec[0] > 0.01:
halign = "left"
else:
halign = "center"
if label_stable:
if process_attributes and entry.attribute == 'new':
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font,
color='g')
else:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font)
if self.show_unstable:
font = FontProperties()
font.set_size(16)
energies_unstable = [self._pd.get_e_above_hull(entry)
for entry, coord in unstable.items()]
if energy_colormap is not None:
energies.extend(energies_unstable)
vals_unstable = _map.to_rgba(energies_unstable)
ii = 0
for entry, coords in unstable.items():
ehull = self._pd.get_e_above_hull(entry)
if ehull < self.show_unstable:
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 \
if np.linalg.norm(vec) != 0 else vec
label = entry.name
if energy_colormap is None:
plt.plot(coords[0], coords[1], "ks", linewidth=3,
markeredgecolor="k", markerfacecolor="r",
markersize=8)
else:
plt.plot(coords[0], coords[1], "s", linewidth=3,
markeredgecolor="k",
markerfacecolor=vals_unstable[ii],
markersize=8)
if label_unstable:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign, color="b",
verticalalignment=valign,
fontproperties=font)
ii += 1
if energy_colormap is not None and show_colorbar:
_map.set_array(energies)
cbar = plt.colorbar(_map)
cbar.set_label(
'Energy [meV/at] above hull (in red)\nInverse energy ['
'meV/at] above hull (in green)',
rotation=-90, ha='left', va='center')
ticks = cbar.ax.get_yticklabels()
# cbar.ax.set_yticklabels(['${v}$'.format(
# v=float(t.get_text().strip('$'))*1000.0) for t in ticks])
f = plt.gcf()
f.set_size_inches((8, 6))
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
return plt | Shows the plot using pylab. Usually I won't do imports in methods,
but since plotting is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way. |
def interface_endpoints(self):
"""Instance depends on the API version:
* 2018-08-01: :class:`InterfaceEndpointsOperations<azure.mgmt.network.v2018_08_01.operations.InterfaceEndpointsOperations>`
"""
api_version = self._get_api_version('interface_endpoints')
if api_version == '2018-08-01':
from .v2018_08_01.operations import InterfaceEndpointsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-08-01: :class:`InterfaceEndpointsOperations<azure.mgmt.network.v2018_08_01.operations.InterfaceEndpointsOperations>` |
def auth(self, encoded):
""" Validate integrity of encoded bytes """
message, signature = self.split(encoded)
computed = self.sign(message)
if not hmac.compare_digest(signature, computed):
raise AuthenticatorInvalidSignature | Validate integrity of encoded bytes |
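The surrounding class is not shown, so here is a self-contained sketch of the sign/split/verify round trip the method relies on; the class name, key handling and signature placement are stand-ins, not the original implementation:

import hmac
import hashlib

class AuthenticatorInvalidSignature(Exception):
    pass

class ToyAuthenticator:
    """Stand-in showing the sign/split/auth pattern."""

    def __init__(self, key):
        self._key = key
        self._digest_size = hashlib.sha256().digest_size

    def sign(self, message):
        return hmac.new(self._key, message, hashlib.sha256).digest()

    def split(self, encoded):
        # Assume the signature is appended to the message.
        return encoded[:-self._digest_size], encoded[-self._digest_size:]

    def auth(self, encoded):
        message, signature = self.split(encoded)
        computed = self.sign(message)
        if not hmac.compare_digest(signature, computed):
            raise AuthenticatorInvalidSignature
        return message

auth = ToyAuthenticator(b'secret-key')
token = b'payload' + auth.sign(b'payload')
assert auth.auth(token) == b'payload'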
def stopService(self):
"""
Gracefully stop the service.
Returns:
defer.Deferred: a Deferred which is triggered when the service has
finished shutting down.
"""
self._service.factory.stopTrying()
yield self._service.factory.stopFactory()
yield service.MultiService.stopService(self) | Gracefully stop the service.
Returns:
defer.Deferred: a Deferred which is triggered when the service has
finished shutting down. |
def create(self, request, *args, **kwargs):
"""
Run **POST** against */api/alerts/* to create or update alert. If alert with posted scope and
alert_type already exists - it will be updated. Only users with staff privileges can create alerts.
Request example:
.. code-block:: javascript
POST /api/alerts/
Accept: application/json
Content-Type: application/json
Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
Host: example.com
{
"scope": "http://testserver/api/projects/b9e8a102b5ff4469b9ac03253fae4b95/",
"message": "message#1",
"alert_type": "first_alert",
"severity": "Debug"
}
"""
return super(AlertViewSet, self).create(request, *args, **kwargs) | Run **POST** against */api/alerts/* to create or update alert. If alert with posted scope and
alert_type already exists - it will be updated. Only users with staff privileges can create alerts.
Request example:
.. code-block:: javascript
POST /api/alerts/
Accept: application/json
Content-Type: application/json
Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4
Host: example.com
{
"scope": "http://testserver/api/projects/b9e8a102b5ff4469b9ac03253fae4b95/",
"message": "message#1",
"alert_type": "first_alert",
"severity": "Debug"
} |
def check_date_str_format(s, default_time="00:00:00"):
"""Check the format of date string"""
try:
str_fmt = s
if ":" not in s:
str_fmt = '{} {}'.format(s, default_time)
dt_obj = datetime.strptime(str_fmt, "%Y-%m-%d %H:%M:%S")
return RET_OK, dt_obj
except ValueError:
error_str = ERROR_STR_PREFIX + "wrong time or time format"
return RET_ERROR, error_str | Check the format of date string |
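RET_OK and RET_ERROR are module-level constants defined elsewhere; the parsing behaviour itself can be illustrated with only the standard library, as in this sketch:

from datetime import datetime

def parse_date_str(s, default_time="00:00:00"):
    # Mirror of the check above: date-only strings get a default time appended.
    if ":" not in s:
        s = '{} {}'.format(s, default_time)
    return datetime.strptime(s, "%Y-%m-%d %H:%M:%S")

print(parse_date_str("2019-05-01"))           # 2019-05-01 00:00:00
print(parse_date_str("2019-05-01 09:30:00"))  # 2019-05-01 09:30:00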
def keywords_for(*args):
"""
Return a list of ``Keyword`` objects for the given model instance
or a model class. In the case of a model class, retrieve all
keywords for all instances of the model and apply a ``weight``
attribute that can be used to create a tag cloud.
"""
# Handle a model instance.
if isinstance(args[0], Model):
obj = args[0]
if getattr(obj, "content_model", None):
obj = obj.get_content_model()
keywords_name = obj.get_keywordsfield_name()
keywords_queryset = getattr(obj, keywords_name).all()
# Keywords may have been prefetched already. If not, we
# need select_related for the actual keywords.
prefetched = getattr(obj, "_prefetched_objects_cache", {})
if keywords_name not in prefetched:
keywords_queryset = keywords_queryset.select_related("keyword")
return [assigned.keyword for assigned in keywords_queryset]
# Handle a model class.
try:
app_label, model = args[0].split(".", 1)
except ValueError:
return []
content_type = ContentType.objects.get(app_label=app_label, model=model)
assigned = AssignedKeyword.objects.filter(content_type=content_type)
keywords = Keyword.objects.filter(assignments__in=assigned)
keywords = keywords.annotate(item_count=Count("assignments"))
if not keywords:
return []
counts = [keyword.item_count for keyword in keywords]
min_count, max_count = min(counts), max(counts)
factor = (settings.TAG_CLOUD_SIZES - 1.)
if min_count != max_count:
factor /= (max_count - min_count)
for kywd in keywords:
kywd.weight = int(round((kywd.item_count - min_count) * factor)) + 1
return keywords | Return a list of ``Keyword`` objects for the given model instance
or a model class. In the case of a model class, retrieve all
keywords for all instances of the model and apply a ``weight``
attribute that can be used to create a tag cloud. |
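The tag-cloud weighting at the end can be seen in isolation; this is a small self-contained illustration with made-up counts and TAG_CLOUD_SIZES assumed to be 4:

counts = {'django': 12, 'python': 30, 'css': 3}
min_count, max_count = min(counts.values()), max(counts.values())
factor = (4 - 1.)
if min_count != max_count:
    factor /= (max_count - min_count)
weights = {k: int(round((c - min_count) * factor)) + 1 for k, c in counts.items()}
print(weights)  # {'django': 2, 'python': 4, 'css': 1}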
def fix_symbol_store_path(symbol_store_path = None,
remote = True,
force = False):
"""
Fix the symbol store path. Equivalent to the C{.symfix} command in
Microsoft WinDbg.
If the symbol store path environment variable hasn't been set, this
method will provide a default one.
@type symbol_store_path: str or None
@param symbol_store_path: (Optional) Symbol store path to set.
@type remote: bool
@param remote: (Optional) Defines the symbol store path to set when the
C{symbol_store_path} is C{None}.
If C{True} the default symbol store path is set to the Microsoft
symbol server. Debug symbols will be downloaded through HTTP.
This gives the best results but is also quite slow.
If C{False} the default symbol store path is set to the local
cache only. This prevents debug symbols from being downloaded and
is faster, but unless you've installed the debug symbols on this
machine or downloaded them in a previous debugging session, some
symbols may be missing.
If the C{symbol_store_path} argument is not C{None}, this argument
is ignored entirely.
@type force: bool
@param force: (Optional) If C{True} the new symbol store path is set
always. If C{False} the new symbol store path is only set if
missing.
This allows you to call this method preventively to ensure the
symbol server is always set up correctly when running your script,
but without messing up whatever configuration the user has.
Example::
from winappdbg import Debug, System
def simple_debugger( argv ):
# Instance a Debug object
debug = Debug( MyEventHandler() )
try:
# Make sure the remote symbol store is set
System.fix_symbol_store_path(remote = True,
force = False)
# Start a new process for debugging
debug.execv( argv )
# Wait for the debugee to finish
debug.loop()
# Stop the debugger
finally:
debug.stop()
@rtype: str or None
@return: The previously set symbol store path if any,
otherwise returns C{None}.
"""
try:
if symbol_store_path is None:
local_path = "C:\\SYMBOLS"
if not path.isdir(local_path):
local_path = "C:\\Windows\\Symbols"
if not path.isdir(local_path):
local_path = path.abspath(".")
if remote:
symbol_store_path = (
"cache*;SRV*"
+ local_path +
"*"
"http://msdl.microsoft.com/download/symbols"
)
else:
symbol_store_path = "cache*;SRV*" + local_path
previous = os.environ.get("_NT_SYMBOL_PATH", None)
if not previous or force:
os.environ["_NT_SYMBOL_PATH"] = symbol_store_path
return previous
except Exception:
e = sys.exc_info()[1]
warnings.warn("Cannot fix symbol path, reason: %s" % str(e),
RuntimeWarning) | Fix the symbol store path. Equivalent to the C{.symfix} command in
Microsoft WinDbg.
If the symbol store path environment variable hasn't been set, this
method will provide a default one.
@type symbol_store_path: str or None
@param symbol_store_path: (Optional) Symbol store path to set.
@type remote: bool
@param remote: (Optional) Defines the symbol store path to set when the
C{symbol_store_path} is C{None}.
If C{True} the default symbol store path is set to the Microsoft
symbol server. Debug symbols will be downloaded through HTTP.
This gives the best results but is also quite slow.
If C{False} the default symbol store path is set to the local
cache only. This prevents debug symbols from being downloaded and
is faster, but unless you've installed the debug symbols on this
machine or downloaded them in a previous debugging session, some
symbols may be missing.
If the C{symbol_store_path} argument is not C{None}, this argument
is ignored entirely.
@type force: bool
@param force: (Optional) If C{True} the new symbol store path is set
always. If C{False} the new symbol store path is only set if
missing.
This allows you to call this method preventively to ensure the
symbol server is always set up correctly when running your script,
but without messing up whatever configuration the user has.
Example::
from winappdbg import Debug, System
def simple_debugger( argv ):
# Instance a Debug object
debug = Debug( MyEventHandler() )
try:
# Make sure the remote symbol store is set
System.fix_symbol_store_path(remote = True,
force = False)
# Start a new process for debugging
debug.execv( argv )
# Wait for the debugee to finish
debug.loop()
# Stop the debugger
finally:
debug.stop()
@rtype: str or None
@return: The previously set symbol store path if any,
otherwise returns C{None}. |
def feeds(self):
"""List GitHub's timeline resources in Atom format.
:returns: dictionary parsed to include URITemplates
"""
url = self._build_url('feeds')
json = self._json(self._get(url), 200)
del json['ETag']
del json['Last-Modified']
urls = [
'timeline_url', 'user_url', 'current_user_public_url',
'current_user_url', 'current_user_actor_url',
'current_user_organization_url',
]
for url in urls:
json[url] = URITemplate(json[url])
links = json.get('_links', {})
for d in links.values():
d['href'] = URITemplate(d['href'])
return json | List GitHub's timeline resources in Atom format.
:returns: dictionary parsed to include URITemplates |
def add_to_matching_blacklist(db, entity):
"""Add entity to the matching blacklist.
This function adds an 'entity' or term to the matching blacklist.
The term to add cannot have a None or empty value; in this case
an InvalidValueError will be raised. If the given 'entity' exists in the
registry, the function will raise an AlreadyExistsError exception.
:param db: database manager
:param entity: term, word or value to blacklist
:raises InvalidValueError: raised when entity is None or an empty string
:raises AlreadyExistsError: raised when the entity already exists
in the registry.
"""
with db.connect() as session:
try:
add_to_matching_blacklist_db(session, entity)
except ValueError as e:
raise InvalidValueError(e) | Add entity to the matching blacklist.
This function adds an 'entity' or term to the matching blacklist.
The term to add cannot have a None or empty value; in this case
an InvalidValueError will be raised. If the given 'entity' exists in the
registry, the function will raise an AlreadyExistsError exception.
:param db: database manager
:param entity: term, word or value to blacklist
:raises InvalidValueError: raised when entity is None or an empty string
:raises AlreadyExistsError: raised when the entity already exists
in the registry. |
def connect(self, host='127.0.0.1', port=3306, user='root', password='', database=None):
""" Connect to the database specified """
if database is None:
raise exceptions.RequiresDatabase()
self._db_args = { 'host': host, 'port': port, 'user': user, 'password': password, 'database': database }
with self._db_conn() as conn:
conn.query('SELECT 1')
return self | Connect to the database specified |
def clear_title(self):
"""Removes the title.
:raise: ``NoAccess`` -- ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
metadata = Metadata(**settings.METADATA['title'])
if metadata.is_read_only() or metadata.is_required():
raise NoAccess()
self._my_map['title']['text'] = '' | Removes the title.
:raise: ``NoAccess`` -- ``Metadata.isRequired()`` is ``true`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
async def debug(self, client_id, conn_string, command, args):
"""Send a debug command to a device on behalf of a client.
See :meth:`AbstractDeviceAdapter.send_script`.
Args:
client_id (str): The client we are working for.
conn_string (str): A connection string that will be
passed to the underlying device adapter.
command (str): The name of the debug command to run.
args (dict): Any command arguments.
Returns:
object: The response to the debug command.
Raises:
DeviceServerError: There is an issue with your client_id such
as not being connected to the device.
DeviceAdapterError: The adapter had a protocol issue sending the debug
command.
"""
conn_id = self._client_info(client_id, 'connections')[conn_string]
return await self.adapter.debug(conn_id, command, args) | Send a debug command to a device on behalf of a client.
See :meth:`AbstractDeviceAdapter.send_script`.
Args:
client_id (str): The client we are working for.
conn_string (str): A connection string that will be
passed to the underlying device adapter.
command (str): The name of the debug command to run.
args (dict): Any command arguments.
Returns:
object: The response to the debug command.
Raises:
DeviceServerError: There is an issue with your client_id such
as not being connected to the device.
DeviceAdapterError: The adapter had a protocol issue sending the debug
command. |
def print_vertical(vertical_rows, labels, color, args):
"""Print the whole vertical graph."""
if color:
sys.stdout.write(f'\033[{color}m') # Start to write colorized.
for row in vertical_rows:
print(*row)
sys.stdout.write('\033[0m') # End of printing colored
print("-" * len(row) + "Values" + "-" * len(row))
# Print values (value_list is assumed to be a module-level list of value
# columns built elsewhere; it is not defined within this function)
for value in zip_longest(*value_list, fillvalue=' '):
print(" ".join(value))
if not args['no_labels']:
print("-" * len(row) + "Labels" + "-" * len(row))
# Print Labels
for label in zip_longest(*labels, fillvalue=''):
print(" ".join(label)) | Print the whole vertical graph. |
def transfer(sendContext, receiveContext, chunkSize):
""" Transfer (large) data from sender to receiver. """
try:
chunkSize = receiveContext.chunkSize
except AttributeError:
pass
if sendContext is not None and receiveContext is not None:
with receiveContext as writer:
# Open reader after writer,
# so any raised errors will abort write before writer closes.
with sendContext as reader:
checkBefore = None
if hasattr(writer, 'skipChunk'):
checkBefore = hasattr(reader, 'checkSum')
while True:
if checkBefore is True:
(size, checkSum) = reader.checkSum(chunkSize)
if writer.skipChunk(size, checkSum):
reader.seek(size, io.SEEK_CUR)
continue
data = reader.read(chunkSize)
if len(data) == 0:
break
if checkBefore is False:
checkSum = hashlib.md5(data).hexdigest()
if writer.skipChunk(len(data), checkSum, data):
continue
writer.write(data) | Transfer (large) data from sender to receiver. |
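A minimal way to exercise transfer (assuming it is importable from the module above): plain file objects work as both contexts, since open() returns objects that act as their own context managers and have neither skipChunk nor checkSum, so the simple read/write path is taken.

with open('source.bin', 'wb') as f:
    f.write(b'example payload ' * 1024)

# Files have no chunkSize attribute, so the explicit chunk size is kept.
transfer(open('source.bin', 'rb'), open('copy.bin', 'wb'), chunkSize=4096)

with open('copy.bin', 'rb') as f:
    assert f.read() == b'example payload ' * 1024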
def of(fixture_classes: Iterable[type], context: Union[None, 'torment.TestContext'] = None) -> Iterable['torment.fixtures.Fixture']:
'''Obtain all Fixture objects of the provided classes.
**Parameters**
:``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
:``context``: a ``torment.TestContext`` to initialize Fixtures with
**Return Value(s)**
Instantiated ``torment.fixtures.Fixture`` objects for each individual
fixture class that inherits from one of the provided classes.
'''
classes = list(copy.copy(fixture_classes))
fixtures = [] # type: Iterable[torment.fixtures.Fixture]
while len(classes):
current = classes.pop()
subclasses = current.__subclasses__()
if len(subclasses):
classes.extend(subclasses)
elif current not in fixture_classes:
fixtures.append(current(context))
return fixtures | Obtain all Fixture objects of the provided classes.
**Parameters**
:``fixture_classes``: classes inheriting from ``torment.fixtures.Fixture``
:``context``: a ``torment.TestContext`` to initialize Fixtures with
**Return Value(s)**
Instantiated ``torment.fixtures.Fixture`` objects for each individual
fixture class that inherits from one of the provided classes. |
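To make the traversal concrete: only leaf subclasses are instantiated, and the classes passed in are themselves skipped. A toy hierarchy (the stand-in base class mimics the real Fixture's context-taking constructor, and of() is assumed to be importable from the module above):

class FakeFixture:  # stand-in for torment.fixtures.Fixture
    def __init__(self, context):
        self.context = context

class ParserFixture(FakeFixture): pass        # intermediate: not instantiated
class JsonParserFixture(ParserFixture): pass  # leaf: instantiated
class CsvParserFixture(ParserFixture): pass   # leaf: instantiated

found = of([FakeFixture], context=None)
print(sorted(type(f).__name__ for f in found))
# ['CsvParserFixture', 'JsonParserFixture']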
def routing(routes, request):
"""Definition for route matching : helper"""
# strip leading and trailing slashes from the request path
path = request.path.strip('/')
# iterate through routes to match
args = {}
for name, route in routes.items():
if route['path'] == '^':
# this section exists because regex doesn't work for null character as desired
if path == '':
match = [True]
else:
match = []
else:
match = re.findall(route['path'], path)
if match:
# found the matching url, iterate through variables to pass data
# check if method exists
if not request.method in route['method']:
raise TornMethodNotAllowed
values = match[0] # a tuple of captured groups (or True for the root path, or a str for a single group)
if type(values) != bool:
if isinstance(values, tuple):
# re.findall yields immutable tuples; copy to a list so defaults can be filled in below
values = list(values)
for i in range(len(route['variables'])):
# if value is blank, check if default exists and pass it instead
if type(values) == str:
args[route['variables'][i]] = values
else:
if not values[i] and route['variables'][i] in route['defaults']:
values[i] = route['defaults'][route['variables'][i]]
args[route['variables'][i]] = values[i]
# we have the variables we need, args, path, controller
return {
'kwargs' : args,
'controller' : route['controller']
}
raise TornNotFoundError | Definition for route matching : helper |
def _enable_l1_keepalives(self, command):
"""
Enables L1 keepalive messages if supported.
:param command: command line
"""
env = os.environ.copy()
if "IOURC" not in os.environ:
env["IOURC"] = self.iourc_path
try:
output = yield from gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, env=env, stderr=True)
if re.search(r"-l\s+Enable Layer 1 keepalive messages", output):
command.extend(["-l"])
else:
raise IOUError("layer 1 keepalive messages are not supported by {}".format(os.path.basename(self._path)))
except (OSError, subprocess.SubprocessError) as e:
log.warning("could not determine if layer 1 keepalive messages are supported by {}: {}".format(os.path.basename(self._path), e)) | Enables L1 keepalive messages if supported.
:param command: command line |
def __parse_identities(self, json):
"""Parse identities using Sorting Hat format.
The Sorting Hat identities format is a JSON stream whose keys are
the UUIDs of the unique identities. Each unique identity
object has a list of identities and enrollments.
When the unique identity does not have a UUID, it will be considered
as an anonymous unique identity. This means that the UUID of these
identities will be regenerated during the loading process.
Next, there is an example of a valid stream:
{
"uidentities": {
"[email protected]": {
"enrollments": [],
"identities": [],
"uuid": null
},
"03e12d00e37fd45593c49a5a5a1652deca4cf302": {
"enrollments": [
{
"end": "2100-01-01T00:00:00",
"start": "1900-01-01T00:00:00",
"organization": "Example",
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
}
],
"identities": [
{
"email": "[email protected]",
"id": "03e12d00e37fd45593c49a5a5a1652deca4cf302",
"name": "John Smith",
"source": "scm",
"username": "jsmith",
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
},
{
"email": "[email protected]",
"id": "75d95d6c8492fd36d24a18bd45d62161e05fbc97",
"name": "John Smith",
"source": "scm",
"username": null,
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
}
],
"profile": {
"country": {
"alpha3": "USA",
"code": "US",
"name": "United States of America"
},
"email": "[email protected]",
"gender": "male",
"gender_acc": 100,
"name": null,
"is_bot": true,
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
},
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
}
}
}
:param json: JSON object to parse
:raises InvalidFormatError: raised when the format of the stream is
not valid.
"""
try:
for uidentity in json['uidentities'].values():
uuid = self.__encode(uidentity['uuid'])
uid = UniqueIdentity(uuid=uuid)
if uidentity['profile']:
profile = uidentity['profile']
if type(profile['is_bot']) != bool:
msg = "invalid json format. 'is_bot' must have a bool value"
raise InvalidFormatError(cause=msg)
is_bot = profile['is_bot']
gender = profile.get('gender', None)
if gender is not None:
gender = self.__encode(gender)
gender_acc = profile.get('gender_acc', None)
if gender_acc is not None:
if type(gender_acc) != int:
msg = "invalid json format. 'gender_acc' must have an integer value"
raise InvalidFormatError(cause=msg)
elif not 0 <= gender_acc <= 100:
msg = "invalid json format. 'gender_acc' is not in range (0,100)"
raise InvalidFormatError(cause=msg)
name = self.__encode(profile['name'])
email = self.__encode(profile['email'])
prf = Profile(uuid=uuid, name=name, email=email,
gender=gender, gender_acc=gender_acc,
is_bot=is_bot)
if profile['country']:
alpha3 = self.__encode(profile['country']['alpha3'])
code = self.__encode(profile['country']['code'])
name = self.__encode(profile['country']['name'])
c = Country(alpha3=alpha3, code=code, name=name)
prf.country_code = code
prf.country = c
uid.profile = prf
for identity in uidentity['identities']:
identity_id = self.__encode(identity['id'])
name = self.__encode(identity['name'])
email = self.__encode(identity['email'])
username = self.__encode(identity['username'])
source = self.__encode(identity['source'])
sh_id = Identity(id=identity_id, name=name,
email=email, username=username,
source=source, uuid=uuid)
uid.identities.append(sh_id)
for enrollment in uidentity['enrollments']:
organization = self.__encode(enrollment['organization'])
org = self._organizations.get(organization, None)
if not org:
org = Organization(name=organization)
self._organizations[organization] = org
try:
start = str_to_datetime(enrollment['start'])
end = str_to_datetime(enrollment['end'])
except InvalidDateError as e:
raise InvalidFormatError(cause=str(e))
rol = Enrollment(start=start, end=end, organization=org)
uid.enrollments.append(rol)
self._identities.append(uid)
except KeyError as e:
msg = "invalid json format. Attribute %s not found" % e.args
raise InvalidFormatError(cause=msg) | Parse identities using Sorting Hat format.
The Sorting Hat identities format is a JSON stream whose keys are
the UUIDs of the unique identities. Each unique identity
object has a list of identities and enrollments.
When the unique identity does not have a UUID, it will be considered
as an anonymous unique identity. This means that the UUID of these
identities will be regenerated during the loading process.
Next, there is an example of a valid stream:
{
"uidentities": {
"[email protected]": {
"enrollments": [],
"identities": [],
"uuid": null
},
"03e12d00e37fd45593c49a5a5a1652deca4cf302": {
"enrollments": [
{
"end": "2100-01-01T00:00:00",
"start": "1900-01-01T00:00:00",
"organization": "Example",
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
}
],
"identities": [
{
"email": "[email protected]",
"id": "03e12d00e37fd45593c49a5a5a1652deca4cf302",
"name": "John Smith",
"source": "scm",
"username": "jsmith",
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
},
{
"email": "[email protected]",
"id": "75d95d6c8492fd36d24a18bd45d62161e05fbc97",
"name": "John Smith",
"source": "scm",
"username": null,
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
}
],
"profile": {
"country": {
"alpha3": "USA",
"code": "US",
"name": "United States of America"
},
"email": "[email protected]",
"gender": "male",
"gender_acc": 100,
"name": null,
"is_bot": true,
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
},
"uuid": "03e12d00e37fd45593c49a5a5a1652deca4cf302"
}
}
}
:param json: JSON object to parse
:raises InvalidFormatError: raised when the format of the stream is
not valid. |
def retrieve(self, key):
"""Retrieves a cached array if possible."""
column_file = os.path.join(self._hash_dir, '%s.json' % key)
cache_file = os.path.join(self._hash_dir, '%s.npy' % key)
if os.path.exists(cache_file):
data = np.load(cache_file)
if os.path.exists(column_file):
with open(column_file, 'r') as json_file:
columns = json.load(json_file)
data = pd.DataFrame(data, columns=columns)
else:
return None
return data | Retrieves a cached array if possible. |
def load_image(name, n, m=None, gpu=None, square=None):
"""Function to load images with certain size."""
if m is None:
m = n
if gpu is None:
gpu = 0
if square is None:
square = 0
command = ('Shearlab.load_image("{}", {}, {}, {}, {})'.format(name,
n, m, gpu, square))
return j.eval(command) | Function to load images with certain size. |
def tablestructure(tablename, dataman=True, column=True, subtable=False,
sort=False):
"""Print the structure of a table.
It is the same as :func:`table.showstructure`, but without the need to open
the table first.
"""
t = table(tablename, ack=False)
six.print_(t.showstructure(dataman, column, subtable, sort)) | Print the structure of a table.
It is the same as :func:`table.showstructure`, but without the need to open
the table first. |
def get_all_preordered_namespace_hashes( self ):
"""
Get all outstanding namespace preorder hashes that have not expired.
Used for testing
"""
cur = self.db.cursor()
namespace_hashes = namedb_get_all_preordered_namespace_hashes( cur, self.lastblock )
return namespace_hashes | Get all outstanding namespace preorder hashes that have not expired.
Used for testing |
def feed_arthur():
""" Feed Ocean with backend data collected from arthur redis queue"""
logger.info("Collecting items from redis queue")
db_url = 'redis://localhost/8'
conn = redis.StrictRedis.from_url(db_url)
logger.debug("Redis connection stablished with %s.", db_url)
# Get and remove queued items in an atomic transaction
pipe = conn.pipeline()
pipe.lrange(Q_STORAGE_ITEMS, 0, -1)
pipe.ltrim(Q_STORAGE_ITEMS, 1, 0)
items = pipe.execute()[0]
for item in items:
arthur_item = pickle.loads(item)
if arthur_item['tag'] not in arthur_items:
arthur_items[arthur_item['tag']] = []
arthur_items[arthur_item['tag']].append(arthur_item)
for tag in arthur_items:
logger.debug("Items for %s: %i", tag, len(arthur_items[tag])) | Feed Ocean with backend data collected from arthur redis queue |
def path(self):
''' Get the path of the wrapped folder '''
if isinstance(self.dir, Directory):
return self.dir._path
elif isinstance(self.dir, ROOT.TDirectory):
return self.dir.GetPath()
elif isinstance(self.dir, _FolderView):
return self.dir.path()
else:
return str(self.dir) | Get the path of the wrapped folder |
def list(self, entity=None):
"""
Returns a dictionary of data, optionally filtered for a given entity.
"""
uri = "/%s" % self.uri_base
if entity:
uri = "%s?entityId=%s" % (uri, utils.get_id(entity))
resp, resp_body = self._list(uri, return_raw=True)
return resp_body | Returns a dictionary of data, optionally filtered for a given entity. |
def start_server_background(port):
"""Start the newtab server as a background process."""
if sys.version_info[0] == 2:
lines = ('import pydoc\n'
'pydoc.serve({port})')
cell = lines.format(port=port)
else:
# The location of newtabmagic (normally $IPYTHONDIR/extensions)
# needs to be added to sys.path.
path = repr(os.path.dirname(os.path.realpath(__file__)))
lines = ('import sys\n'
'sys.path.append({path})\n'
'import newtabmagic\n'
'newtabmagic.pydoc_cli_monkey_patched({port})')
cell = lines.format(path=path, port=port)
# Use script cell magic so that shutting down IPython stops
# the server process.
line = "python --proc proc --bg --err error --out output"
ip = get_ipython()
ip.run_cell_magic("script", line, cell)
return ip.user_ns['proc'] | Start the newtab server as a background process. |
def write(self, output_io):
'''Write a taxonomy to an open stream out in GG format. Code calling this
function must open and close the io object.'''
for name, tax in self.taxonomy.items():
output_io.write("%s\t%s\n" % (name, '; '.join(tax))) | Write a taxonomy to an open stream out in GG format. Code calling this
function must open and close the io object. |
def set_triggered_by_event(self, value):
"""
Setter for 'triggered_by_event' field.
:param value - a new value of 'triggered_by_event' field. Must be a boolean type. Does not accept None value.
"""
if value is None or not isinstance(value, bool):
raise TypeError("TriggeredByEvent must be set to a bool")
else:
self.__triggered_by_event = value | Setter for 'triggered_by_event' field.
:param value - a new value of 'triggered_by_event' field. Must be a boolean type. Does not accept None value. |
def bind(self, attribute, cls, buffer, fmt, *, offset=0, stride=0, divisor=0, normalize=False) -> None:
'''
Bind individual attributes to buffers.
Args:
location (int): The attribute location.
cls (str): The attribute class. Valid values are ``f``, ``i`` or ``d``.
buffer (Buffer): The buffer.
format (str): The buffer format.
Keyword Args:
offset (int): The offset.
stride (int): The stride.
divisor (int): The divisor.
normalize (bool): The normalize parameter, if applicable.
'''
self.mglo.bind(attribute, cls, buffer.mglo, fmt, offset, stride, divisor, normalize) | Bind individual attributes to buffers.
Args:
location (int): The attribute location.
cls (str): The attribute class. Valid values are ``f``, ``i`` or ``d``.
buffer (Buffer): The buffer.
format (str): The buffer format.
Keyword Args:
offset (int): The offset.
stride (int): The stride.
divisor (int): The divisor.
normalize (bool): The normalize parameter, if applicable. |
def plotloc(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight loc figure """
fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'key']
if not circleinds: circleinds = range(len(data['snrs']))
# set ranges
datalen = len(data['dm'])
inds = circleinds + crossinds + edgeinds
l1 = [data['l1'][i] for i in inds]
l1_min = min(l1)
l1_max = max(l1)
m1 = [data['m1'][i] for i in inds]
m1_min = min(m1)
m1_max = max(m1)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=tools, output_backend='webgl')
loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = loc.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}[email protected]'.format(url_path, fileroot)
taptool = loc.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return loc | Make a light-weight loc figure |
def _add_rg(unmapped_file, config, names):
"""Add the missing RG header."""
picard = broad.runner_from_path("picard", config)
rg_fixed = picard.run_fn("picard_fix_rgs", unmapped_file, names)
return rg_fixed | Add the missing RG header. |
def format_help(self):
"""Override help doc to add cell args. """
if not self._cell_args:
return super(CommandParser, self).format_help()
else:
# Print the standard argparse info, the cell arg block, and then the epilog
# If we don't remove epilog before calling the super, then epilog will
# be printed before the 'Cell args' block.
epilog = self.epilog
self.epilog = None
orig_help = super(CommandParser, self).format_help()
cell_args_help = '\nCell args:\n\n'
for cell_arg, v in six.iteritems(self._cell_args):
required = 'Required' if v['required'] else 'Optional'
cell_args_help += '%s: %s. %s.\n\n' % (cell_arg, required, v['help'])
orig_help += cell_args_help
if epilog:
orig_help += epilog + '\n\n'
return orig_help | Override help doc to add cell args. |
def HandleSimpleResponses(
self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message.
"""
return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms) | Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message. |
def mkdir(self, mdir, parents=False):
"""Make a directory.
Note that this will not error out if the directory already exists
(that is how the PutDirectory Manta API behaves).
@param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
@param parents {bool} Optional. Default false. Like 'mkdir -p', this
will create parent dirs as necessary.
"""
assert mdir.startswith('/'), "%s: invalid manta path" % mdir
parts = mdir.split('/')
assert len(parts) > 3, "%s: cannot create top-level dirs" % mdir
if not parents:
self.put_directory(mdir)
else:
# Find the first non-existent dir: binary search. Because
# PutDirectory doesn't error on 'mkdir .../already-exists' we
# don't have a way to detect a miss on `start`. So basically we
# keep doing the binary search until we hit and close the `start`
# to `end` gap.
# Example:
# - mdir: /trent/stor/builds/a/b/c (need to mk a/b/c)
# parts: ['', 'trent', 'stor', 'builds', 'a', 'b', 'c']
# start: 4
# end: 8
# - idx: 6
# d: /trent/stor/builds/a/b (put_directory fails)
# end: 6
# - idx: 5
# d: /trent/stor/builds/a (put_directory succeeds)
# start: 5
# (break out of loop)
# - for i in range(6, 8):
# i=6 -> d: /trent/stor/builds/a/b
# i=7 -> d: /trent/stor/builds/a/b/c
end = len(parts) + 1
start = 3 # Index of the first possible dir to create.
while start < end - 1:
idx = int((end - start) // 2 + start)
d = '/'.join(parts[:idx])
try:
self.put_directory(d)
except errors.MantaAPIError:
_, ex, _ = sys.exc_info()
if ex.code == 'DirectoryDoesNotExist':
end = idx
else:
raise
else:
start = idx
# Now need to create from (end-1, len(parts)].
for i in range(end, len(parts) + 1):
d = '/'.join(parts[:i])
self.put_directory(d) | Make a directory.
Note that this will not error out if the directory already exists
(that is how the PutDirectory Manta API behaves).
@param mdir {str} A manta path, e.g. '/trent/stor/mydir'.
@param parents {bool} Optional. Default false. Like 'mkdir -p', this
will create parent dirs as necessary. |
def visible(self, request):
'''
Checks the both, check_visible and apply_visible, against the owned model and it's instance set
'''
return self.apply_visible(self.get_queryset(), request) if self.check_visible(self.model, request) is not False else self.get_queryset().none() | Checks the both, check_visible and apply_visible, against the owned model and it's instance set |
def DictProduct(dictionary):
"""Computes a cartesian product of dict with iterable values.
This utility function accepts a dictionary with iterable values, computes
cartesian products of these values and yields dictionaries of expanded values.
Examples:
>>> list(DictProduct({"a": [1, 2], "b": [3, 4]}))
[{"a": 1, "b": 3}, {"a": 1, "b": 4}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
Args:
dictionary: A dictionary with iterable values.
Yields:
Dictionaries with values being a result of cartesian product of values of
the input dictionary.
"""
keys, values = Unzip(iteritems(dictionary))
for product_values in itertools.product(*values):
yield dict(zip(keys, product_values)) | Computes a cartesian product of dict with iterable values.
This utility function accepts a dictionary with iterable values, computes
cartesian products of these values and yields dictionaries of expanded values.
Examples:
>>> list(DictProduct({"a": [1, 2], "b": [3, 4]}))
[{"a": 1, "b": 3}, {"a": 1, "b": 4}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
Args:
dictionary: A dictionary with iterable values.
Yields:
Dictionaries with values being a result of cartesian product of values of
the input dictionary. |
def from_table(table, fields=None):
""" Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table.
"""
if fields is None:
fields = '*'
elif isinstance(fields, list):
fields = ','.join(fields)
return Query('SELECT %s FROM %s' % (fields, table._repr_sql_())) | Return a Query for the given Table object
Args:
table: the Table object to construct a Query out of
fields: the fields to return. If None, all fields will be returned. This can be a string
which will be injected into the Query after SELECT, or a list of field names.
Returns:
A Query object that will return the specified fields from the records in the Table. |
def dictionary_merge(a, b):
"""merges dictionary b into a
Like dict.update, but recursive
"""
for key, value in b.items():
if key in a and isinstance(a[key], dict) and isinstance(value, dict):
dictionary_merge(a[key], b[key])
continue
a[key] = b[key]
return a | merges dictionary b into a
Like dict.update, but recursive |
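For example, nested dictionaries are merged recursively while scalar values from b win, and a is modified in place (the return value is the same object):

a = {'name': 'svc', 'limits': {'cpu': 1, 'mem': '256M'}}
b = {'limits': {'mem': '512M', 'swap': '1G'}, 'debug': True}

merged = dictionary_merge(a, b)
print(merged)
# {'name': 'svc', 'limits': {'cpu': 1, 'mem': '512M', 'swap': '1G'}, 'debug': True}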
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Adds the "Send to Twitter" checkbox after the "status" field,
provided by any ``Displayable`` models. The approach here is
quite a hack, however the sane approach of using a custom
form with a boolean field defined, and then adding it to the
formssets attribute of the admin class fell apart quite
horrifically.
"""
formfield = super(TweetableAdminMixin,
self).formfield_for_dbfield(db_field, **kwargs)
if Api and db_field.name == "status" and get_auth_settings():
def wrapper(render):
def wrapped(*args, **kwargs):
rendered = render(*args, **kwargs)
label = _("Send to Twitter")
return mark_safe(rendered + FORMFIELD_HTML % label)
return wrapped
formfield.widget.render = wrapper(formfield.widget.render)
return formfield | Adds the "Send to Twitter" checkbox after the "status" field,
provided by any ``Displayable`` models. The approach here is
quite a hack, however the sane approach of using a custom
form with a boolean field defined, and then adding it to the
formsets attribute of the admin class fell apart quite
horrifically. |
def collect_conflicts_between(
context: ValidationContext,
conflicts: List[Conflict],
cached_fields_and_fragment_names: Dict,
compared_fragment_pairs: "PairSet",
parent_fields_are_mutually_exclusive: bool,
field_map1: NodeAndDefCollection,
field_map2: NodeAndDefCollection,
) -> None:
"""Collect all Conflicts between two collections of fields.
This is similar to, but different from the `collectConflictsWithin` function above.
This check assumes that `collectConflictsWithin` has already been called on each
provided collection of fields. This is true because this validator traverses each
individual selection set.
"""
# A field map is a keyed collection, where each key represents a response name and
# the value at that key is a list of all fields which provide that response name.
# For any response name which appears in both provided field maps, each field from
# the first field map must be compared to every field in the second field map to
# find potential conflicts.
for response_name, fields1 in field_map1.items():
fields2 = field_map2.get(response_name)
if fields2:
for field1 in fields1:
for field2 in fields2:
conflict = find_conflict(
context,
cached_fields_and_fragment_names,
compared_fragment_pairs,
parent_fields_are_mutually_exclusive,
response_name,
field1,
field2,
)
if conflict:
conflicts.append(conflict) | Collect all Conflicts between two collections of fields.
This is similar to, but different from the `collectConflictsWithin` function above.
This check assumes that `collectConflictsWithin` has already been called on each
provided collection of fields. This is true because this validator traverses each
individual selection set. |
def reconnect(self):
'''
Try to reconnect and re-authenticate with the server.
'''
log.debug('Closing the SSH socket.')
try:
self.ssl_skt.close()
except socket.error:
log.error('The socket seems to be closed already.')
log.debug('Re-opening the SSL socket.')
self.authenticate() | Try to reconnect and re-authenticate with the server. |
def get_children_graph(self, item_ids=None, language=None, forbidden_item_ids=None):
"""
Get a subgraph of items reachable from the given set of items through
the 'child' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (child items), root items are
referenced by None key
"""
if forbidden_item_ids is None:
forbidden_item_ids = set()
def _children(item_ids):
if item_ids is None:
items = Item.objects.filter(active=True).prefetch_related('children')
else:
item_ids = [ii for iis in item_ids.values() for ii in iis]
items = Item.objects.filter(id__in=item_ids, active=True).prefetch_related('children')
return {
item.id: sorted([
_item.id for _item in item.children.all()
if _item.active and _item.id not in forbidden_item_ids
])
for item in items if item.id not in forbidden_item_ids
}
if item_ids is None:
return self._reachable_graph(None, _children, language=language)
else:
graph = self.get_children_graph(None, language, forbidden_item_ids=forbidden_item_ids)
return self._subset_graph(graph, set(item_ids) - set(forbidden_item_ids)) | Get a subgraph of items reachable from the given set of items through
the 'child' relation.
Args:
item_ids (list): items which are taken as roots for the reachability
language (str): if specified, filter out items which are not
available in the given language
Returns:
dict: item id -> list of items (child items), root items are
referenced by None key |
def _write_nex(self, mdict, nlocus):
"""
function that takes a dictionary mapping names to sequences,
and a locus number, and writes it as a NEXUS file with a mrbayes
analysis block given a set of mcmc arguments.
"""
## create matrix as a string
max_name_len = max([len(i) for i in mdict])
namestring = "{:<" + str(max_name_len+1) + "} {}\n"
matrix = ""
for i in mdict.items():
matrix += namestring.format(i[0], i[1])
## ensure dir
minidir = os.path.realpath(os.path.join(self.workdir, self.name))
if not os.path.exists(minidir):
os.makedirs(minidir)
## write nexus block
handle = os.path.join(minidir, "{}.nex".format(nlocus))
with open(handle, 'w') as outnex:
outnex.write(NEXBLOCK.format(**{
"ntax": len(mdict),
"nchar": len(mdict.values()[0]),
"matrix": matrix,
"ngen": self.params.mb_mcmc_ngen,
"sfreq": self.params.mb_mcmc_sample_freq,
"burnin": self.params.mb_mcmc_burnin,
})) | function that takes a dictionary mapping names to sequences,
and a locus number, and writes it as a NEXUS file with a mrbayes
analysis block given a set of mcmc arguments. |
def print_common_terms(common_terms):
"""Print common terms for each pair of word sets.
:param common_terms: Output of get_common_terms().
"""
if not common_terms:
print('No duplicates')
else:
for set_pair in common_terms:
set1, set2, terms = set_pair
print('{} and {} have in common:'.format(set1, set2))
for term in terms:
print(' {}'.format(term)) | Print common terms for each pair of word sets.
:param common_terms: Output of get_common_terms(). |
def clean(self):
"""
Authenticate the given username/email and password. If the fields
are valid, store the authenticated user for returning via save().
"""
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
self._user = authenticate(username=username, password=password)
if self._user is None:
raise forms.ValidationError(
ugettext("Invalid username/email and password"))
elif not self._user.is_active:
raise forms.ValidationError(ugettext("Your account is inactive"))
return self.cleaned_data | Authenticate the given username/email and password. If the fields
are valid, store the authenticated user for returning via save(). |
def profile(self, profile):
"""Set the current profile.
Args:
profile (dict): The profile data.
"""
# clear staging data
self._staging_data = None
# retrieve language from install.json or assume Python
lang = profile.get('install_json', {}).get('programLanguage', 'PYTHON')
# load instance of ArgBuilder
profile_args = ArgBuilder(lang, self.profile_args(profile.get('args')))
# set current profile
self._profile = profile
# attach instance to current profile
self._profile['profile_args'] = profile_args
# load tcex module after current profile is set
self.load_tcex()
# select report for current profile
self.reports.profile(profile.get('profile_name'))
# create required directories for tcrun to function
self._create_tc_dirs() | Set the current profile.
Args:
profile (dict): The profile data. |
def draw_triangle(a, b, c, color, draw):
"""Draws a triangle with the given vertices in the given color."""
draw.polygon([a, b, c], fill=color) | Draws a triangle with the given vertices in the given color. |
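Assuming the draw object is a Pillow ImageDraw instance (its polygon call matches the signature used above), usage looks roughly like this:

from PIL import Image, ImageDraw

img = Image.new('RGB', (200, 200), 'white')
draw = ImageDraw.Draw(img)
draw_triangle((20, 180), (100, 20), (180, 180), 'red', draw)
img.save('triangle.png')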
def clear_n_of_m(self):
"""stub"""
if (self.get_n_of_m_metadata().is_read_only() or
self.get_n_of_m_metadata().is_required()):
raise NoAccess()
self.my_osid_object_form._my_map['nOfM'] = \
int(self._n_of_m_metadata['default_object_values'][0]) | stub |
def genome_alignment_iterator(fn, reference_species, index_friendly=False,
verbose=False):
"""
build an iterator for an MAF file of genome alignment blocks.
:param fn: filename or stream-like object to iterate over.
:param reference_species: which species in the alignment should be treated
as the reference?
:param index_friendly: if True, buffering is disabled to support using
the iterator to build an index.
:return an iterator that yields GenomeAlignment objects
"""
kw_args = {"reference_species": reference_species}
for e in maf.maf_iterator(fn, index_friendly=index_friendly,
yield_class=GenomeAlignmentBlock,
yield_kw_args=kw_args,
verbose=verbose):
yield e | build an iterator for an MAF file of genome alignment blocks.
:param fn: filename or stream-like object to iterate over.
:param reference_species: which species in the alignment should be treated
as the reference?
:param index_friendly: if True, buffering is disabled to support using
the iterator to build an index.
:return an iterator that yields GenomeAlignment objects |
def get_structure_seqs(pdb_file, file_type):
"""Get a dictionary of a PDB file's sequences.
Special cases include:
- Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
- HETATMs. Currently written as an "X", or unknown amino acid.
Args:
pdb_file: Path to PDB file
Returns:
dict: Dictionary of:
{chain_id: sequence}
"""
# TODO: Please check out capitalization of chain IDs in mmcif files. example: 5afi - chain "l" is present but
# it seems like biopython capitalizes it to chain L
# Get the first model
my_structure = StructureIO(pdb_file)
model = my_structure.first_model
structure_seqs = {}
# Loop over each chain of the PDB
for chain in model:
chain_seq = ''
tracker = 0
# Loop over the residues
for res in chain.get_residues():
# NOTE: you can get the residue number too
# res_num = res.id[1]
# Double-check if the residue name is a standard residue.
# If it is not a standard residue (e.g. selenomethionine),
# it will be filled in with an X on the next iteration.
if Polypeptide.is_aa(res, standard=True):
full_id = res.get_full_id()
end_tracker = full_id[3][1]
i_code = full_id[3][2]
aa = Polypeptide.three_to_one(res.get_resname())
# Tracker to fill in X's
if end_tracker != (tracker + 1):
if i_code != ' ':
chain_seq += aa
tracker = end_tracker + 1
continue
else:
chain_seq += 'X' * (end_tracker - tracker - 1)
chain_seq += aa
tracker = end_tracker
else:
continue
structure_seqs[chain.get_id()] = chain_seq
return structure_seqs | Get a dictionary of a PDB file's sequences.
Special cases include:
- Insertion codes. In the case of residue numbers like "15A", "15B", both residues are written out. Example: 9LPR
- HETATMs. Currently written as an "X", or unknown amino acid.
Args:
pdb_file: Path to PDB file
Returns:
dict: Dictionary of:
{chain_id: sequence} |
def add_motifs(self, args):
"""Add motifs to the result object."""
self.lock.acquire()
# Callback function for motif programs
if args is None or len(args) != 2 or len(args[1]) != 3:
try:
job = args[0]
logger.warn("job %s failed", job)
self.finished.append(job)
except Exception:
logger.warn("job failed")
return
job, (motifs, stdout, stderr) = args
logger.info("%s finished, found %s motifs", job, len(motifs))
for motif in motifs:
if self.do_counter:
self.counter += 1
motif.id = "gimme_{}_".format(self.counter) + motif.id
f = open(self.outfile, "a")
f.write("%s\n" % motif.to_pfm())
f.close()
self.motifs.append(motif)
if self.do_stats and len(motifs) > 0:
#job_id = "%s_%s" % (motif.id, motif.to_consensus())
logger.debug("Starting stats job of %s motifs", len(motifs))
for bg_name, bg_fa in self.background.items():
job = self.job_server.apply_async(
mp_calc_stats,
(motifs, self.fg_fa, bg_fa, bg_name),
callback=self.add_stats
)
self.stat_jobs.append(job)
logger.debug("stdout %s: %s", job, stdout)
logger.debug("stdout %s: %s", job, stderr)
self.finished.append(job)
self.lock.release() | Add motifs to the result object. |
def __get_jp(self, extractor_processor, sub_output=None):
"""Tries to get name from ExtractorProcessor to filter on first.
Otherwise falls back to filtering based on its metadata"""
if sub_output is None and extractor_processor.output_field is None:
raise ValueError(
"ExtractorProcessors input paths cannot be unioned across fields. Please specify either a sub_output or use a single scalar output_field")
if extractor_processor.get_output_jsonpath_with_name(sub_output) is not None:
return extractor_processor.get_output_jsonpath_with_name(sub_output)
else:
return extractor_processor.get_output_jsonpath(sub_output) | Tries to get name from ExtractorProcessor to filter on first.
Otherwise falls back to filtering based on its metadata |
def _find_penultimate_layer(model, layer_idx, penultimate_layer_idx):
"""Searches for the nearest penultimate `Conv` or `Pooling` layer.
Args:
model: The `keras.models.Model` instance.
layer_idx: The layer index within `model.layers`.
penultimate_layer_idx: The pre-layer to `layer_idx`. If set to None, the nearest penultimate
`Conv` or `Pooling` layer is used.
Returns:
The penultimate layer.
"""
if penultimate_layer_idx is None:
for idx, layer in utils.reverse_enumerate(model.layers[:layer_idx - 1]):
if isinstance(layer, Wrapper):
layer = layer.layer
if isinstance(layer, (_Conv, _Pooling1D, _Pooling2D, _Pooling3D)):
penultimate_layer_idx = idx
break
if penultimate_layer_idx is None:
raise ValueError('Unable to determine penultimate `Conv` or `Pooling` '
'layer for layer_idx: {}'.format(layer_idx))
# Handle negative indexing otherwise the next check can fail.
if layer_idx < 0:
layer_idx = len(model.layers) + layer_idx
if penultimate_layer_idx > layer_idx:
raise ValueError('`penultimate_layer_idx` needs to be before `layer_idx`')
return model.layers[penultimate_layer_idx] | Searches for the nearest penultimate `Conv` or `Pooling` layer.
Args:
model: The `keras.models.Model` instance.
layer_idx: The layer index within `model.layers`.
penultimate_layer_idx: The pre-layer to `layer_idx`. If set to None, the nearest penultimate
`Conv` or `Pooling` layer is used.
Returns:
The penultimate layer. |