text (stringlengths 78 to 104k) | score (float64 0 to 0.18)
---|---|
def categories_to_colors(cats, colormap=None):
"""
Map categorical data to colors.
Parameters
----------
cats : pandas.Series or list
Categorical data as a list or in a Series.
colormap : list
List of RGB triples. If not provided, the tableau20 colormap defined in
this module will be used.
Returns
-------
legend : pd.Series
Series whose values are colors and whose index are the original
categories that correspond to those colors.
"""
if colormap is None:
colormap = tableau20
if not isinstance(cats, pd.Series):
cats = pd.Series(cats)
legend = pd.Series(dict(zip(set(cats), colormap)))
# colors = pd.Series([legend[x] for x in cats.values], index=cats.index)
# I've removed this output:
# colors : pd.Series
# Series whose values are the colors for each category. If cats was a
# Series, then out will have the same index as cats.
return legend | 0.002028 |
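A minimal usage sketch (the three RGB triples below stand in for the module's tableau20 list, which is assumed to be defined alongside the function):
import pandas as pd

example_colormap = [(31, 119, 180), (255, 127, 14), (44, 160, 44)]  # stand-in for tableau20
cats = pd.Series(['low', 'high', 'low', 'mid'], index=['a', 'b', 'c', 'd'])
legend = categories_to_colors(cats, colormap=example_colormap)
# legend maps each distinct category to one color; map it back for per-row colors:
colors = cats.map(legend)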
def set_terminal_width(self, command="", delay_factor=1):
"""CLI terminals try to automatically adjust the line based on the width of the terminal.
This causes the output to get distorted when accessed programmatically.
Set terminal width to 511 which works on a broad set of devices.
:param command: Command string to send to the device
:type command: str
:param delay_factor: See __init__: global_delay_factor
:type delay_factor: int
"""
if not command:
return ""
delay_factor = self.select_delay_factor(delay_factor)
command = self.normalize_cmd(command)
self.write_channel(command)
output = self.read_until_prompt()
if self.ansi_escape_codes:
output = self.strip_ansi_escape_codes(output)
return output | 0.003517 |
def get(hostname,
username=None,
fallback=None,
detect_sudo=True,
use_rhceph=False,
callbacks=None):
"""
Retrieve the module that matches the distribution of a ``hostname``. This
function will connect to that host and retrieve the distribution
information, then return the appropriate module and slap a few attributes
onto that module defining the information it found for the hostname.
For example, if host ``node1.example.com`` is an Ubuntu server, the
``debian`` module would be returned and the following would be set::
module.name = 'ubuntu'
module.release = '12.04'
module.codename = 'precise'
:param hostname: A hostname that is reachable/resolvable over the network
:param fallback: Optional fallback to use if no supported distro is found
:param use_rhceph: Whether or not to install RH Ceph on a RHEL machine or
the community distro. Changes what host module is
returned for RHEL.
:param callbacks: A list of callables that accept one argument (the actual
module that contains the connection) that will be
called, in order at the end of the instantiation of the
module.
"""
conn = get_connection(
hostname,
username=username,
logger=logging.getLogger(hostname),
detect_sudo=detect_sudo
)
try:
conn.import_module(remotes)
except IOError as error:
if 'already closed' in getattr(error, 'message', ''):
raise RuntimeError('remote connection got closed, ensure ``requiretty`` is disabled for %s' % hostname)
distro_name, release, codename = conn.remote_module.platform_information()
if not codename or not _get_distro(distro_name):
raise exc.UnsupportedPlatform(
distro=distro_name,
codename=codename,
release=release)
machine_type = conn.remote_module.machine_type()
module = _get_distro(distro_name, use_rhceph=use_rhceph)
module.name = distro_name
module.normalized_name = _normalized_distro_name(distro_name)
module.normalized_release = _normalized_release(release)
module.distro = module.normalized_name
module.is_el = module.normalized_name in ['redhat', 'centos', 'fedora', 'scientific', 'oracle', 'virtuozzo']
module.is_rpm = module.normalized_name in ['redhat', 'centos',
'fedora', 'scientific', 'suse', 'oracle', 'virtuozzo', 'alt']
module.is_deb = module.normalized_name in ['debian', 'ubuntu']
module.is_pkgtarxz = module.normalized_name in ['arch']
module.release = release
module.codename = codename
module.conn = conn
module.machine_type = machine_type
module.init = module.choose_init(module)
module.packager = module.get_packager(module)
# execute each callback if any
if callbacks:
for c in callbacks:
c(module)
return module | 0.001311 |
def aes_ecb_encrypt(self, key_handle, plaintext):
"""
AES ECB encrypt using a key handle.
@warning: Please be aware of the known limitations of AES ECB mode before using it!
@param key_handle: Key handle to use for AES ECB encryption
@param plaintext: Data to encrypt
@type key_handle: integer or string
@type plaintext: string
@returns: Ciphertext
@rtype: string
@see: L{pyhsm.aes_ecb_cmd.YHSM_Cmd_AES_ECB_Encrypt}
"""
return pyhsm.aes_ecb_cmd.YHSM_Cmd_AES_ECB_Encrypt( \
self.stick, key_handle, plaintext).execute() | 0.006359 |
def update_task(task):
"""Update a task for a given task ID.
:param task: PYBOSSA task
"""
try:
task_id = task.id
task = _forbidden_attributes(task)
res = _pybossa_req('put', 'task', task_id, payload=task.data)
if res.get('id'):
return Task(res)
else:
return res
except: # pragma: no cover
raise | 0.005128 |
def fromdelta(args):
"""
%prog fromdelta deltafile
Convert deltafile to coordsfile.
"""
p = OptionParser(fromdelta.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
deltafile, = args
coordsfile = deltafile.rsplit(".", 1)[0] + ".coords"
cmd = "show-coords -rclH {0}".format(deltafile)
sh(cmd, outfile=coordsfile)
return coordsfile | 0.002336 |
def get_parent_books(self, book_id):
"""Gets the parent books of the given ``id``.
arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` to
query
return: (osid.commenting.BookList) - the parent books of the
``id``
raise: NotFound - a ``Book`` identified by ``Id`` is not found
raise: NullArgument - ``book_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchySession.get_parent_bins
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=book_id)
return BookLookupSession(
self._proxy,
self._runtime).get_books_by_ids(
list(self.get_parent_book_ids(book_id))) | 0.00304 |
def add(self, **kwargs):
"""
Add objects to the environment.
"""
for key in kwargs:
if type(kwargs[key]) == str:
self._children[key] = Directory(kwargs[key])
else:
self._children[key] = kwargs[key]
self._children[key]._env = self
self._children[key].apply_config(ConfigApplicator(self.config))
self._children[key].prepare() | 0.004474 |
def reset_all(self, suppress_logging=False):
""" iterates thru the list of established connections and resets them by disconnecting and reconnecting """
pool_names = list(self.pools)
for name in pool_names:
self.reset(name, suppress_logging) | 0.01083 |
def _choose_float_dtype(dtype, has_offset):
"""Return a float dtype that can losslessly represent `dtype` values."""
# Keep float32 as-is. Upcast half-precision to single-precision,
# because float16 is "intended for storage but not computation"
if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):
return np.float32
# float32 can exactly represent all integers up to 24 bits
if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):
# A scale factor is entirely safe (vanishing into the mantissa),
# but a large integer offset could lead to loss of precision.
# Sensitivity analysis can be tricky, so we just use a float64
# if there's any offset at all - better unoptimised than wrong!
if not has_offset:
return np.float32
# For all other types and circumstances, we just use float64.
# (safe because eg. complex numbers are not supported in NetCDF)
return np.float64 | 0.001022 |
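Illustrative calls (a sketch; the results follow directly from the branches above):
import numpy as np

_choose_float_dtype(np.dtype('float32'), has_offset=False)  # -> numpy.float32 (kept as-is)
_choose_float_dtype(np.dtype('int16'), has_offset=False)    # -> numpy.float32 (fits the 24-bit mantissa)
_choose_float_dtype(np.dtype('int16'), has_offset=True)     # -> numpy.float64 (offset could lose precision)
_choose_float_dtype(np.dtype('int64'), has_offset=False)    # -> numpy.float64 (fallback)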
def apply(self, node):
""" Apply transformation and return if an update happened. """
new_node = self.run(node)
return self.update, new_node | 0.012195 |
def parallelize(self, seconds_to_wait=2):
"""Start a parallel thread for receiving messages.
If :meth:`start` was no called before, start will be called in the
thread.
The thread calls :meth:`receive_message` until the :attr:`state`
:meth:`~AYABInterface.communication.states.State.is_connection_closed`.
:param float seconds_to_wait: A time in seconds to wait with the
parallel execution. This is useful to allow the controller time to
initialize.
.. seealso:: :attr:`lock`, :meth:`runs_in_parallel`
"""
with self.lock:
thread = Thread(target=self._parallel_receive_loop,
args=(seconds_to_wait,))
thread.daemon = True
thread.start()
self._thread = thread | 0.002421 |
def histogram(args):
"""
%prog histogram *.gff
Plot gene statistics based on output of stats. For each gff file, look to
see if the metrics folder (i.e. Exon_Length) contains the data and plot
them.
"""
from jcvi.graphics.histogram import histogram_multiple
p = OptionParser(histogram.__doc__)
p.add_option("--bins", dest="bins", default=40, type="int",
help="number of bins to plot in the histogram [default: %default]")
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
gff_files = args
# metrics = ("Exon_Length", "Intron_Length", "Gene_Length", "Exon_Count")
colors = ("red", "green", "blue", "black")
vmaxes = (1000, 1000, 4000, 20)
xlabels = ("bp", "bp", "bp", "number")
for metric, color, vmax, xlabel in zip(metrics, colors, vmaxes, xlabels):
logging.debug("Parsing files in `{0}`..".format(metric))
numberfiles = [op.join(metric, op.basename(x).split(".")[0] + ".txt") \
for x in gff_files]
histogram_multiple(numberfiles, 0, vmax, xlabel, metric,
bins=opts.bins, facet=True, fill=color,
prefix=metric + ".") | 0.004866 |
def read_hatpi_binnedlc(binnedpklf, textlcf, timebinsec):
'''This reads a binnedlc pickle produced by the HATPI prototype pipeline.
Converts it into a standard lcdict as produced by the read_hatpi_textlc
function above, using the information in the unbinned text LC for the same
object.
Adds a 'binned' key to the standard lcdict containing the binned mags, etc.
'''
LOGINFO('reading binned LC %s' % binnedpklf)
# read the textlc
lcdict = read_hatpi_textlc(textlcf)
# read the binned LC
if binnedpklf.endswith('.gz'):
infd = gzip.open(binnedpklf,'rb')
else:
infd = open(binnedpklf,'rb')
try:
binned = pickle.load(infd)
except Exception as e:
infd.seek(0)
binned = pickle.load(infd, encoding='latin1')
infd.close()
# now that we have both, pull out the required columns from the binnedlc
blckeys = binned.keys()
lcdict['binned'] = {}
for key in blckeys:
# get EPD stuff
if (key == 'epdlc' and
'AP0' in binned[key] and
'AP1' in binned[key] and
'AP2' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
ap1mad = np.nanmedian(np.abs(binned[key]['AP1'] -
np.nanmedian(binned[key]['AP1'])))
ap2mad = np.nanmedian(np.abs(binned[key]['AP2'] -
np.nanmedian(binned[key]['AP2'])))
lcdict['binned']['iep1'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
lcdict['binned']['iep2'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP1'],
'errs':np.full_like(binned[key]['AP1'],
ap1mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
lcdict['binned']['iep3'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP2'],
'errs':np.full_like(binned[key]['AP2'],
ap2mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
# get TFA stuff for aperture 1
if ((key == 'tfalc.TF1' or key == 'tfalc.TF1.gz') and
'AP0' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
lcdict['binned']['itf1'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
# get TFA stuff for aperture 2
if ((key == 'tfalc.TF2' or key == 'tfalc.TF2.gz') and
'AP0' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
lcdict['binned']['itf2'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
# get TFA stuff for aperture 3
if ((key == 'tfalc.TF3' or key == 'tfalc.TF3.gz') and
'AP0' in binned[key]):
# we'll have to generate errors because we don't have any in the
# generated binned LC.
ap0mad = np.nanmedian(np.abs(binned[key]['AP0'] -
np.nanmedian(binned[key]['AP0'])))
lcdict['binned']['itf3'] = {'times':binned[key]['RJD'],
'mags':binned[key]['AP0'],
'errs':np.full_like(binned[key]['AP0'],
ap0mad),
'nbins':binned[key]['nbins'],
'timebins':binned[key]['jdbins'],
'timebinsec':timebinsec}
# all done, check if we succeeded
if lcdict['binned']:
return lcdict
else:
LOGERROR('no binned measurements found in %s!' % binnedpklf)
return None | 0.008013 |
def _rmv_pkg(self, package):
"""Remove one signle package
"""
removes = []
if GetFromInstalled(package).name() and package not in self.skip:
ver = GetFromInstalled(package).version()
removes.append(package + ver)
self._removepkg(package)
return removes | 0.006098 |
def _qInstallMessageHandler(handler):
"""Install a message handler that works in all bindings
Args:
handler: A function that takes 3 arguments, or None
"""
def messageOutputHandler(*args):
# In Qt4 bindings, message handlers are passed 2 arguments
# In Qt5 bindings, message handlers are passed 3 arguments
# The first argument is a QtMsgType
# The last argument is the message to be printed
# The Middle argument (if passed) is a QMessageLogContext
if len(args) == 3:
msgType, logContext, msg = args
elif len(args) == 2:
msgType, msg = args
logContext = None
else:
raise TypeError(
"handler expected 2 or 3 arguments, got {0}".format(len(args)))
if isinstance(msg, bytes):
# In python 3, some bindings pass a bytestring, which cannot be
# used elsewhere. Decoding a python 2 or 3 bytestring object will
# consistently return a unicode object.
msg = msg.decode()
handler(msgType, logContext, msg)
passObject = messageOutputHandler if handler else handler
if Qt.IsPySide or Qt.IsPyQt4:
return Qt._QtCore.qInstallMsgHandler(passObject)
elif Qt.IsPySide2 or Qt.IsPyQt5:
return Qt._QtCore.qInstallMessageHandler(passObject) | 0.000732 |
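A minimal caller sketch: the wrapped handler always receives the normalized three-argument signature, and passing None restores the default handler (this assumes the surrounding Qt.py shim module is available).
def my_handler(msg_type, log_context, message):
    print("Qt says:", message)

_qInstallMessageHandler(my_handler)  # install across bindings
_qInstallMessageHandler(None)        # uninstall / restore the default handler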
def set_game_score(
self,
user_id: Union[int, str],
score: int,
force: bool = None,
disable_edit_message: bool = None,
chat_id: Union[int, str] = None,
message_id: int = None
):
# inline_message_id: str = None): TODO Add inline_message_id
"""Use this method to set the score of the specified user in a game.
Args:
user_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
score (``int``):
New score, must be non-negative.
force (``bool``, *optional*):
Pass True, if the high score is allowed to decrease.
This can be useful when fixing mistakes or banning cheaters.
disable_edit_message (``bool``, *optional*):
Pass True, if the game message should not be automatically edited to include the current scoreboard.
chat_id (``int`` | ``str``, *optional*):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
Required if inline_message_id is not specified.
message_id (``int``, *optional*):
Identifier of the sent message.
Required if inline_message_id is not specified.
Returns:
On success, if the message was sent by the bot, returns the edited :obj:`Message <pyrogram.Message>`,
otherwise returns True.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
:class:`BotScoreNotModified` if the new score is not greater than the user's current score in the chat and force is False.
"""
r = self.send(
functions.messages.SetGameScore(
peer=self.resolve_peer(chat_id),
score=score,
id=message_id,
user_id=self.resolve_peer(user_id),
force=force or None,
edit_message=not disable_edit_message or None
)
)
for i in r.updates:
if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
return pyrogram.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats}
)
return True | 0.00461 |
def absent(
name,
zone,
record_type,
identifier=None,
region=None,
key=None,
keyid=None,
profile=None,
wait_for_sync=True,
split_dns=False,
private_zone=False):
'''
Ensure the Route53 record is deleted.
name
Name of the record.
zone
The zone to delete the record from.
record_type
The record type (A, NS, MX, TXT, etc.)
identifier
An identifier to match for deletion.
region
The region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
wait_for_sync
Wait for an INSYNC change status from Route53.
split_dns
Route53 supports a public and private DNS zone with the same
names.
private_zone
If using split_dns, specify if this is the private zone.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
record = __salt__['boto_route53.get_record'](name, zone, record_type,
False, region, key, keyid,
profile, split_dns,
private_zone, identifier)
if record:
if __opts__['test']:
ret['comment'] = 'Route53 record {0} set to be deleted.'.format(name)
ret['result'] = None
return ret
deleted = __salt__['boto_route53.delete_record'](name, zone,
record_type,
identifier, False,
region, key, keyid,
profile,
wait_for_sync,
split_dns,
private_zone)
if deleted:
ret['changes']['old'] = record
ret['changes']['new'] = None
ret['comment'] = 'Deleted {0} Route53 record.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to delete {0} Route53 record.'.format(name)
else:
ret['comment'] = '{0} does not exist.'.format(name)
return ret | 0.001176 |
def submit(**kwargs):
"""Shortcut that takes an alert to evaluate and makes the appropriate API
call based on the results.
:param kwargs: A list of keyword arguments
:type kwargs: dict
"""
if 'alert' not in kwargs:
raise ValueError('Alert required')
if 'value' not in kwargs:
raise ValueError('Value required')
alert = kwargs.pop('alert')
value = kwargs['value']
if alert(value):
fail(kwargs)
else:
ok(kwargs) | 0.002045 |
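Hypothetical usage: `alert` is any callable that returns True when the value should trigger a failure; the extra keyword arguments (here `name`, purely illustrative) are passed through to fail()/ok().
submit(alert=lambda v: v > 0.9, value=0.95, name='cpu_load')  # alert fires -> fail(...)
submit(alert=lambda v: v > 0.9, value=0.10, name='cpu_load')  # alert quiet -> ok(...)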
def data_iterator_concat_datasets(data_source_list,
batch_size,
shuffle=False,
rng=None,
with_memory_cache=True,
with_file_cache=False,
cache_dir=None,
epoch_begin_callbacks=[],
epoch_end_callbacks=[]):
'''data_iterator_concat_datasets
Get data from multiple datasets.
For example,
.. code-block:: python
batch = data_iterator_concat_datasets([DataSource0, DataSource1, ...], batch_size)
Args:
data_source_list (list of DataSource): list of datasets.
batch_size (int): Size of data unit.
shuffle (bool):
Indicates whether the dataset is shuffled or not.
Default value is False.
rng (None or :obj:`numpy.random.RandomState`): Numpy random number
generator.
with_memory_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithMemoryCache`
to wrap ``data_source``. It is a good idea to set this as true unless
data_source provides on-memory data.
Default value is True.
with_file_cache (bool):
If ``True``, use :py:class:`.data_source.DataSourceWithFileCache`
to wrap ``data_source``.
If ``data_source`` is slow, enabling this option is a good idea.
Default value is False.
cache_dir (str):
Location of file_cache.
If this value is None, :py:class:`.data_source.DataSourceWithFileCache`
creates file caches implicitly on temporary directory and erases them all
when data_iterator is finished.
Otherwise, :py:class:`.data_source.DataSourceWithFileCache` keeps created cache.
Default is None.
epoch_begin_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the beginning of an epoch.
epoch_end_callbacks (list of functions): An item is a function
which takes an epoch index as an argument. These are called
at the end of an epoch.
Returns:
:py:class:`DataIterator <nnabla.utils.data_iterator.DataIterator>`:
Instance of DataIterator
'''
ds = ConcatDataSource(data_source_list,
shuffle=shuffle,
rng=rng)
return data_iterator(ds,
batch_size=batch_size,
with_memory_cache=with_memory_cache,
with_file_cache=with_file_cache,
epoch_begin_callbacks=epoch_begin_callbacks,
epoch_end_callbacks=epoch_end_callbacks) | 0.002394 |
def get_family_lookup_session(self, proxy=None, *args, **kwargs):
"""Gets the ``OsidSession`` associated with the family lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.relationship.FamilyLookupSession) - a
``FamilyLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_family_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if ``supports_family_lookup()`` is ``true``.*
"""
if not self.supports_family_lookup():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.FamilyLookupSession(proxy=proxy, runtime=self._runtime, **kwargs)
except AttributeError:
raise OperationFailed()
return session | 0.003781 |
def get_model_info(self):
'''
:return: dictionary of information about this model
'''
info = {}
info['model_name'] = self.name
info['stages'] = '->'.join([repr(s) for s in self._stages])
info['sequence'] = {
'index': self._current_index
}
return info | 0.005988 |
def batch_reverse(self, points, **kwargs):
"""
Method for identifying the addresses from a list of lat/lng tuples
"""
fields = ",".join(kwargs.pop("fields", []))
response = self._req(
"post", verb="reverse", params={"fields": fields}, data=json_points(points)
)
if response.status_code != 200:
return error_response(response)
logger.debug(response)
return LocationCollection(response.json()["results"]) | 0.006012 |
def create_model(modelfunc, fname='', listw=[], outfname='',
limit=int(3e6), min_pwlen=6, topk=10000, sep=r'\s+'):
""":modelfunc: is a function that takes a word and returns its
splits. for ngram model this function returns all the ngrams of a
word, for PCFG it will return splits of the password.
@modelfunc: func: string -> [list of strings]
@fname: name of the file to read from
@listw: list of passwords. Used passwords from both the files and
listw if provided.
@outfname: the file to write down the model.
"""
def length_filter(pw):
pw = ''.join(c for c in pw if c in VALID_CHARS)
return len(pw) >= min_pwlen
pws = []
if fname:
pws = helper.open_get_line(fname, limit=limit, pw_filter=length_filter, sep=sep)
big_dict = defaultdict(int)
total_f, total_e = 0, 0
# Add topk passwords from the input dataset to the list
topk_pws = []
for pw, c in itertools.chain(pws, listw):
for ng in modelfunc(pw):
big_dict[ng] += c
total_f += c
total_e += 1
if len(big_dict) % 100000 == 0:
print(("Dictionary size: {} (Total_freq: {}; Total_pws: {}"\
.format(len(big_dict), total_f, total_e)))
if len(topk_pws) >= topk:
heapq.heappushpop(topk_pws, (c, pw))
else:
heapq.heappush(topk_pws, (c, pw))
# Adding topk password to deal with probability reduction of popular
# passwords. Mostly effective for n-gram models
print("topk={}".format(topk))
if topk > 0:
for c, pw in topk_pws:
tpw = helper.START + pw + helper.END
big_dict[tpw] += c
total_f += c
total_e += 1
big_dict[NPWS_W] = total_e
big_dict[TOTALF_W] = total_f
nDawg = dawg.IntCompletionDAWG(big_dict)
if not outfname:
outfname = 'tmpmodel.dawg.gz'
elif not outfname.endswith('.gz'):
outfname += '.gz'
pathlib.Path(outfname).parent.mkdir(parents=True, exist_ok=True)
helper.save_dawg(nDawg, outfname)
return nDawg | 0.001416 |
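A sketch of a possible modelfunc, shown purely as an illustration (a character-bigram splitter; helper.START and helper.END are the module's word-boundary markers used above, and the listw entries are (password, count) pairs to match the loop):
def char_bigrams(pw):
    pw = helper.START + pw + helper.END
    return [pw[i:i + 2] for i in range(len(pw) - 1)]

# model = create_model(char_bigrams, listw=[('password', 100), ('123456', 80)], outfname='bigrams.dawg')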
def column(self, model=None):
"""
Returns the column instance for this query.
:return <orb.Column>
"""
try:
schema = (self.__model or model).schema()
except AttributeError:
return None
else:
return schema.column(self.__column) | 0.009063 |
def is_planar(graph):
"""Determines whether a graph is planar or not."""
# Determine connected components as subgraphs; their planarity is independent of each other
connected_components = get_connected_components_as_subgraphs(graph)
for component in connected_components:
# Biconnected components likewise have independent planarity
biconnected_components = find_biconnected_components_as_subgraphs(component)
for bi_component in biconnected_components:
planarity = __is_subgraph_planar(bi_component)
if not planarity:
return False
return True | 0.004785 |
def threads_bt(self):
"""Display thread backtraces."""
import threading
import traceback
threads = {}
for thread in threading.enumerate():
frames = sys._current_frames().get(thread.ident)
if frames:
stack = traceback.format_stack(frames)
else:
stack = []
threads[thread] = "".join(stack)
return flask.render_template("gourde/threads.html", threads=threads) | 0.004124 |
def from_string(dir_string):
'''Returns the correct constant for a given string.
@raises InvalidDirectionError
'''
dir_string = dir_string.upper()
if dir_string == UP:
return UP
elif dir_string == DOWN:
return DOWN
elif dir_string == LEFT:
return LEFT
elif dir_string == RIGHT:
return RIGHT
else:
raise InvalidDirectionError(dir_string) | 0.002415 |
def get_annotationdefault(self):
"""
The AnnotationDefault attribute, only present upon fields in an
annotation.
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.20
""" # noqa
buff = self.get_attribute("AnnotationDefault")
if buff is None:
return None
with unpack(buff) as up:
(ti, ) = up.unpack_struct(_H)
return ti | 0.004454 |
def create_wiki_page():
"""
for http://archive.worldofdragon.org/index.php?title=CharMap
"""
print (
'{| class="wikitable"'
' style="font-family: monospace;'
' background-color:#ffffcc;"'
' cellpadding="10"'
)
print("|-")
print("! POKE")
print("value")
print("! ")
print("! unicode")
print("codepoint")
print("! type")
print("|-")
for no, data in enumerate(DRAGON_CHARS_MAP):
item, item_type = data
codepoint = ord(item)
print("|%i" % no)
foreground, background = get_rgb_color(item_type)
foreground = "#%02x%02x%02x" % foreground
background = "#%02x%02x%02x" % background
style = "color: #%s;"
print('| style="color:%s; background-color:%s;" | &#x%x;' % (
foreground, background, codepoint
))
print("|%i" % codepoint)
print("|%s" % item_type)
print("|-")
print("|}") | 0.002062 |
def list_key_policies(key_id, limit=None, marker=None, region=None, key=None,
keyid=None, profile=None):
'''
List key_policies for the specified key.
CLI example::
salt myminion boto_kms.list_key_policies 'alias/mykey'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if key_id.startswith('alias/'):
key_id = _get_key_id(key_id)
r = {}
try:
key_policies = conn.list_key_policies(
key_id,
limit=limit,
marker=marker
)
# TODO: handle limit, marker and truncation automatically.
r['key_policies'] = key_policies['PolicyNames']
except boto.exception.BotoServerError as e:
r['error'] = __utils__['boto.get_error'](e)
return r | 0.001245 |
def patch_cluster_custom_object_scale(self, group, version, plural, name, body, **kwargs): # noqa: E501
"""patch_cluster_custom_object_scale # noqa: E501
partially update scale of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_scale(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_cluster_custom_object_scale_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
return data | 0.001369 |
def row_to_dictionary(header_row_web_element, row_webelement):
"""
Converts a row into a dictionary of key/values.
(Note: assumes all rows/columns have uniform cells. Does not
account for any row or column spans)
Args:
header_row_web_element (WebElement): WebElement reference to the column headers.
row_webelement (WebElement): WebElement reference to row.
Returns:
Returns a dictionary object containing keys consisting of the column headers
and values consisting of the row contents.
Usage::
self.webdriver.get("http://the-internet.herokuapp.com/tables")
header = self.webdriver.find_element_by_css_selector("#table1 thead tr")
target_row = self.webdriver.find_element_by_css_selector("#table1 tbody tr")
row_values = WebUtils.row_to_dictionary(header, target_row)
row_values == {'Last Name': 'Smith',
'Due': '$50.00',
'First Name': 'John',
'Web Site': 'http://www.jsmith.com',
'Action': 'edit delete',
'Email': '[email protected]'}
"""
headers = header_row_web_element.find_elements_by_tag_name("th")
data_cells = row_webelement.find_elements_by_tag_name("td")
value_dictionary = {}
for i in range(len(data_cells)):
value_dictionary[ headers[i].text ] = data_cells[i].text
return value_dictionary | 0.011091 |
def read(cls, iprot):
'''
Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
'''
init_kwds = {}
iprot.read_struct_begin()
while True:
ifield_name, ifield_type, _ifield_id = iprot.read_field_begin()
if ifield_type == 0: # STOP
break
elif ifield_name == 'images_per_object':
init_kwds['images_per_object'] = iprot.read_i32()
elif ifield_name == 'objects':
init_kwds['objects'] = iprot.read_i32()
iprot.read_field_end()
iprot.read_struct_end()
return cls(**init_kwds) | 0.003606 |
def _trim_zeros_complex(str_complexes, na_rep='NaN'):
"""
Separates the real and imaginary parts from the complex number, and
executes the _trim_zeros_float method on each of those.
"""
def separate_and_trim(str_complex, na_rep):
num_arr = str_complex.split('+')
return (_trim_zeros_float([num_arr[0]], na_rep) +
['+'] +
_trim_zeros_float([num_arr[1][:-1]], na_rep) +
['j'])
return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes] | 0.001876 |
def convert_to_unit(self, unit):
"""Convert the Data Collection to the input unit."""
self._values = self._header.data_type.to_unit(
self._values, unit, self._header.unit)
self._header._unit = unit | 0.008584 |
def on_message(self, name):
"""
Decorator for message listener callback functions.
.. tip::
This is the most elegant way to define message listener callback functions.
Use :py:func:`add_message_listener` only if you need to be able to
:py:func:`remove the listener <remove_message_listener>` later.
A decorated message listener function is called with three arguments every time the
specified message is received:
* ``self`` - the current vehicle.
* ``name`` - the name of the message that was intercepted.
* ``message`` - the actual message (a `pymavlink <http://www.qgroundcontrol.org/mavlink/pymavlink>`_
`class <https://www.samba.org/tridge/UAV/pymavlink/apidocs/classIndex.html>`_).
For example, in the fragment below ``my_method`` will be called for every heartbeat message:
.. code:: python
@vehicle.on_message('HEARTBEAT')
def my_method(self, name, msg):
pass
See :ref:`mavlink_messages` for more information.
:param String name: The name of the message to be intercepted by the decorated listener function (or '*' to get all messages).
"""
def decorator(fn):
if isinstance(name, list):
for n in name:
self.add_message_listener(n, fn)
else:
self.add_message_listener(name, fn)
# return the listener function so the decorated name stays bound to it
return fn
return decorator | 0.005355 |
def send_message(self, msg):
"""
Internal method used to send messages through Clementine remote network protocol.
"""
if self.socket is not None:
msg.version = self.PROTOCOL_VERSION
serialized = msg.SerializeToString()
data = struct.pack(">I", len(serialized)) + serialized
#print("Sending message: %s" % msg)
try:
self.socket.send(data)
except Exception as e:
#self.state = "Disconnected"
pass | 0.009091 |
def _extract_operation_from_view(self, view, args):
"""
Extract swagger operation details from colander view definitions.
:param view:
View to extract information from.
:param args:
Arguments from the view decorator.
:rtype: dict
:returns: Operation definition.
"""
op = {
'responses': {
'default': {
'description': 'UNDOCUMENTED RESPONSE'
}
},
}
# If 'produces' are not defined in the view, try get from renderers
renderer = args.get('renderer', '')
if "json" in renderer: # allows for "json" or "simplejson"
produces = ['application/json']
elif renderer == 'xml':
produces = ['text/xml']
else:
produces = None
if produces:
op.setdefault('produces', produces)
# Get explicit accepted content-types
consumes = args.get('content_type')
if consumes is not None:
# convert to a list, if it's not yet one
consumes = to_list(consumes)
# It is possible to add callables for content_type, so we have to
# to filter those out, since we cannot evaluate those here.
consumes = [x for x in consumes if not callable(x)]
op['consumes'] = consumes
# Get parameters from view schema
is_colander = self._is_colander_schema(args)
if is_colander:
schema = self._extract_transform_colander_schema(args)
parameters = self.parameters.from_schema(schema)
else:
# Bail out for now
parameters = None
if parameters:
op['parameters'] = parameters
# Get summary from docstring
if isinstance(view, six.string_types):
if 'klass' in args:
ob = args['klass']
view_ = getattr(ob, view.lower())
docstring = trim(view_.__doc__)
else:
docstring = str(trim(view.__doc__))
if docstring and self.summary_docstrings:
op['summary'] = docstring
# Get response definitions
if 'response_schemas' in args:
op['responses'] = self.responses.from_schema_mapping(args['response_schemas'])
# Get response tags
if 'tags' in args:
op['tags'] = args['tags']
# Get response operationId
if 'operation_id' in args:
op['operationId'] = args['operation_id']
# Get security policies
if 'api_security' in args:
op['security'] = args['api_security']
return op | 0.001104 |
def authenticate_compute(client_secrets):
"""Authenticates a service account for the compute engine.
TODO: docstring"""
scopes = ['https://www.googleapis.com/auth/compute']
credentials = ServiceAccountCredentials.from_json_keyfile_dict(
client_secrets, scopes=scopes)
return credentials | 0.009119 |
def fixPath(path):
"""
Ensures paths are correct for Linux and Windows
"""
path = os.path.abspath(os.path.expanduser(path))
if path.startswith("\\"):
return "C:" + path
return path | 0.025381 |
def get_post_replies(self, *args, **kwargs):
"""Return a get_content generator for inboxed submission replies.
The additional parameters are passed directly into
:meth:`.get_content`. Note: the `url` parameter cannot be altered.
"""
return self.get_content(self.config['post_replies'], *args, **kwargs) | 0.005814 |
def fit(self, X, y):
"""Build an accelerated failure time model.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Data matrix.
y : structured array, shape = (n_samples,)
A structured array containing the binary event indicator
as first field, and time of event or time of censoring as
second field.
Returns
-------
self
"""
X, event, time = check_arrays_survival(X, y)
weights = ipc_weights(event, time)
super().fit(X, numpy.log(time), sample_weight=weights)
return self | 0.003086 |
def set_environment_variable(self, key, val):
""" Sets a variable if that variable is not already set """
if self.get_environment_variable(key) in [None, val]:
self.__dict__['environment_variables'][key] = val
else:
raise Contradiction("Could not set environment variable %s" % (key)) | 0.009036 |
def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self, *args, **kwargs) | 0.008475 |
def auth(username, password):
'''
File based authentication
^filename
The path to the file to use for authentication.
^filetype
The type of file: ``text``, ``htpasswd``, ``htdigest``.
Default: ``text``
^realm
The realm required by htdigest authentication.
.. note::
The following parameters are only used with the ``text`` filetype.
^hashtype
The digest format of the password. Can be ``plaintext`` or any digest
available via :py:func:`hashutil.digest <salt.modules.hashutil.digest>`.
Default: ``plaintext``
^field_separator
The character to use as a delimiter between fields in a text file.
Default: ``:``
^username_field
The numbered field in the text file that contains the username, with
numbering beginning at 1 (one).
Default: ``1``
^password_field
The numbered field in the text file that contains the password, with
numbering beginning at 1 (one).
Default: ``2``
'''
config = _get_file_auth_config()
if not config:
return False
auth_function = FILETYPE_FUNCTION_MAP.get(config['filetype'], FILETYPE_FUNCTION_MAP['text'])
return auth_function(username, password, **config) | 0.001575 |
def update(self, *args, **kwargs):
"""Update the last section record"""
self.augment_args(args, kwargs)
kwargs['log_action'] = kwargs.get('log_action', 'update')
if not self.rec:
return self.add(**kwargs)
else:
for k, v in kwargs.items():
# Don't update object; use whatever was set in the original record
if k not in ('source', 's_vid', 'table', 't_vid', 'partition', 'p_vid'):
setattr(self.rec, k, v)
self._session.merge(self.rec)
if self._logger:
self._logger.info(self.rec.log_str)
self._session.commit()
self._ai_rec_id = None
return self.rec.id | 0.005348 |
def __clip(val, minimum, maximum):
"""
:param val: input value
:param minimum: min value
:param maximum: max value
:return: val clipped to range [minimum, maximum]
"""
if val is None or minimum is None or maximum is None:
return None
if val < minimum:
return minimum
if val > maximum:
return maximum
return val | 0.009174 |
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated by
the last name in the path. Raise ImproperlyConfigured if something goes
wrong. This has come straight from Django 1.6
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
raise ImproperlyConfigured("%s%s doesn't look like a module path" % (
error_prefix, dotted_path))
try:
module = import_module(module_path)
except ImportError as e:
raise ImproperlyConfigured('%sError importing module %s: "%s"' % (
error_prefix, module_path, e))
try:
attr = getattr(module, class_name)
except AttributeError:
raise ImproperlyConfigured(
'%sModule "%s" does not define a "%s" attribute/class' % (
error_prefix, module_path, class_name
)
)
return attr | 0.001043 |
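A quick sketch using a standard-library dotted path:
OrderedDict = import_by_path('collections.OrderedDict')
od = OrderedDict(a=1, b=2)
# A bad path raises ImproperlyConfigured, with the optional prefix prepended:
# import_by_path('collections.NoSuchThing', error_prefix='MY_SETTING: ')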
def end_time(self):
"""End timestamp of the dataset"""
try:
return self.start_time + SCAN_DURATION[self.sector]
except KeyError:
return self.start_time | 0.01005 |
def cmd_set(context):
"""
Set the new "current" value for a key.
If the existing current version and the new version have identical /value/ and /status/,
then nothing is written, to avoid stacking up redundant entries in the version table.
Args:
context: a populated EFVersionContext object
"""
# If key value is a special symbol, see if this env allows it
if context.value in EFConfig.SPECIAL_VERSIONS and context.env_short not in EFConfig.SPECIAL_VERSION_ENVS:
fail("special version: {} not allowed in env: {}".format(context.value, context.env_short))
# If key value is a special symbol, the record cannot be marked "stable"
if context.value in EFConfig.SPECIAL_VERSIONS and context.stable:
fail("special versions such as: {} cannot be marked 'stable'".format(context.value))
# Resolve any references
if context.value == "=prod":
context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "prod", context.service_name))
elif context.value == "=staging":
context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "staging", context.service_name))
elif context.value == "=latest":
if not EFConfig.VERSION_KEYS[context.key]["allow_latest"]:
fail("=latest cannot be used with key: {}".format(context.key))
func_name = "_getlatest_" + context.key.replace("-", "_")
if func_name in globals() and isfunction(globals()[func_name]):
context.value = globals()[func_name](context)
else:
raise RuntimeError("{} version for {}/{} is '=latest' but can't look up because method not found: {}".format(
context.key, context.env, context.service_name, func_name))
# precheck to confirm coherent world state before attempting set - whatever that means for the current key type
try:
precheck(context)
except Exception as e:
fail("Precheck failed: {}".format(e.message))
s3_key = "{}/{}/{}".format(context.service_name, context.env, context.key)
s3_version_status = EFConfig.S3_VERSION_STATUS_STABLE if context.stable else EFConfig.S3_VERSION_STATUS_UNDEFINED
# If the set would put a value and status that are the same as the existing 'current' value/status, don't do it
context.limit = 1
current_version = get_versions(context)
# If there is no 'current version' it's ok, just means the set will write the first entry
if len(current_version) == 1 and current_version[0].status == s3_version_status and \
current_version[0].value == context.value:
print("Version not written because current version and new version have identical value and status: {} {}"
.format(current_version[0].value, current_version[0].status))
return
if not context.commit:
print("=== DRY RUN ===\nUse --commit to set value\n=== DRY RUN ===")
print("would set key: {} with value: {} {} {} {} {}".format(
s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status))
else:
context.aws_client("s3").put_object(
ACL='bucket-owner-full-control',
Body=context.value,
Bucket=EFConfig.S3_VERSION_BUCKET,
ContentEncoding=EFConfig.S3_VERSION_CONTENT_ENCODING,
Key=s3_key,
Metadata={
EFConfig.S3_VERSION_BUILDNUMBER_KEY: context.build_number,
EFConfig.S3_VERSION_COMMITHASH_KEY: context.commit_hash,
EFConfig.S3_VERSION_LOCATION_KEY: context.location,
EFConfig.S3_VERSION_MODIFIEDBY_KEY: context.aws_client("sts").get_caller_identity()["Arn"],
EFConfig.S3_VERSION_STATUS_KEY: s3_version_status
},
StorageClass='STANDARD'
)
print("set key: {} with value: {} {} {} {} {}".format(
s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status)) | 0.011688 |
def submit_query_request(end_point, *args, **kwargs):
"""Low level function to format the query string."""
ev_limit = kwargs.pop('ev_limit', 10)
best_first = kwargs.pop('best_first', True)
tries = kwargs.pop('tries', 2)
# This isn't handled by requests because of the multiple identical agent
# keys, e.g. {'agent': 'MEK', 'agent': 'ERK'} which is not supported in
# python, but is allowed and necessary in these query strings.
# TODO because we use the API Gateway, this feature is no longer needed.
# We should just use the requests parameters dict.
query_str = '?' + '&'.join(['%s=%s' % (k, v) for k, v in kwargs.items()
if v is not None]
+ list(args))
return submit_statement_request('get', end_point, query_str,
ev_limit=ev_limit, best_first=best_first,
tries=tries) | 0.001052 |
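For illustration only (the endpoint name and agent keys are hypothetical): keyword arguments that are not None are joined first, followed by the raw positional strings, producing a query string roughly like '?type=Phosphorylation&agent0=MEK&agent1=ERK'.
submit_query_request('from_agents', 'agent0=MEK', 'agent1=ERK', type='Phosphorylation')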
def batch_normalize_with_arguments(x, arguments):
"""Applies batch normalization to x as specified in arguments.
Args:
x: A Pretty Tensor.
arguments: Either a boolean to batch_normalize or a
BatchNormalizationArguments
Returns:
x with batch normalization applied.
"""
x = prettytensor.wrap(x)
# Backwards compatibility.
if isinstance(arguments, bool):
if arguments:
return x.batch_normalize()
else:
return x
# pylint: disable=protected-access
kwargs = arguments._asdict()
defaults = prettytensor._defaults
# pylint: enable=protected-access
for arg in ('learned_moments_update_rate', 'variance_epsilon',
'scale_after_normalization'):
if kwargs.get(arg, None) is None:
if arg in defaults:
kwargs[arg] = defaults[arg]
else:
del kwargs[arg]
return x.batch_normalize(**kwargs) | 0.016968 |
def _set_vowel(self, vowel):
'''
Sets the currently active vowel, e.g. ア.
Vowels act slightly differently from other characters. If one
succeeds the same vowel (or consonant-vowel pair with the same vowel)
then it acts like a long vowel marker. E.g. おねえ becomes onē.
Hence, either we increment the long vowel marker count, or we
flush the current character and set the active character to this.
In some cases, the ウ becomes a consonant-vowel if it's
paired with a small vowel. We will not know this until we see
what comes after the ウ, so there's some backtracking
if that's the case.
'''
vowel_info = kana_lt[vowel]
vowel_ro = self.active_vowel_ro
if self._is_long_vowel(vowel_ro, vowel_info[0]):
# Check to see if the current vowel is ウ. If so,
# we might need to backtrack later on in case the 'u'
# turns into 'w' when ウ is coupled with a small vowel.
if vowel_ro == 'u':
self.has_u_lvm = True
self._inc_lvmarker()
else:
# Not the same, so flush the active character and continue.
self._set_char(vowel, VOWEL)
self.active_vowel_info = vowel_info
self.active_vowel = vowel | 0.001509 |
def post_url(self, url, form):
"""
Internally used to retrieve the contents of a URL using
the POST request method.
The `form` parameter is a mechanize.HTMLForm object
This method will use a POST request type regardless of the method
used in the `form`.
"""
_r = self.br.open(url, form.click_request_data()[1])
# check that we've not been redirected to the login page or an error occurred
if self.br.geturl().startswith(self.AUTH_URL):
raise AuthRequiredException
elif self.br.geturl().startswith(self.ERROR_URL):
raise RequestErrorException
else:
return _r.read() | 0.00431 |
def InputSplines1D(seq_length, n_bases=10, name=None, **kwargs):
"""Input placeholder for array returned by `encodeSplines`
Wrapper for: `keras.layers.Input((seq_length, n_bases), name=name, **kwargs)`
"""
return Input((seq_length, n_bases), name=name, **kwargs) | 0.007168 |
def afni_copy(filename):
''' creates a ``+orig`` copy of the given dataset and returns the filename as a string '''
if nl.pkg_available('afni',True):
afni_filename = "%s+orig" % nl.prefix(filename)
if not os.path.exists(afni_filename + ".HEAD"):
nl.calc(filename,'a',prefix=nl.prefix(filename))
return afni_filename | 0.013928 |
def __create_user_default_pipe_prop(self, extra_info):
"""for internal usage only"""
p = UserDefaultPipeProp()
doc = extra_info.pop('doc', None)
if doc is not None:
extra_info['description'] = doc
for k, v in extra_info.items():
k_lower = k.lower()
method_name = "set_%s" % k_lower.replace(' ', '_')
if hasattr(p, method_name):
method = getattr(p, method_name)
method(str(v))
else:
msg = "Wrong definition of pipe. " \
"The object extra information '%s' " \
"is not recognized!" % (k,)
Except.throw_exception("PyDs_WrongPipeDefinition", msg,
"create_user_default_pipe_prop()")
return p | 0.002375 |
def pubsubhubbub(self, mode, topic, callback, secret=''):
"""Create/update a pubsubhubbub hook.
:param str mode: (required), accepted values: ('subscribe',
'unsubscribe')
:param str topic: (required), form:
https://github.com/:user/:repo/events/:event
:param str callback: (required), the URI that receives the updates
:param str secret: (optional), shared secret key that generates a
SHA1 HMAC of the payload content.
:returns: bool
"""
from re import match
m = match(r'https?://[\w\d\-\.\:]+/\w+/[\w\._-]+/events/\w+', topic)
status = False
if mode and topic and callback and m:
data = [('hub.mode', mode), ('hub.topic', topic),
('hub.callback', callback)]
if secret:
data.append(('hub.secret', secret))
url = self._build_url('hub')
# This is not JSON data. It is meant to be form data
# application/x-www-form-urlencoded works fine here, no need for
# multipart/form-data
status = self._boolean(self._post(url, data=data, json=False), 204,
404)
return status | 0.008814 |
def set_url (self, url):
"""Set the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urlparse.urlparse(url)[1:3] | 0.018293 |
async def fetchrow(self, *args, timeout=None):
"""Execute the statement and return the first row.
:param args: Query arguments
:param float timeout: Optional timeout value in seconds.
:return: The first row as a :class:`Record` instance.
"""
data = await self.__bind_execute(args, 1, timeout)
if not data:
return None
return data[0] | 0.004474 |
def write_local_file(fp, name_bytes, writer, dt):
"""
Writes a zip file local file header structure at the current file position.
Returns data_len, crc32 for the data.
:param fp: the file pointer to which to write the header
:param name_bytes: the name of the file, as bytes
:param writer: a function taking an fp parameter to do the writing, returns crc32
:param dt: the datetime to write to the archive
"""
fp.write(struct.pack('I', 0x04034b50)) # local file header
fp.write(struct.pack('H', 10)) # extract version (default)
fp.write(struct.pack('H', 0)) # general purpose bits
fp.write(struct.pack('H', 0)) # compression method
msdos_date = int(dt.year - 1980) << 9 | int(dt.month) << 5 | int(dt.day)
msdos_time = int(dt.hour) << 11 | int(dt.minute) << 5 | int(dt.second)
fp.write(struct.pack('H', msdos_time)) # last mod file time
fp.write(struct.pack('H', msdos_date)) # last mod file date
crc32_pos = fp.tell()
fp.write(struct.pack('I', 0)) # crc32 placeholder
data_len_pos = fp.tell()
fp.write(struct.pack('I', 0)) # compressed length placeholder
fp.write(struct.pack('I', 0)) # uncompressed length placeholder
fp.write(struct.pack('H', len(name_bytes))) # name length
fp.write(struct.pack('H', 0)) # extra length
fp.write(name_bytes)
data_start_pos = fp.tell()
crc32 = writer(fp)
data_end_pos = fp.tell()
data_len = data_end_pos - data_start_pos
fp.seek(crc32_pos)
fp.write(struct.pack('I', crc32)) # crc32
fp.seek(data_len_pos)
fp.write(struct.pack('I', data_len)) # compressed length
fp.write(struct.pack('I', data_len)) # uncompressed length
fp.seek(data_end_pos)
return data_len, crc32 | 0.001605 |
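A minimal sketch of a writer callback; the assumed contract (taken from the docstring) is that it writes the payload to the given file object and returns the payload's CRC32.
import io
import zlib
from datetime import datetime

def hello_writer(fp):
    payload = b"hello world"
    fp.write(payload)
    return zlib.crc32(payload) & 0xffffffff

buf = io.BytesIO()
data_len, crc = write_local_file(buf, b"hello.txt", hello_writer, datetime(2020, 1, 1, 12, 0, 0))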
def parse_lines(self, lines):
"""
Do the work of parsing each line
"""
indent = ''
level = 0
if lines[0][0] == ' ' or lines[0][0] == '\t':
raise CommandError("The first line in the file cannot start with a space or tab.")
# This keeps track of the current parents at a given level
current_parents = {0: None}
for line in lines:
if len(line) == 0:
continue
if line[0] == ' ' or line[0] == '\t':
if indent == '':
indent = self.get_indent(line)
elif not line[0] in indent:
raise CommandError("You can't mix spaces and tabs for indents")
level = line.count(indent)
current_parents[level] = self.make_category(line, parent=current_parents[level - 1])
else:
# We are back to a zero level, so reset the whole thing
current_parents = {0: self.make_category(line)}
current_parents[0]._tree_manager.rebuild() | 0.004625 |
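A hypothetical input illustrating the expected shape: top-level lines start at column zero, the first indented line fixes the indent unit, and mixing tabs and spaces raises CommandError (`importer` stands for an instance of the command class).
lines = [
    "Electronics",
    "    Phones",
    "    Laptops",
    "Books",
    "    Fiction",
]
# importer.parse_lines(lines) creates Phones/Laptops under Electronics,
# Fiction under Books, then rebuilds the MPTT tree.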
def _load(self, filename):
"""Import all filters from a text file"""
filename = pathlib.Path(filename)
with filename.open() as fd:
data = fd.readlines()
# Get the strings that correspond to self.fileid
bool_head = [l.strip().startswith("[") for l in data]
int_head = np.squeeze(np.where(bool_head))
int_head = np.atleast_1d(int_head)
start = int_head[self.fileid]+1
if len(int_head) > self.fileid+1:
end = int_head[self.fileid+1]
else:
end = len(data)
subdata = data[start:end]
# separate all elements and strip them
subdata = [[it.strip() for it in l.split("=")] for l in subdata]
points = []
for var, val in subdata:
if var.lower() == "x axis":
xaxis = val.lower()
elif var.lower() == "y axis":
yaxis = val.lower()
elif var.lower() == "name":
self.name = val
elif var.lower() == "inverted":
if val == "True":
self.inverted = True
elif var.lower().startswith("point"):
val = np.array(val.strip("[]").split(), dtype=float)
points.append([int(var[5:]), val])
else:
raise KeyError("Unknown variable: {} = {}".
format(var, val))
self.axes = (xaxis, yaxis)
# sort points
points.sort()
# get only coordinates from points
self.points = np.array([p[1] for p in points])
# overwrite unique id
unique_id = int(data[start-1].strip().strip("Polygon []"))
self._set_unique_id(unique_id) | 0.002301 |
def _row_should_be_placed(self, row, position):
""":return: whether to place this instruction"""
placed_row = self._rows_in_grid.get(row)
return placed_row is None or placed_row.y < position.y | 0.009259 |
def write_backup_state_to_json_file(self):
"""Periodically write a JSON state file to disk"""
start_time = time.time()
state_file_path = self.config["json_state_file_path"]
self.state["walreceivers"] = {
key: {"latest_activity": value.latest_activity, "running": value.running,
"last_flushed_lsn": value.last_flushed_lsn}
for key, value in self.walreceivers.items()
}
self.state["pg_receivexlogs"] = {
key: {"latest_activity": value.latest_activity, "running": value.running}
for key, value in self.receivexlogs.items()
}
self.state["pg_basebackups"] = {
key: {"latest_activity": value.latest_activity, "running": value.running}
for key, value in self.basebackups.items()
}
self.state["compressors"] = [compressor.state for compressor in self.compressors]
self.state["transfer_agents"] = [ta.state for ta in self.transfer_agents]
self.state["queues"] = {
"compression_queue": self.compression_queue.qsize(),
"transfer_queue": self.transfer_queue.qsize(),
}
self.log.debug("Writing JSON state file to %r", state_file_path)
write_json_file(state_file_path, self.state)
self.log.debug("Wrote JSON state file to disk, took %.4fs", time.time() - start_time) | 0.005739 |
def split(args):
"""
%prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile
Split MAKER models by checking against predictors (such as AUGUSTUS and
FGENESH). For each region covered by a working model. Find out the
combination of predictors that gives the best accuracy against evidences
(such as PASA).
`split.bed` can be generated by pulling out subset from a list of ids
$ python -m jcvi.formats.base join split.ids working.bed
--column=0,3 --noheader | cut -f2-7 > split.bed
"""
from jcvi.formats.bed import Bed
p = OptionParser(split.__doc__)
p.add_option("--key", default="Name",
help="Key in the attributes to extract predictor.gff [default: %default]")
p.add_option("--parents", default="match",
help="list of features to extract, use comma to separate (e.g."
"'gene,mRNA') [default: %default]")
p.add_option("--children", default="match_part",
help="list of features to extract, use comma to separate (e.g."
"'five_prime_UTR,CDS,three_prime_UTR') [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 5:
sys.exit(not p.print_help())
split_bed, evidences_bed, p1_gff, p2_gff, fastafile = args
parents = opts.parents
children = opts.children
key = opts.key
bed = Bed(split_bed)
s1 = get_splits(split_bed, p1_gff, parents, key)
s2 = get_splits(split_bed, p2_gff, parents, key)
for b in bed:
query = "{0}:{1}-{2}".format(b.seqid, b.start, b.end)
b1 = get_accuracy(query, p1_gff, evidences_bed, fastafile, children, key)
b2 = get_accuracy(query, p2_gff, evidences_bed, fastafile, children, key)
accn = b.accn
c1 = "|".join(s1[accn])
c2 = "|".join(s2[accn])
ac1 = b1.accuracy
ac2 = b2.accuracy
tag = p1_gff if ac1 >= ac2 else p2_gff
tag = tag.split(".")[0]
ac1 = "{0:.3f}".format(ac1)
ac2 = "{0:.3f}".format(ac2)
print("\t".join((accn, tag, ac1, ac2, c1, c2))) | 0.004314 |
def with_condition(self, condition: Callable[[MonitorContext], bool]) -> 'MonitorTask':
"""
Sets the task running condition that will be evaluated during the optimisation cycle.
"""
self._condition = condition
return self | 0.015326 |
def inside(self, x, y):
"""
Check if a field position is inside the map.
:param x: x pos
:param y: y pos
:return: True if (x, y) lies within the map bounds, False otherwise
"""
return 0 <= x < self.width and 0 <= y < self.height | 0.009132 |
async def remove(self, device, force=False, detach=False, eject=False,
lock=False):
"""
Unmount or lock the device depending on device type.
:param device: device object, block device path or mount path
:param bool force: recursively remove all child devices
:param bool detach: detach the root drive
:param bool eject: remove media from the root drive
:param bool lock: lock the associated LUKS cleartext slave
:returns: whether all attempted operations succeeded
"""
device = self._find_device(device)
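        # Dispatch on device type: unmount filesystems, lock crypto devices, and
        # recursively remove children of drives/partition tables when force is set.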
if device.is_filesystem:
if device.is_mounted or not device.is_loop or detach is False:
success = await self.unmount(device)
elif device.is_crypto:
if force and device.is_unlocked:
await self.auto_remove(device.luks_cleartext_holder, force=True)
success = await self.lock(device)
elif (force
and (device.is_partition_table or device.is_drive)
and self.is_handleable(device)):
kw = dict(force=True, detach=detach, eject=eject, lock=lock)
tasks = [
self.auto_remove(child, **kw)
for child in self.get_all_handleable()
if _is_parent_of(device, child)
]
results = await gather(*tasks)
success = all(results)
else:
self._log.info(_('not removing {0}: unhandled device', device))
success = False
# if these operations work, everything is fine, we can return True:
if lock and device.is_luks_cleartext:
device = device.luks_cleartext_slave
if self.is_handleable(device):
success = await self.lock(device)
if eject:
success = await self.eject(device)
if (detach or detach is None) and device.is_loop:
success = await self.delete(device, remove=False)
elif detach:
success = await self.detach(device)
return success | 0.001908 |
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning) | 0.004425 |
def quick_summary(nml2_doc):
'''
    Generate a brief text summary of the document's contents; prefer nml2_doc.summary(show_includes=False) where available.
'''
info = 'Contents of NeuroML 2 document: %s\n'%nml2_doc.id
membs = inspect.getmembers(nml2_doc)
for memb in membs:
if isinstance(memb[1], list) and len(memb[1])>0 \
and not memb[0].endswith('_'):
info+=' %s:\n ['%memb[0]
for entry in memb[1]:
extra = '???'
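                # Label the entry with its id if present, else its href, else its name.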
extra = entry.name if hasattr(entry,'name') else extra
extra = entry.href if hasattr(entry,'href') else extra
extra = entry.id if hasattr(entry,'id') else extra
info+=" %s (%s),"%(entry, extra)
info+=']\n'
return info | 0.018041 |
def _fault_to_exception(f):
""" Converts XML-RPC Fault objects to Pynipap-exceptions.
    TODO: Is this one necessary? Can be done inline...
"""
e = _fault_to_exception_map.get(f.faultCode)
if e is None:
e = NipapError
return e(f.faultString) | 0.003623 |
def _descriptor_changed(self, descriptor):
"""Called when the specified descriptor has changed its value."""
# Tell the descriptor it has a new value to read.
desc = descriptor_list().get(descriptor)
if desc is not None:
desc._value_read.set() | 0.006969 |
def precision_score(gold, pred, pos_label=1, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate precision for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for precision
Returns:
pre: The (float) precision score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
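    # Boolean masks marking predicted-positive and truly-positive elements.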
positives = np.where(pred == pos_label, 1, 0).astype(bool)
trues = np.where(gold == pos_label, 1, 0).astype(bool)
TP = np.sum(positives * trues)
FP = np.sum(positives * np.logical_not(trues))
if TP or FP:
pre = TP / (TP + FP)
else:
pre = 0
return pre | 0.002018 |
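A minimal usage sketch (values are illustrative; assumes the module's _preprocess helper accepts plain NumPy integer-label arrays):
    import numpy as np

    gold = np.array([1, 1, 1, 2, 2])
    pred = np.array([1, 1, 2, 2, 1])
    # Three predictions of class 1, two of which are correct -> precision = 2/3
    precision_score(gold, pred, pos_label=1)  # ~0.667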
def decorate_with_validation(func,
arg_name, # type: str
*validation_func, # type: ValidationFuncs
**kwargs):
# type: (...) -> Callable
"""
This method is the inner method used in `@validate_io`, `@validate_arg` and `@validate_out`.
    It can be used if you wish to perform decoration manually without a decorator.
:param func:
:param arg_name: the name of the argument to validate or _OUT_KEY for output validation
:param validation_func: the validation function or
list of validation functions to use. A validation function may be a callable, a tuple(callable, help_msg_str),
a tuple(callable, failure_type), or a list of several such elements. Nested lists are supported and indicate an
implicit `and_` (such as the main list). Tuples indicate an implicit `_failure_raiser`.
[mini_lambda](https://smarie.github.io/python-mini-lambda/) expressions can be used instead of callables, they
will be transformed to functions automatically.
:param error_type: a subclass of ValidationError to raise in case of validation failure. By default a
ValidationError will be raised with the provided help_msg
:param help_msg: an optional help message to be used in the raised error in case of validation failure.
:param none_policy: describes how None values should be handled. See `NoneArgPolicy` for the various possibilities.
        Default is `NoneArgPolicy.ACCEPT_IF_OPTIONAL_ELSE_REJECT`.
:param kw_context_args: optional contextual information to store in the exception, and that may be also used
to format the help message
    :return: the decorated function, which will perform input validation (using `_assert_input_is_valid`) before
        executing the function's code every time it is called.
"""
error_type, help_msg, none_policy, _constructor_of_cls_ = pop_kwargs(kwargs, [('error_type', None),
('help_msg', None),
('none_policy', None),
('_constructor_of_cls_', None)],
allow_others=True)
# the rest of keyword arguments is used as context.
kw_context_args = kwargs
none_policy = none_policy or NoneArgPolicy.SKIP_IF_NONABLE_ELSE_VALIDATE
# retrieve target function signature
func_sig = signature(func)
# create the new validator
if _constructor_of_cls_ is None:
# standard method: input validator
new_validator = _create_function_validator(func, func_sig, arg_name, *validation_func,
none_policy=none_policy, error_type=error_type,
help_msg=help_msg, **kw_context_args)
else:
# class constructor: field validator
new_validator = _create_function_validator(func, func_sig, arg_name, *validation_func,
none_policy=none_policy, error_type=error_type,
help_msg=help_msg, validated_class=_constructor_of_cls_,
validated_class_field_name=arg_name,
**kw_context_args)
# decorate or update decorator with this new validator
return decorate_with_validators(func, func_signature=func_sig, **{arg_name: new_validator}) | 0.006993 |
def _serve_file(self, abspath, params):
"""Show a file.
The actual content of the file is rendered by _handle_content.
"""
relpath = os.path.relpath(abspath, self._root)
breadcrumbs = self._create_breadcrumbs(relpath)
link_path = urlunparse(['', '', relpath, '', urlencode(params), ''])
args = self._default_template_args('file.html')
args.update({'root_parent': os.path.dirname(self._root),
'breadcrumbs': breadcrumbs,
'link_path': link_path})
content = self._renderer.render_name('base.html', args).encode("utf-8")
self._send_content(content, 'text/html') | 0.001582 |
def is_array(type_):
"""returns True, if type represents C++ array type, False otherwise"""
nake_type = remove_alias(type_)
nake_type = remove_reference(nake_type)
nake_type = remove_cv(nake_type)
return isinstance(nake_type, cpptypes.array_t) | 0.003802 |
def get_argv_for_command(self):
"""
Returns stripped arguments that would be passed into the command.
"""
argv = [a for a in self.argv]
argv.insert(0, self.prog_name)
return argv | 0.00885 |
def get_constraints(self):
"""
Retrieve all of the relevant constraints, aggregated from the pipfile, resolver,
and parent dependencies and their respective conflict resolution where possible.
:return: A set of **InstallRequirement** instances representing constraints
:rtype: Set
"""
constraints = {
c for c in self.resolver.parsed_constraints
if c and c.name == self.entry.name
}
pipfile_constraint = self.get_pipfile_constraint()
if pipfile_constraint:
constraints.add(pipfile_constraint)
return constraints | 0.007874 |
def null_advance_strain(self, blocksize):
""" Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
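        # csize spans the newly advanced samples plus the corruption padding on both ends.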
self.strain.roll(-sample_step)
# We should roll this off at some point too...
self.strain[len(self.strain) - csize + self.corruption:] = 0
self.strain.start_time += blocksize
# The next time we need strain will need to be tapered
self.taper_immediate_strain = True | 0.00311 |
def read_xso(src, xsomap):
"""
Read a single XSO from a binary file-like input `src` containing an XML
document.
`xsomap` must be a mapping which maps :class:`~.XSO` subclasses
to callables. These will be registered at a newly created
:class:`.xso.XSOParser` instance which will be used to parse the document
in `src`.
The `xsomap` is thus used to determine the class parsing the root element
of the XML document. This can be used to support multiple versions.
"""
xso_parser = xso.XSOParser()
for class_, cb in xsomap.items():
xso_parser.add_class(class_, cb)
driver = xso.SAXDriver(xso_parser)
parser = xml.sax.make_parser()
parser.setFeature(
xml.sax.handler.feature_namespaces,
True)
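    # Disable external general entities to guard against XXE when parsing untrusted input.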
parser.setFeature(
xml.sax.handler.feature_external_ges,
False)
parser.setContentHandler(driver)
parser.parse(src) | 0.001085 |
def whos_allowed(self):
"""Returns set containing any entries from principal and condition section.
Example:
statement = Statement(dict(
Effect='Allow',
Principal='arn:aws:iam::*:role/Hello',
Action=['ec2:*'],
Resource='*',
Condition={
'StringLike': {
'AWS:SourceOwner': '012345678910'
}}))
statement.whos_allowed()
> set([
> PrincipalTuple(category='principal', value='arn:aws:iam::*:role/Hello'),
> ConditionTuple(category='account', value='012345678910')])
"""
who = set()
for principal in self.principals:
principal = PrincipalTuple(category='principal', value=principal)
who.add(principal)
who = who.union(self.condition_entries)
return who | 0.006652 |
def render_customizations(self):
"""
Customize template for site user specified customizations
"""
disable_plugins = self.pt.customize_conf.get('disable_plugins', [])
if not disable_plugins:
logger.debug('No site-user specified plugins to disable')
else:
for plugin in disable_plugins:
try:
self.pt.remove_plugin(plugin['plugin_type'], plugin['plugin_name'],
'disabled at user request')
except KeyError:
# Malformed config
logger.info('Invalid custom configuration found for disable_plugins')
enable_plugins = self.pt.customize_conf.get('enable_plugins', [])
if not enable_plugins:
            logger.debug('No site-user specified plugins to enable')
else:
for plugin in enable_plugins:
try:
msg = 'enabled at user request'
self.pt.add_plugin(plugin['plugin_type'], plugin['plugin_name'],
plugin['plugin_args'], msg)
except KeyError:
# Malformed config
logger.info('Invalid custom configuration found for enable_plugins') | 0.004559 |
def dump(self, f, name):
"""Write the attribute to a file-like object"""
array = self.get()
# print the header line
print("% 40s kind=%s shape=(%s)" % (
name,
array.dtype.kind,
",".join([str(int(size_axis)) for size_axis in array.shape]),
), file=f)
# print the numbers
counter = 0
for value in array.flat:
counter += 1
print("% 20s" % value, end=' ', file=f)
if counter % 4 == 0:
print(file=f)
if counter % 4 != 0:
print(file=f) | 0.003317 |
def validate_config(cls, service_config, target):
""" Validate generic options for a particular target """
if service_config.has_option(target, 'only_if_assigned'):
die("[%s] has an 'only_if_assigned' option. Should be "
"'%s.only_if_assigned'." % (target, cls.CONFIG_PREFIX))
if service_config.has_option(target, 'also_unassigned'):
die("[%s] has an 'also_unassigned' option. Should be "
"'%s.also_unassigned'." % (target, cls.CONFIG_PREFIX))
if service_config.has_option(target, 'default_priority'):
die("[%s] has a 'default_priority' option. Should be "
"'%s.default_priority'." % (target, cls.CONFIG_PREFIX))
if service_config.has_option(target, 'add_tags'):
die("[%s] has an 'add_tags' option. Should be "
"'%s.add_tags'." % (target, cls.CONFIG_PREFIX)) | 0.002188 |
def best_representative(d1, d2):
"""
Given two objects each coerced to the most specific type possible, return the one
of the least restrictive type.
>>> best_representative(Decimal('-37.5'), Decimal('0.9999'))
Decimal('-99.9999')
>>> best_representative(None, Decimal('6.1'))
Decimal('6.1')
>>> best_representative(311920, '48-49')
'48-490'
>>> best_representative(6, 'foo')
'foo'
>>> best_representative(Decimal('4.95'), Decimal('6.1'))
Decimal('9.99')
>>> best_representative(Decimal('-1.9'), Decimal('6.1'))
Decimal('-9.9')
"""
if hasattr(d2, 'strip') and not d2.strip():
return d1
if d1 is None:
return d2
elif d2 is None:
return d1
preference = (datetime.datetime, bool, int, Decimal, float, str)
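    # Types ordered from most to least restrictive; a higher index means a less restrictive type.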
worst_pref = 0
worst = ''
for coerced in (d1, d2):
pref = preference.index(type(coerced))
if pref > worst_pref:
worst_pref = pref
worst = set_worst(worst, coerced)
elif pref == worst_pref:
if isinstance(coerced, Decimal):
worst = set_worst(worst, worst_decimal(coerced, worst))
elif isinstance(coerced, float):
worst = set_worst(worst, max(coerced, worst))
else: # int, str
if len(str(coerced)) > len(str(worst)):
worst = set_worst(worst, coerced)
return worst | 0.002083 |
def when(self):
"""
A string describing when the event occurs (in the local time zone).
"""
offset = 0
timeFrom = dateFrom = timeTo = dateTo = None
fromDt = self._getFromDt()
if fromDt is not None:
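            # Day difference between the event's local date and its date in the stored time zone.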
offset = timezone.localtime(fromDt).toordinal() - fromDt.toordinal()
dateFrom, timeFrom = getLocalDateAndTime(fromDt.date(), self.time_from,
self.tz, dt.time.min)
daysDelta = dt.timedelta(days=self.num_days - 1)
dateTo, timeTo = getLocalDateAndTime(fromDt.date() + daysDelta,
self.time_to, self.tz)
if dateFrom == dateTo:
retval = _("{repeat} {atTime}").format(
repeat=self.repeat._getWhen(offset),
atTime=timeFormat(timeFrom, timeTo, gettext("at ")))
else:
localNumDays = (dateTo - dateFrom).days + 1
retval = _("{repeat} {startFinishTime}").format(
repeat=self.repeat._getWhen(offset, localNumDays),
startFinishTime=timeFormat(timeFrom, timeTo,
prefix=gettext("starting at "),
infix=gettext("finishing at")))
return retval.strip() | 0.004367 |
def create(type_dict, *type_parameters):
"""
type_parameters should be:
(name, (alternative1, alternative2, ...))
where name is a string, and the alternatives are all valid serialized
types.
"""
assert len(type_parameters) == 2
name = type_parameters[0]
alternatives = type_parameters[1]
assert isinstance(name, Compatibility.stringy)
assert isinstance(alternatives, (list, tuple))
choice_types = []
for c in alternatives:
choice_types.append(TypeFactory.new(type_dict, *c))
return TypeMetaclass(str(name), (ChoiceContainer,), {'CHOICES': choice_types}) | 0.004862 |
def logical_chassis_fwdl_sanity_input_cluster_options_auto_activate_auto_activate(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_sanity = ET.Element("logical_chassis_fwdl_sanity")
config = logical_chassis_fwdl_sanity
input = ET.SubElement(logical_chassis_fwdl_sanity, "input")
cluster_options = ET.SubElement(input, "cluster-options")
auto_activate = ET.SubElement(cluster_options, "auto-activate")
auto_activate = ET.SubElement(auto_activate, "auto-activate")
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.004471 |
def convert_content(self) -> dict:
"""Convert content of source file into dict result."""
source_content = self.load_content()
converted = {}
        md_extensions = [
            'full_yaml_metadata',
            'markdown.extensions.fenced_code',
        ]
        md = markdown.Markdown(extensions=md_extensions)
content = md.convert(source_content)
meta = md.Meta
if meta:
converted.update(meta)
if content:
converted['content'] = content.strip()
return converted | 0.00363 |
def id_mods(obj,modlist,intrinsic_mods={},submodlist=[]):
"""
Match USE statements up with the right modules
"""
for i in range(len(obj.uses)):
for candidate in modlist:
if obj.uses[i][0].lower() == candidate.name.lower():
obj.uses[i] = [candidate, obj.uses[i][1]]
break
else:
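            # No project module matched; fall back to the intrinsic modules, if any.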
if obj.uses[i][0].lower() in intrinsic_mods:
obj.uses[i] = [intrinsic_mods[obj.uses[i][0].lower()], obj.uses[i][1]]
continue
if getattr(obj,'ancestor',None):
for submod in submodlist:
if obj.ancestor.lower() == submod.name.lower():
obj.ancestor = submod
break
if hasattr(obj,'ancestor_mod'):
for mod in modlist:
if obj.ancestor_mod.lower() == mod.name.lower():
obj.ancestor_mod = mod
break
for modproc in getattr(obj,'modprocedures',[]):
id_mods(modproc,modlist,intrinsic_mods)
for func in getattr(obj,'functions',[]):
id_mods(func,modlist,intrinsic_mods)
for subroutine in getattr(obj,'subroutines',[]):
id_mods(subroutine,modlist,intrinsic_mods) | 0.016779 |
def signature_string_parts(posargs, optargs, mod='!r'):
"""Return stringified arguments as tuples.
Parameters
----------
posargs : sequence
Positional argument values, always included in the returned string
tuple.
optargs : sequence of 3-tuples
Optional arguments with names and defaults, given in the form::
[(name1, value1, default1), (name2, value2, default2), ...]
Only those parameters that are different from the given default
are included as ``name=value`` keyword pairs.
**Note:** The comparison is done by using ``if value == default:``,
which is not valid for, e.g., NumPy arrays.
mod : string or callable or sequence, optional
Format modifier(s) for the argument strings.
In its most general form, ``mod`` is a sequence of 2 sequences
``pos_mod, opt_mod`` with ``len(pos_mod) == len(posargs)`` and
``len(opt_mod) == len(optargs)``. Each entry ``m`` in those sequences
can be a string, resulting in the following stringification
of ``arg``::
arg_fmt = {{{}}}.format(m)
arg_str = arg_fmt.format(arg)
For a callable ``to_str``, the stringification is simply
``arg_str = to_str(arg)``.
The entries ``pos_mod, opt_mod`` of ``mod`` can also be strings
or callables instead of sequences, in which case the modifier
applies to all corresponding arguments.
Finally, if ``mod`` is a string or callable, it is applied to
all arguments.
The default behavior is to apply the "{!r}" (``repr``) conversion.
For floating point scalars, the number of digits printed is
determined by the ``precision`` value in NumPy's printing options,
which can be temporarily modified with `npy_printoptions`.
Returns
-------
pos_strings : tuple of str
The stringified positional arguments.
opt_strings : tuple of str
The stringified optional arguments, not including the ones
equal to their respective defaults.
"""
# Convert modifiers to 2-sequence of sequence of strings
if is_string(mod) or callable(mod):
pos_mod = opt_mod = mod
else:
pos_mod, opt_mod = mod
mods = []
for m, args in zip((pos_mod, opt_mod), (posargs, optargs)):
if is_string(m) or callable(m):
mods.append([m] * len(args))
else:
if len(m) == 1:
mods.append(m * len(args))
elif len(m) == len(args):
mods.append(m)
else:
raise ValueError('sequence length mismatch: '
'len({}) != len({})'.format(m, args))
pos_mod, opt_mod = mods
precision = np.get_printoptions()['precision']
# Stringify values, treating strings specially
posargs_conv = []
for arg, modifier in zip(posargs, pos_mod):
if callable(modifier):
posargs_conv.append(modifier(arg))
elif is_string(arg):
# Preserve single quotes for strings by default
if modifier:
fmt = '{{{}}}'.format(modifier)
else:
fmt = "'{}'"
posargs_conv.append(fmt.format(arg))
elif np.isscalar(arg) and str(arg) in ('inf', 'nan'):
# Make sure the string quotes are added
posargs_conv.append("'{}'".format(arg))
elif (np.isscalar(arg) and
np.array(arg).real.astype('int64') != arg and
modifier in ('', '!s', '!r')):
# Floating point value, use Numpy print option 'precision'
fmt = '{{:.{}}}'.format(precision)
posargs_conv.append(fmt.format(arg))
else:
# All non-string types are passed through a format conversion
fmt = '{{{}}}'.format(modifier)
posargs_conv.append(fmt.format(arg))
# Build 'key=value' strings for values that are not equal to default
optargs_conv = []
for (name, value, default), modifier in zip(optargs, opt_mod):
if value == default:
# Don't include
continue
# See above on str and repr
if callable(modifier):
optargs_conv.append('{}={}'.format(name, modifier(value)))
elif is_string(value):
if modifier:
fmt = '{{{}}}'.format(modifier)
else:
fmt = "'{}'"
value_str = fmt.format(value)
optargs_conv.append('{}={}'.format(name, value_str))
elif np.isscalar(value) and str(value) in ('inf', 'nan'):
# Make sure the string quotes are added
optargs_conv.append("{}='{}'".format(name, value))
elif (np.isscalar(value) and
np.array(value).real.astype('int64') != value and
modifier in ('', '!s', '!r')):
fmt = '{{:.{}}}'.format(precision)
value_str = fmt.format(value)
optargs_conv.append('{}={}'.format(name, value_str))
else:
fmt = '{{{}}}'.format(modifier)
value_str = fmt.format(value)
optargs_conv.append('{}={}'.format(name, value_str))
return tuple(posargs_conv), tuple(optargs_conv) | 0.00019 |
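An illustrative call with the default '!r' modifier (the argument names and values below are made up):
    pos_strs, opt_strs = signature_string_parts(
        ['hello', 42],
        [('dtype', 'float32', 'float64'), ('copy', True, True)])
    # pos_strs == ("'hello'", '42')
    # opt_strs == ("dtype='float32'",)  -- 'copy' is dropped because it equals its default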
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
if self.limits != {}:
return self.limits
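        # Build the limits on first access and cache them for later calls.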
limits = {}
limits.update(self._get_limits_instances())
limits.update(self._get_limits_networking())
limits.update(self._get_limits_spot())
self.limits = limits
return self.limits | 0.003663 |
async def read_frame(self, stream_id: int) -> bytes:
"""Read a single frame of data from the specified stream, waiting until
frames are available if none are present in the local buffer. If the
stream is closed and all buffered frames have been consumed, raises a
StreamConsumedError.
"""
stream = self._get_stream(stream_id)
frame = await stream.read_frame()
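        # Acknowledge the flow-controlled bytes we consumed so the sender's window reopens.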
if frame.flow_controlled_length > 0:
self._acknowledge_data(frame.flow_controlled_length, stream_id)
return frame.data | 0.003559 |
def validate_none(b):
"""Validate that None is given
Parameters
----------
b: {None, 'none'}
None or string (the case is ignored)
Returns
-------
None
Raises
------
ValueError"""
if isinstance(b, six.string_types):
b = b.lower()
if b is None or b == 'none':
return None
else:
raise ValueError('Could not convert "%s" to None' % b) | 0.002398 |
def get_disk_usage(path):
"""Return disk usage associated with path."""
try:
total, free = _psutil_mswindows.get_disk_usage(path)
except WindowsError:
err = sys.exc_info()[1]
if not os.path.exists(path):
raise OSError(errno.ENOENT, "No such file or directory: '%s'" % path)
raise
used = total - free
percent = usage_percent(used, total, _round=1)
return nt_diskinfo(total, used, free, percent) | 0.004338 |
def writes(notebook, fmt, version=nbformat.NO_CONVERT, **kwargs):
"""Write a notebook to a string"""
metadata = deepcopy(notebook.metadata)
rearrange_jupytext_metadata(metadata)
fmt = copy(fmt)
fmt = long_form_one_format(fmt, metadata)
ext = fmt['extension']
format_name = fmt.get('format_name')
jupytext_metadata = metadata.get('jupytext', {})
if ext == '.ipynb':
# Remove jupytext section if empty
jupytext_metadata.pop('text_representation', {})
if not jupytext_metadata:
metadata.pop('jupytext', {})
return nbformat.writes(new_notebook(cells=notebook.cells, metadata=metadata), version, **kwargs)
if not format_name:
format_name = format_name_for_ext(metadata, ext, explicit_default=False)
if format_name:
fmt['format_name'] = format_name
update_jupytext_formats_metadata(metadata, fmt)
writer = TextNotebookConverter(fmt)
return writer.writes(notebook, metadata) | 0.003024 |
def decode_entities(html):
"""
Remove HTML entities from a string.
Adapted from http://effbot.org/zone/re-sub.htm#unescape-html
"""
def decode(m):
html = m.group(0)
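        # Numeric character references start with "&#" (hexadecimal if "&#x"); anything else is a named entity.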
if html[:2] == "&#":
try:
if html[:3] == "&#x":
return chr(int(html[3:-1], 16))
else:
return chr(int(html[2:-1]))
except ValueError:
pass
else:
try:
html = chr(name2codepoint[html[1:-1]])
except KeyError:
pass
return html
return re.sub("&#?\w+;", decode, html.replace("&", "&")) | 0.002981 |
def run_parallel(self):
"""Perform the computation in parallel, reading results from the output
queue and passing them to ``process_result``.
"""
try:
self.start_parallel()
result = self.empty_result(*self.context)
while self.num_processes > 0:
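                # Each worker sends one POISON_PILL when it finishes; stop once every worker has reported.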
r = self.result_queue.get()
self.maybe_put_task()
if r is POISON_PILL:
self.num_processes -= 1
elif isinstance(r, ExceptionWrapper):
r.reraise()
else:
result = self.process_result(r, result)
self.progress.update(1)
# Did `process_result` decide to terminate early?
if self.done:
self.complete.set()
self.finish_parallel()
except Exception:
raise
finally:
log.debug('Removing progress bar')
self.progress.close()
return result | 0.001919 |