text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def task_add(self, t, periodic=None):
"""
Register a task in this legion. "periodic" should be None, or
a callback function which will be called periodically when the
legion is otherwise idle.
"""
name = t.get_name()
if name in self._tasknames:
raise TaskError(name, 'Task already exists with %d daemon%s active' %
(len(self._tasknames), ses(len(self._tasknames))))
self._tasknames[name] = (t, periodic)
self._tasks.add(t) | 0.00565 |
def _apply_ide_controller_config(ide_controller_label, operation,
key, bus_number=0):
'''
Returns a vim.vm.device.VirtualDeviceSpec object specifying to add/edit an
IDE controller
ide_controller_label
Controller label of the IDE adapter
operation
Type of operation: add or edit
key
Unique key of the device
bus_number
Device bus number property
'''
log.trace('Configuring IDE controller ide_controller_label=%s',
ide_controller_label)
ide_spec = vim.vm.device.VirtualDeviceSpec()
ide_spec.device = vim.vm.device.VirtualIDEController()
if operation == 'add':
ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
if operation == 'edit':
ide_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
ide_spec.device.key = key
ide_spec.device.busNumber = bus_number
if ide_controller_label:
ide_spec.device.deviceInfo = vim.Description()
ide_spec.device.deviceInfo.label = ide_controller_label
ide_spec.device.deviceInfo.summary = ide_controller_label
return ide_spec | 0.000852 |
def _scale_jobs_to_memory(jobs, mem_per_core, sysinfo):
"""When scheduling jobs with single cores, avoid overscheduling due to memory.
"""
if "cores" not in sysinfo:
return jobs, 1.0
sys_mem_per_core = float(sysinfo["memory"]) / float(sysinfo["cores"])
if sys_mem_per_core < mem_per_core:
pct = sys_mem_per_core / float(mem_per_core)
target_jobs = int(math.floor(jobs * pct))
return max(target_jobs, 1), pct
else:
return jobs, 1.0 | 0.004049 |
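A quick sanity check of the scaling above, using a made-up sysinfo dict (the "cores"/"memory" keys mirror the ones the function reads; the numbers and units are assumptions):
sysinfo = {"cores": 16, "memory": 32}               # hypothetical 16-core, 32 GB machine
jobs, pct = _scale_jobs_to_memory(16, 3, sysinfo)   # request 3 GB per core
print(jobs, round(pct, 2))                          # -> 10 0.67, i.e. floor(16 * 2/3) jobs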
def get(cls, resource_type):
"""Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will
be created in the database and returned
Args:
resource_type (str): Resource type name
Returns:
:obj:`ResourceType`
"""
if isinstance(resource_type, str):
obj = getattr(db, cls.__name__).find_one(cls.resource_type == resource_type)
elif isinstance(resource_type, int):
obj = getattr(db, cls.__name__).find_one(cls.resource_type_id == resource_type)
elif isinstance(resource_type, cls):
return resource_type
else:
obj = None
if not obj:
obj = cls()
obj.resource_type = resource_type
db.session.add(obj)
db.session.commit()
db.session.refresh(obj)
return obj | 0.005459 |
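A hedged usage sketch of the get-or-create lookup above, assuming `get` is exposed as a classmethod (as its `cls` parameter suggests); the type name is made up:
rt = ResourceType.get('aws_ec2_instance')    # existing row, or a newly committed one
same = ResourceType.get(rt)                  # passing an instance returns it unchanged
assert rt is same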
def run(self, data_cb):
"""Run the event loop."""
if self._error:
err = self._error
if isinstance(self._error, KeyboardInterrupt):
                # KeyboardInterrupt is not destructive (it may be used in
                # the REPL).
                # After throwing KeyboardInterrupt, clean up the _error field
                # so the loop may be started again
self._error = None
raise err
self._on_data = data_cb
if threading.current_thread() == main_thread:
self._setup_signals([signal.SIGINT, signal.SIGTERM])
debug('Entering event loop')
self._run()
debug('Exited event loop')
if threading.current_thread() == main_thread:
self._teardown_signals()
signal.signal(signal.SIGINT, default_int_handler)
self._on_data = None | 0.002268 |
def _is_bugged_tarfile(self):
"""
Check for tar file that tarfile library mistakenly reports as invalid.
Happens with tar files created on FAT systems. See:
http://stackoverflow.com/questions/25552162/tarfile-readerror-file-could-not-be-opened-successfully
"""
try:
output = subprocess.check_output(['file', '-z', self.destination]).decode('utf8')
return 'tar archive' in output and 'gzip compressed data' in output
except subprocess.CalledProcessError:
return False | 0.005367 |
def get_objects_by_offset(self, start):
"""
Find objects covering the given region offset.
:param start:
:return:
"""
_, container = self._get_container(start)
if container is None:
return set()
else:
return container.internal_objects | 0.006173 |
def batch_process_data(file_roots, **kwargs):
"""Process output from many nested sampling runs in parallel with optional
error handling and caching.
The result can be cached using the 'save_name', 'save' and 'load' kwargs
(by default this is not done). See save_load_result docstring for more
details.
Remaining kwargs passed to parallel_utils.parallel_apply (see its
docstring for more details).
Parameters
----------
file_roots: list of strs
file_roots for the runs to load.
base_dir: str, optional
path to directory containing files.
process_func: function, optional
function to use to process the data.
func_kwargs: dict, optional
additional keyword arguments for process_func.
errors_to_handle: error or tuple of errors, optional
which errors to catch when they occur in processing rather than
raising.
save_name: str or None, optional
See nestcheck.io_utils.save_load_result.
save: bool, optional
See nestcheck.io_utils.save_load_result.
load: bool, optional
See nestcheck.io_utils.save_load_result.
overwrite_existing: bool, optional
See nestcheck.io_utils.save_load_result.
Returns
-------
list of ns_run dicts
List of nested sampling runs in dict format (see the module
docstring for more details).
"""
base_dir = kwargs.pop('base_dir', 'chains')
process_func = kwargs.pop('process_func', process_polychord_run)
func_kwargs = kwargs.pop('func_kwargs', {})
func_kwargs['errors_to_handle'] = kwargs.pop('errors_to_handle', ())
data = nestcheck.parallel_utils.parallel_apply(
process_error_helper, file_roots, func_args=(base_dir, process_func),
func_kwargs=func_kwargs, **kwargs)
# Sort processed runs into the same order as file_roots (as parallel_apply
# does not preserve order)
data = sorted(data,
key=lambda x: file_roots.index(x['output']['file_root']))
# Extract error information and print
errors = {}
for i, run in enumerate(data):
if 'error' in run:
try:
errors[run['error']].append(i)
except KeyError:
errors[run['error']] = [i]
for error_name, index_list in errors.items():
message = (error_name + ' processing ' + str(len(index_list)) + ' / '
+ str(len(file_roots)) + ' files')
if len(index_list) != len(file_roots):
message += ('. Roots with errors have (zero based) indexes: '
+ str(index_list))
print(message)
# Return runs which did not have errors
return [run for run in data if 'error' not in run] | 0.000364 |
def venn3_circles(subsets, normalize_to=1.0, alpha=1.0, color='black', linestyle='solid', linewidth=2.0, ax=None, **kwargs):
'''
Plots only the three circles for the corresponding Venn diagram.
Useful for debugging or enhancing the basic venn diagram.
parameters ``subsets``, ``normalize_to`` and ``ax`` are the same as in venn3()
kwargs are passed as-is to matplotlib.patches.Circle.
returns a list of three Circle patches.
>>> plot = venn3_circles({'001': 10, '100': 20, '010': 21, '110': 13, '011': 14})
>>> plot = venn3_circles([set(['A','B','C']), set(['A','D','E','F']), set(['D','G','H'])])
'''
# Prepare parameters
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in ['100', '010', '110', '001', '101', '011', '111']]
elif len(subsets) == 3:
subsets = compute_venn3_subsets(*subsets)
areas = compute_venn3_areas(subsets, normalize_to)
centers, radii = solve_venn3_circles(areas)
if ax is None:
ax = gca()
prepare_venn_axes(ax, centers, radii)
result = []
for (c, r) in zip(centers, radii):
circle = Circle(c, r, alpha=alpha, edgecolor=color, facecolor='none', linestyle=linestyle, linewidth=linewidth, **kwargs)
ax.add_patch(circle)
result.append(circle)
return result | 0.005275 |
def edge(self, from_node, to_node, edge_type="", **args):
"""draw an edge from a node to another.
"""
self._stream.write(
'%s%sedge: {sourcename:"%s" targetname:"%s"'
% (self._indent, edge_type, from_node, to_node)
)
self._write_attributes(EDGE_ATTRS, **args)
self._stream.write("}\n") | 0.005602 |
def push(cpu, value, size):
"""
Writes a value in the stack.
:param value: the value to put in the stack.
        :param size: the size of the value, in bits.
"""
assert size in (8, 16, cpu.address_bit_size)
cpu.STACK = cpu.STACK - size // 8
base, _, _ = cpu.get_descriptor(cpu.read_register('SS'))
address = cpu.STACK + base
cpu.write_int(address, value, size) | 0.004706 |
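A complementary pop sketch, assuming the same cpu interface push() relies on (STACK, get_descriptor, read_register) plus a read_int counterpart to write_int:
def pop(cpu, size):
    """
    Reads a value from the stack and adjusts the stack pointer.
    :param size: the size of the value, in bits.
    """
    assert size in (8, 16, cpu.address_bit_size)
    base, _, _ = cpu.get_descriptor(cpu.read_register('SS'))
    value = cpu.read_int(cpu.STACK + base, size)
    cpu.STACK = cpu.STACK + size // 8
    return value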
def timescales(self):
r""" Relaxation timescales of the hidden transition matrix
Returns
-------
ts : ndarray(m)
relaxation timescales in units of the input trajectory time step,
defined by :math:`-tau / ln | \lambda_i |, i = 2,...,nstates`, where
:math:`\lambda_i` are the hidden transition matrix eigenvalues.
"""
from msmtools.analysis.dense.decomposition import timescales_from_eigenvalues as _timescales
self._ensure_spectral_decomposition()
ts = _timescales(self._eigenvalues, tau=self._lag)
return ts[1:] | 0.006431 |
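A minimal numeric sketch of the same formula without msmtools; the eigenvalues and lag are made up:
import numpy as np

lag = 10
eigenvalues = np.array([1.0, 0.95, 0.80])     # hypothetical hidden transition matrix spectrum
ts = -lag / np.log(np.abs(eigenvalues[1:]))   # -tau / ln|lambda_i| for i = 2, ..., nstates
print(ts)                                     # approximately [194.96  44.81]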
def delete_agent_queue(self, queue_id, project=None):
"""DeleteAgentQueue.
[Preview API] Removes an agent queue from a project.
:param int queue_id: The agent queue to remove
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if queue_id is not None:
route_values['queueId'] = self._serialize.url('queue_id', queue_id, 'int')
self._send(http_method='DELETE',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='5.1-preview.1',
route_values=route_values) | 0.005479 |
def parse_map_Kd(self):
"""Diffuse map"""
Kd = os.path.join(self.dir, " ".join(self.values[1:]))
self.this_material.set_texture(Kd) | 0.012903 |
def _complete_cases(self, text, line, istart, iend):
"""Returns the completion list of possible test cases for the active unit test."""
if text == "":
return list(self.live.keys())
else:
return [c for c in self.live if c.startswith(text)] | 0.01049 |
def insert(self, i, x):
"""Insert an item (x) at a given position (i)."""
if i == len(self): # end of list or empty list: append
self.append(x)
elif len(self.matches) > i:
# create a new xml node at the requested position
insert_index = self.matches[i].getparent().index(self.matches[i])
_create_xml_node(self.xast, self.node, self.context, insert_index)
# then use default set logic
self[i] = x
else:
raise IndexError("Can't insert '%s' at index %d - list length is only %d" \
% (x, i, len(self))) | 0.007776 |
def cluster(data, sample, nthreads, force):
"""
Calls vsearch for clustering. cov varies by data type, values were chosen
based on experience, but could be edited by users
"""
## get the dereplicated reads
if "reference" in data.paramsdict["assembly_method"]:
derephandle = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq")
## In the event all reads for all samples map successfully then clustering
## the unmapped reads makes no sense, so just bail out.
if not os.stat(derephandle).st_size:
## In this case you do have to create empty, dummy vsearch output
## files so building_clusters will not fail.
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
for f in [uhandle, usort, hhandle]:
open(f, 'a').close()
return
else:
derephandle = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
## create handles for the outfiles
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
temphandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
## If derep file doesn't exist then bail out
if not os.path.isfile(derephandle):
LOGGER.warn("Bad derephandle - {}".format(derephandle))
raise IPyradError("Input file for clustering doesn't exist - {}"\
.format(derephandle))
## testing one sample fail
#if sample.name == "1C_0":
# x
## datatype specific optimization
## minsl: the percentage of the seed that must be matched
## smaller values for RAD/ddRAD where we might want to combine, say 50bp
## reads and 100bp reads in the same analysis.
## query_cov: the percentage of the query sequence that must match seed
## smaller values are needed for gbs where only the tips might overlap
## larger values for pairgbs where they should overlap near completely
## small minsl and high query cov allows trimmed reads to match to untrim
## seed for rad/ddrad/pairddrad.
strand = "plus"
cov = 0.75
minsl = 0.5
if data.paramsdict["datatype"] in ["gbs", "2brad"]:
strand = "both"
cov = 0.5
minsl = 0.5
elif data.paramsdict["datatype"] == 'pairgbs':
strand = "both"
cov = 0.75
minsl = 0.75
## If this value is not null (which is the default) then override query cov
if data._hackersonly["query_cov"]:
cov = str(data._hackersonly["query_cov"])
assert float(cov) <= 1, "query_cov must be <= 1.0"
## get call string
cmd = [ipyrad.bins.vsearch,
"-cluster_smallmem", derephandle,
"-strand", strand,
"-query_cov", str(cov),
"-id", str(data.paramsdict["clust_threshold"]),
"-minsl", str(minsl),
"-userout", uhandle,
"-userfields", "query+target+id+gaps+qstrand+qcov",
"-maxaccepts", "1",
"-maxrejects", "0",
"-threads", str(nthreads),
"-notmatched", temphandle,
"-fasta_width", "0",
"-fastq_qmax", "100",
"-fulldp",
"-usersort"]
## not sure what the benefit of this option is exactly, needs testing,
## might improve indel detection on left side, but we don't want to enforce
    ## aligning on left side if not necessary, since quality trimmed reads
## might lose bases on left side in step2 and no longer align.
#if data.paramsdict["datatype"] in ["rad", "ddrad", "pairddrad"]:
# cmd += ["-leftjust"]
## run vsearch
LOGGER.debug("%s", cmd)
proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
## This is long running so we wrap it to make sure we can kill it
try:
res = proc.communicate()[0]
except KeyboardInterrupt:
proc.kill()
raise KeyboardInterrupt
## check for errors
if proc.returncode:
LOGGER.error("error %s: %s", cmd, res)
raise IPyradWarningExit("cmd {}: {}".format(cmd, res)) | 0.008036 |
def queue_draw_item(self, *items):
"""Extends the base class method to allow Ports to be passed as item
:param items: Items that are to be redrawn
"""
gaphas_items = []
for item in items:
if isinstance(item, Element):
gaphas_items.append(item)
else:
try:
gaphas_items.append(item.parent)
except AttributeError:
pass
super(ExtendedGtkView, self).queue_draw_item(*gaphas_items) | 0.003731 |
def flatten_list(multiply_list):
"""
    Flatten a nested list::
        >>> a = [1, 2, [3, 4], [[5, 6], [7, 8]]]
        >>> flatten_list(a)
        [1, 2, 3, 4, 5, 6, 7, 8]
    :param multiply_list: an arbitrarily nested list
    :return: a flat, single-level list
"""
if isinstance(multiply_list, list):
return [rv for l in multiply_list for rv in flatten_list(l)]
else:
return [multiply_list] | 0.005263 |
def event_handler(msg: EventMsgDict) -> Event:
"""Handle events emitted on browser."""
e = create_event_from_msg(msg)
if e.currentTarget is None:
if e.type not in ['mount', 'unmount']:
id = msg['currentTarget']['id']
logger.warning('No such element: wdom_id={}'.format(id))
return e
e.currentTarget.on_event_pre(e)
e.currentTarget.dispatchEvent(e)
return e | 0.002381 |
def fit_predict(self, y_prob, cost_mat, y_true_cal=None, y_prob_cal=None):
""" Calculate the prediction using the Bayes minimum risk classifier.
Parameters
----------
y_prob : array-like of shape = [n_samples, 2]
Predicted probabilities.
cost_mat : array-like of shape = [n_samples, 4]
Cost matrix of the classification problem
Where the columns represents the costs of: false positives, false negatives,
true positives and true negatives, for each example.
y_true_cal : array-like of shape = [n_samples], optional default = None
True class to be used for calibrating the probabilities
y_prob_cal : array-like of shape = [n_samples, 2], optional default = None
Predicted probabilities to be used for calibrating the probabilities
Returns
-------
y_pred : array-like of shape = [n_samples]
Predicted class
"""
#TODO: Check input
if self.calibration:
self.cal = ROCConvexHull()
if y_prob_cal is None:
y_prob_cal = y_prob
self.cal.fit(y_true_cal, y_prob_cal[:, 1])
y_prob[:, 1] = self.cal.predict_proba(y_prob[:, 1])
y_prob[:, 0] = 1 - y_prob[:, 1]
# t_BMR = (cost_fp - cost_tn) / (cost_fn - cost_tn - cost_tp + cost_fp)
# cost_mat[FP,FN,TP,TN]
t_bmr = (cost_mat[:, 0] - cost_mat[:, 3]) / (cost_mat[:, 1] - cost_mat[:, 3] - cost_mat[:, 2] + cost_mat[:, 0])
        y_pred = np.greater(y_prob[:, 1], t_bmr).astype(float)
return y_pred | 0.004266 |
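A worked example of the threshold formula on a single made-up cost row (columns ordered FP, FN, TP, TN as documented above):
import numpy as np

cost_mat = np.array([[4.0, 10.0, 0.0, 0.0]])   # FP=4, FN=10, TP=0, TN=0
t_bmr = (cost_mat[:, 0] - cost_mat[:, 3]) / (
    cost_mat[:, 1] - cost_mat[:, 3] - cost_mat[:, 2] + cost_mat[:, 0])
print(t_bmr)                                   # [0.2857...]: predict class 1 only when P(y=1) > 2/7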
def find_revision_id(self, revision=None):
"""Find the global revision id of the given revision."""
# Make sure the local repository exists.
self.create()
# Try to find the revision id of the specified revision.
revision = self.expand_branch_name(revision)
output = self.context.capture('git', 'rev-parse', revision)
# Validate the `git rev-parse' output.
return self.ensure_hexadecimal_string(output, 'git rev-parse') | 0.004149 |
def _query(self, url, **kwargs):
"""
All query methods have the same logic, so don't repeat it! Query the URL, parse the response as JSON,
and check for errors. If all goes well, return the parsed JSON.
"""
parameters = YelpAPI._get_clean_parameters(kwargs)
response = self._yelp_session.get(
url,
headers=self._headers,
params=parameters,
timeout=self._timeout_s,
)
response_json = response.json() # shouldn't happen, but this will raise a ValueError if the response isn't JSON
# Yelp can return one of many different API errors, so check for one of them.
# The Yelp Fusion API does not yet have a complete list of errors, but this is on the TODO list; see
# https://github.com/Yelp/yelp-fusion/issues/95 for more info.
if 'error' in response_json:
raise YelpAPI.YelpAPIError('{}: {}'.format(response_json['error']['code'],
response_json['error']['description']))
# we got a good response, so return
return response_json | 0.006903 |
def maybe_get_common_dtype(arg_list):
"""Return common dtype of arg_list, or None.
Args:
arg_list: an iterable of items which are either `None` or have a `dtype`
property.
Returns:
dtype: The common dtype of items in `arg_list`, or `None` if the list is
empty or all items are `None`.
"""
# Note that `all` defaults to `True` if `arg_list` is empty.
if all(a is None for a in arg_list):
return None
return dtype_util.common_dtype(arg_list, tf.float32) | 0.010183 |
def _check_consumer(self):
"""
Validates the :attr:`.consumer`.
"""
# 'magic' using _kwarg method
# pylint:disable=no-member
if not self.consumer.key:
raise ConfigError(
'Consumer key not specified for provider {0}!'.format(
self.name))
if not self.consumer.secret:
raise ConfigError(
'Consumer secret not specified for provider {0}!'.format(
self.name)) | 0.003937 |
def extract_features(self, phrase):
"""
This function will extract features from the phrase being used.
        Currently, the features we are extracting are unigrams of the text corpus.
"""
words = nltk.word_tokenize(phrase)
features = {}
for word in words:
features['contains(%s)' % word] = (word in words)
return features | 0.012469 |
def setCurrentPlugin( self, plugin ):
"""
        Sets the current plugin item to the inputted plugin.
:param plugin | <XConfigPlugin> || None
"""
if ( not plugin ):
self.uiPluginTREE.setCurrentItem(None)
return
for i in range(self.uiPluginTREE.topLevelItemCount()):
item = self.uiPluginTREE.topLevelItem(i)
for c in range(item.childCount()):
pitem = item.child(c)
if ( pitem.plugin() == plugin ):
self.uiPluginTREE.setCurrentItem(pitem) | 0.017742 |
def _setup_ssh(self):
"""Initializes the connection to the server via SSH."""
global paramiko
        if paramiko is None:
import paramiko
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.server, username=self.user, pkey=self.pkey) | 0.005682 |
def extract_first_jpeg_in_pdf(fstream):
"""
Reads a given PDF file and scans for the first valid embedded JPEG image.
Returns either None (if none found) or a string of data for the image.
There is no 100% guarantee for this code, yet it seems to work fine with most
scanner-produced images around.
More testing might be needed though.
Note that in principle there is no serious problem extracting PNGs or other image types from PDFs,
however at the moment I do not have enough test data to try this, and the one I have seems to be unsuitable
for PDFMiner.
:param fstream: Readable binary stream of the PDF
:return: binary stream, containing the whole contents of the JPEG image or None if extraction failed.
"""
parser = PDFParser(fstream)
if PY2:
document = PDFDocument(parser)
else:
document = PDFDocument()
parser.set_document(document)
document.set_parser(parser)
document.initialize('')
rsrcmgr = PDFResourceManager()
device = PDFPageAggregator(rsrcmgr)
interpreter = PDFPageInterpreter(rsrcmgr, device)
pages = PDFPage.create_pages(document) if PY2 else document.get_pages()
for page in pages:
interpreter.process_page(page)
layout = device.result
for el in layout:
if isinstance(el, LTFigure):
for im in el:
if isinstance(im, LTImage):
# Found one!
st = None
try:
imdata = im.stream.get_data()
except:
# Failed to decode (seems to happen nearly always - there's probably a bug in PDFMiner), oh well...
imdata = im.stream.get_rawdata()
if imdata is not None and imdata.startswith(b'\xff\xd8\xff\xe0'):
return imdata
return None | 0.004071 |
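A hedged usage sketch; the input and output file names are made up:
with open('scan.pdf', 'rb') as f:
    jpeg_bytes = extract_first_jpeg_in_pdf(f)
if jpeg_bytes is not None:
    with open('page1.jpg', 'wb') as out:
        out.write(jpeg_bytes)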
def oei(cn, ns=None, lo=None, di=None, iq=None, ico=None, pl=None, fl=None,
fs=None, ot=None, coe=None, moc=None):
# pylint: disable=too-many-arguments, redefined-outer-name
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.OpenEnumerateInstances`.
Open an enumeration session to enumerate the instances of a class (including
instances of its subclasses) in a namespace.
Use the :func:`~wbemcli.piwp` function to retrieve the next set of
    instances or the :func:`~wbemcli.ce` function to close the enumeration
session before it is complete.
Parameters:
cn (:term:`string` or :class:`~pywbem.CIMClassName`):
Name of the class to be enumerated (case independent).
If specified as a `CIMClassName` object, its `host` attribute will be
ignored.
ns (:term:`string`):
Name of the CIM namespace to be used (case independent).
If `None`, defaults to the namespace of the `cn` parameter if
specified as a `CIMClassName`, or to the default namespace of the
connection.
lo (:class:`py:bool`):
LocalOnly flag: Exclude inherited properties.
`None` will cause the server default of `True` to be used.
Deprecated in :term:`DSP0200`: WBEM server implementations for `True`
may vary; this parameter should be set to `False` by the caller.
di (:class:`py:bool`):
DeepInheritance flag: Include properties added by subclasses.
`None` will cause the server default of `True` to be used.
iq (:class:`py:bool`):
IncludeQualifiers flag: Include qualifiers.
`None` will cause the server default of `False` to be used.
Deprecated in :term:`DSP0200`: Clients cannot rely on qualifiers to
be returned in this operation.
ico (:class:`py:bool`):
IncludeClassOrigin flag: Include class origin information for the
properties in the retrieved instances.
`None` will cause the server default of `False` to be used.
Deprecated in :term:`DSP0200`: WBEM servers may either implement this
parameter as specified, or may treat any specified value as `False`.
pl (:term:`string` or :term:`py:iterable` of :term:`string`):
PropertyList: Names of properties to be included (if not otherwise
excluded). An empty iterable indicates to include no properties.
If `None`, all properties will be included.
fl (:term:`string`):
Filter query language to be used for the filter defined in the `fs`
parameter. The DMTF-defined Filter Query Language
(see :term:`DSP0212`) is specified as "DMTF:FQL".
        `None` means that no such filtering is performed.
fs (:term:`string`):
Filter to apply to objects to be returned. Based on filter query
language defined by `fl` parameter.
        `None` means that no such filtering is performed.
ot (:class:`~pywbem.Uint32`):
Operation timeout in seconds. This is the minimum time the WBEM server
must keep the enumeration session open between requests on that
session.
A value of 0 indicates that the server should never time out.
The server may reject the proposed value.
`None` will cause the server to use its default timeout.
coe (:class:`py:bool`):
Continue on error flag.
`None` will cause the server to use its default of `False`.
moc (:class:`~pywbem.Uint32`):
Maximum number of objects to return for this operation.
`None` will cause the server to use its default of 0.
Returns:
A :func:`~py:collections.namedtuple` object containing the following
named items:
* **instances** (list of :class:`~pywbem.CIMInstance`):
The retrieved instances.
* **eos** (:class:`py:bool`):
`True` if the enumeration session is exhausted after this operation.
Otherwise `eos` is `False` and the `context` item is the context
object for the next operation on the enumeration session.
* **context** (:func:`py:tuple` of server_context, namespace):
A context object identifying the open enumeration session, including
its current enumeration state, and the namespace. This object must be
supplied with the next pull or close operation for this enumeration
session.
"""
return CONN.OpenEnumerateInstances(cn, ns,
LocalOnly=lo,
DeepInheritance=di,
IncludeQualifiers=iq,
IncludeClassOrigin=ico,
PropertyList=pl,
FilterQueryLanguage=fl,
FilterQuery=fs,
OperationTimeout=ot,
ContinueOnError=coe,
MaxObjectCount=moc) | 0.000579 |
def read(self, size=-1):
"""Returns bytes from self._buffer and update related offsets.
Args:
size: number of bytes to read starting from current offset.
Read the entire buffer if negative.
Returns:
Requested bytes from buffer.
"""
if size < 0:
offset = len(self._buffer)
else:
offset = self._offset + size
return self.read_to_offset(offset) | 0.007444 |
def is_open(self,id,time,day):
"""
Checks if the venue is open at the time of day given a venue id.
args:
id: string of venue id
time: string of the format ex: "12:00:00"
day: string of weekday ex: "Monday"
returns:
Bool if there is hours data available
None otherwise
Note:
can get the string of the day and time from a time object if desired
by using time.strftime()
ex:
day = time.strftime('%A',some_time_object)
time = time.strftime('%H:%M:%S',some_time_object)
"""
details = self.get_details(id)
has_data = False
for obj in details["objects"]:
hours = obj["open_hours"][day]
if hours:
has_data = True
for interval in hours:
interval = interval.replace(' ','').split('-')
open_time = interval[0]
close_time = interval[1]
if open_time < time < close_time:
return True
if has_data:
return False
else:
return None | 0.018198 |
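The interval check works because zero-padded 'HH:MM:SS' strings sort chronologically; a usage sketch built from the docstring's time.strftime hint (venues is an assumed instance of the class defining is_open, and the id is made up):
import time

now = time.localtime()
day = time.strftime('%A', now)            # e.g. 'Monday'
clock = time.strftime('%H:%M:%S', now)    # e.g. '12:00:00'
open_now = venues.is_open('some-venue-id', clock, day)   # True, False, or None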
def task_denotate(self, task, annotation):
""" Removes an annotation from a task. """
self._execute(
task['uuid'],
'denotate',
'--',
annotation
)
id, denotated_task = self.get_task(uuid=task[six.u('uuid')])
return denotated_task | 0.006329 |
def find_one(self, filter=None, *args, **kwargs):
"""Get a single file from gridfs.
All arguments to :meth:`find` are also valid arguments for
:meth:`find_one`, although any `limit` argument will be
ignored. Returns a single :class:`~gridfs.grid_file.GridOut`,
or ``None`` if no matching file is found. For example::
file = fs.find_one({"filename": "lisa.txt"})
:Parameters:
- `filter` (optional): a dictionary specifying
              the query to be performed OR any other type to be used as
the value for a query for ``"_id"`` in the file collection.
- `*args` (optional): any additional positional arguments are
the same as the arguments to :meth:`find`.
- `**kwargs` (optional): any additional keyword arguments
are the same as the arguments to :meth:`find`.
"""
if filter is not None and not isinstance(filter, Mapping):
filter = {"_id": filter}
for f in self.find(filter, *args, **kwargs):
return f
return None | 0.001813 |
def encode_int(self, n):
""" Encodes an integer into a short Base64 string.
Example:
``encode_int(123)`` returns ``'B7'``.
"""
str = []
while True:
n, r = divmod(n, self.BASE)
str.append(self.ALPHABET[r])
if n == 0: break
return ''.join(reversed(str)) | 0.008621 |
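A matching decode sketch, assuming the same BASE and ALPHABET attributes used by encode_int above:
def decode_int(self, s):
    """ Decodes a short Base64 string produced by encode_int back into an integer. """
    n = 0
    for ch in s:
        n = n * self.BASE + self.ALPHABET.index(ch)
    return n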
def keyReleaseEvent(self, event):
"""
Pyqt specific key release callback function.
Translates and forwards events to :py:func:`keyboard_event`.
"""
self.keyboard_event(event.key(), self.keys.ACTION_RELEASE, 0) | 0.008032 |
def get_ext_tops(config):
'''
Get top directories for the dependencies, based on external configuration.
:return:
'''
config = copy.deepcopy(config)
alternatives = {}
required = ['jinja2', 'yaml', 'tornado', 'msgpack']
tops = []
for ns, cfg in salt.ext.six.iteritems(config or {}):
alternatives[ns] = cfg
locked_py_version = cfg.get('py-version')
err_msg = None
if not locked_py_version:
err_msg = 'Alternative Salt library: missing specific locked Python version'
elif not isinstance(locked_py_version, (tuple, list)):
err_msg = ('Alternative Salt library: specific locked Python version '
'should be a list of major/minor version')
if err_msg:
raise salt.exceptions.SaltSystemExit(err_msg)
if cfg.get('dependencies') == 'inherit':
# TODO: implement inheritance of the modules from _here_
raise NotImplementedError('This feature is not yet implemented')
else:
for dep in cfg.get('dependencies'):
mod = cfg['dependencies'][dep] or ''
if not mod:
log.warning('Module %s has missing configuration', dep)
continue
elif mod.endswith('.py') and not os.path.isfile(mod):
log.warning('Module %s configured with not a file or does not exist: %s', dep, mod)
continue
elif not mod.endswith('.py') and not os.path.isfile(os.path.join(mod, '__init__.py')):
log.warning('Module %s is not a Python importable module with %s', dep, mod)
continue
tops.append(mod)
if dep in required:
required.pop(required.index(dep))
required = ', '.join(required)
if required:
msg = 'Missing dependencies for the alternative version' \
' in the external configuration: {}'.format(required)
log.error(msg)
raise salt.exceptions.SaltSystemExit(msg)
alternatives[ns]['dependencies'] = tops
return alternatives | 0.002708 |
def dockerflow(flask_app, backend_check):
"""
ADD ROUTING TO HANDLE DOCKERFLOW APP REQUIREMENTS
(see https://github.com/mozilla-services/Dockerflow#containerized-app-requirements)
:param flask_app: THE (Flask) APP
:param backend_check: METHOD THAT WILL CHECK THE BACKEND IS WORKING AND RAISE AN EXCEPTION IF NOT
:return:
"""
global VERSION_JSON
try:
VERSION_JSON = File("version.json").read_bytes()
@cors_wrapper
def version():
return Response(
VERSION_JSON,
status=200,
headers={
"Content-Type": "application/json"
}
)
@cors_wrapper
def heartbeat():
try:
backend_check()
return Response(status=200)
except Exception as e:
Log.warning("heartbeat failure", cause=e)
return Response(
unicode2utf8(value2json(e)),
status=500,
headers={
"Content-Type": "application/json"
}
)
@cors_wrapper
def lbheartbeat():
return Response(status=200)
flask_app.add_url_rule(str('/__version__'), None, version, defaults={}, methods=[str('GET'), str('POST')])
flask_app.add_url_rule(str('/__heartbeat__'), None, heartbeat, defaults={}, methods=[str('GET'), str('POST')])
flask_app.add_url_rule(str('/__lbheartbeat__'), None, lbheartbeat, defaults={}, methods=[str('GET'), str('POST')])
except Exception as e:
Log.error("Problem setting up listeners for dockerflow", cause=e) | 0.003499 |
def _listload(l: Loader, value, type_) -> List:
"""
This loads into something like List[int]
"""
t = type_.__args__[0]
try:
return [l.load(v, t, annotation=Annotation(AnnotationType.INDEX, i)) for i, v in enumerate(value)]
except TypeError as e:
if isinstance(e, TypedloadException):
raise
raise TypedloadTypeError(str(e), value=value, type_=type_) | 0.007353 |
def repartition(self, num_partitions, repartition_function=None):
"""Return a new Streamlet containing all elements of the this streamlet but having
num_partitions partitions. Note that this is different from num_partitions(n) in
that new streamlet will be created by the repartition call.
If repartiton_function is not None, it is used to decide which parititons
(from 0 to num_partitions -1), it should route each element to.
It could also return a list of partitions if it wants to send it to multiple
partitions.
"""
from heronpy.streamlet.impl.repartitionbolt import RepartitionStreamlet
if repartition_function is None:
repartition_function = lambda x: x
repartition_streamlet = RepartitionStreamlet(num_partitions, repartition_function, self)
self._add_child(repartition_streamlet)
return repartition_streamlet | 0.008 |
def save(self, force=False, uuid=False, **kwargs):
"""
REPLACES the object in DB. This is forbidden with objects from find() methods unless force=True is given.
"""
if not self._initialized_with_doc and not force:
raise Exception("Cannot save a document not initialized from a Python dict. This might remove fields from the DB!")
self._initialized_with_doc = False
if '_id' not in self:
if uuid:
self['_id'] = str("%s-%s" % (self.mongokat_collection.__class__.__name__, uuid4()))
return self.mongokat_collection.save(self, **kwargs) | 0.007886 |
def siblingsId(self):
""" Shortcut for getting the previous and next passage identifier
        :rtype: tuple of CtsReference
        :returns: Previous and next passage references
"""
if self._next_id is False or self._prev_id is False:
self._prev_id, self._next_id = self.getPrevNextUrn(reference=self.urn.reference)
return self._prev_id, self._next_id | 0.007874 |
def url(self):
"""
Return the appropriate URL.
URL is constructed based on these field conditions:
* If empty (not `self.name`) and a placeholder is defined, the
URL to the placeholder is returned.
* Otherwise, defaults to vanilla ImageFieldFile behavior.
"""
if not self.name and self.field.placeholder_image_name:
return self.storage.url(self.field.placeholder_image_name)
return super(VersatileImageMixIn, self).url | 0.003861 |
def macro(parser, token):
'''
Works just like block, but does not render.
'''
name = token.strip()
parser.build_method(name, endnodes=['endmacro'])
return ast.Yield(value=ast.Str(s='')) | 0.004785 |
def exec_command(self, command, bufsize=-1, get_pty=False):
"""
Execute a command in the connection
@param command: command to execute
@type command: str
@param bufsize: buffer size
@type bufsize: int
@param get_pty: get pty
@type get_pty: bool
@return: the stdin, stdout, and stderr of the executing command
@rtype: tuple(L{paramiko.ChannelFile}, L{paramiko.ChannelFile},
L{paramiko.ChannelFile})
@raise SSHException: if the server fails to execute the command
"""
self.last_command = command
return self.cli.exec_command(command, bufsize, get_pty=get_pty) | 0.002874 |
async def reply_video_note(self, video_note: typing.Union[base.InputFile, base.String],
duration: typing.Union[base.Integer, None] = None,
length: typing.Union[base.Integer, None] = None,
disable_notification: typing.Union[base.Boolean, None] = None,
reply_markup=None,
reply=True) -> Message:
"""
As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute long.
Use this method to send video messages.
Source: https://core.telegram.org/bots/api#sendvideonote
:param video_note: Video note to send.
:type video_note: :obj:`typing.Union[base.InputFile, base.String]`
:param duration: Duration of sent video in seconds
:type duration: :obj:`typing.Union[base.Integer, None]`
:param length: Video width and height
:type length: :obj:`typing.Union[base.Integer, None]`
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: :obj:`typing.Union[base.Boolean, None]`
:param reply_markup: Additional interface options.
:type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup,
types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]`
:param reply: fill 'reply_to_message_id'
:return: On success, the sent Message is returned.
:rtype: :obj:`types.Message`
"""
return await self.bot.send_video_note(chat_id=self.chat.id,
video_note=video_note,
duration=duration,
length=length,
disable_notification=disable_notification,
reply_to_message_id=self.message_id if reply else None,
reply_markup=reply_markup) | 0.007065 |
def append(self, value):
"""
Allows adding a child to the end of the sequence
:param value:
Native python datatype that will be passed to _child_spec to create
new child object
"""
        # We inline these checks to prevent method invocation each time
if self.children is None:
self._parse_children()
self.children.append(self._make_value(value))
if self._native is not None:
self._native.append(self.children[-1].native)
self._mutated = True | 0.003578 |
def stem(
self,
word,
max_word_length=20,
max_acro_length=8,
return_rule_no=False,
var='standard',
):
"""Return UEA-Lite stem.
Parameters
----------
word : str
The word to stem
max_word_length : int
The maximum word length allowed
max_acro_length : int
The maximum acronym length allowed
return_rule_no : bool
If True, returns the stem along with rule number
var : str
Variant rules to use:
- ``Adams`` to use Jason Adams' rules
- ``Perl`` to use the original Perl rules
Returns
-------
str or (str, int)
Word stem
Examples
--------
>>> uealite('readings')
'read'
>>> uealite('insulted')
'insult'
>>> uealite('cussed')
'cuss'
>>> uealite('fancies')
'fancy'
>>> uealite('eroded')
'erode'
"""
def _stem_with_duplicate_character_check(word, del_len):
if word[-1] == 's':
del_len += 1
stemmed_word = word[:-del_len]
if re_match(r'.*(\w)\1$', stemmed_word):
stemmed_word = stemmed_word[:-1]
return stemmed_word
def _stem(word):
stemmed_word = word
rule_no = 0
if not word:
return word, 0
if word in self._problem_words or (
word == 'menses' and var == 'Adams'
):
return word, 90
if max_word_length and len(word) > max_word_length:
return word, 95
if "'" in word:
if word[-2:] in {"'s", "'S"}:
stemmed_word = word[:-2]
if word[-1:] == "'":
stemmed_word = word[:-1]
stemmed_word = stemmed_word.replace("n't", 'not')
stemmed_word = stemmed_word.replace("'ve", 'have')
stemmed_word = stemmed_word.replace("'re", 'are')
stemmed_word = stemmed_word.replace("'m", 'am')
return stemmed_word, 94
if word.isdigit():
return word, 90.3
else:
hyphen = word.find('-')
if len(word) > hyphen > 0:
if (
word[:hyphen].isalpha()
and word[hyphen + 1 :].isalpha()
):
return word, 90.2
else:
return word, 90.1
elif '_' in word:
return word, 90
elif word[-1] == 's' and word[:-1].isupper():
if var == 'Adams' and len(word) - 1 > max_acro_length:
return word, 96
return word[:-1], 91.1
elif word.isupper():
if var == 'Adams' and len(word) > max_acro_length:
return word, 96
return word, 91
elif re_match(r'^.*[A-Z].*[A-Z].*$', word):
return word, 92
elif word[0].isupper():
return word, 93
elif var == 'Adams' and re_match(
r'^[a-z](|[rl])(ing|ed)$', word
):
return word, 97
for n in range(7, 1, -1):
if word[-n:] in self._rules[var][n]:
rule_no, del_len, add_str = self._rules[var][n][word[-n:]]
if del_len:
stemmed_word = word[:-del_len]
else:
stemmed_word = word
if add_str:
stemmed_word += add_str
break
if not rule_no:
if re_match(r'.*\w\wings?$', word): # rule 58
stemmed_word = _stem_with_duplicate_character_check(
word, 3
)
rule_no = 58
elif re_match(r'.*\w\weds?$', word): # rule 62
stemmed_word = _stem_with_duplicate_character_check(
word, 2
)
rule_no = 62
elif word[-1] == 's': # rule 68
stemmed_word = word[:-1]
rule_no = 68
return stemmed_word, rule_no
stem, rule_no = _stem(word)
if return_rule_no:
return stem, rule_no
return stem | 0.000858 |
def host(environ): # pragma: no cover
"""
Reconstruct host from environment. A modified version
of http://www.python.org/dev/peps/pep-0333/#url-reconstruction
"""
url = environ['wsgi.url_scheme'] + '://'
if environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
return url + quote(environ.get('SCRIPT_NAME', '')) | 0.001508 |
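A worked example with a hand-rolled WSGI environ (all values are made up):
environ = {
    'wsgi.url_scheme': 'https',
    'HTTP_HOST': 'example.org',
    'SERVER_NAME': 'unused-when-HTTP_HOST-is-set',
    'SERVER_PORT': '443',
    'SCRIPT_NAME': '/app',
}
print(host(environ))   # https://example.org/app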
def click_partial_link_text(self, partial_link_text,
timeout=settings.SMALL_TIMEOUT):
""" This method clicks the partial link text on a page. """
# If using phantomjs, might need to extract and open the link directly
if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
timeout = self.__get_new_timeout(timeout)
if self.browser == 'phantomjs':
if self.is_partial_link_text_visible(partial_link_text):
element = self.wait_for_partial_link_text(partial_link_text)
element.click()
return
soup = self.get_beautiful_soup()
html_links = soup.fetch('a')
for html_link in html_links:
if partial_link_text in html_link.text:
for html_attribute in html_link.attrs:
if html_attribute[0] == 'href':
href = html_attribute[1]
if href.startswith('//'):
link = "http:" + href
elif href.startswith('/'):
url = self.driver.current_url
domain_url = self.get_domain_url(url)
link = domain_url + href
else:
link = href
self.open(link)
return
raise Exception(
'Could not parse link from partial link_text '
'{%s}' % partial_link_text)
raise Exception(
"Partial link text {%s} was not found!" % partial_link_text)
# Not using phantomjs
element = self.wait_for_partial_link_text(
partial_link_text, timeout=timeout)
self.__demo_mode_highlight_if_active(
partial_link_text, by=By.PARTIAL_LINK_TEXT)
pre_action_url = self.driver.current_url
try:
element.click()
except (StaleElementReferenceException, ENI_Exception):
self.wait_for_ready_state_complete()
time.sleep(0.05)
element = self.wait_for_partial_link_text(
partial_link_text, timeout=timeout)
element.click()
if settings.WAIT_FOR_RSC_ON_CLICKS:
self.wait_for_ready_state_complete()
if self.demo_mode:
if self.driver.current_url != pre_action_url:
self.__demo_mode_pause_if_active()
else:
self.__demo_mode_pause_if_active(tiny=True) | 0.001126 |
def visit_call(self, node):
"""visit a Call node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node) | 0.00266 |
def _obtain_queue(num_jobs):
"""Return queue type most appropriate for runtime model.
If we are using multiprocessing, that should be
multiprocessing.Manager().Queue. If we are just using a
single process, then use a normal queue type.
"""
if _should_use_multiprocessing(num_jobs):
return ReprQueue(multiprocessing.Manager().Queue())
return ReprQueue(Queue()) | 0.002519 |
def sqlvm_list(
client,
resource_group_name=None):
'''
Lists all SQL virtual machines in a resource group or subscription.
'''
if resource_group_name:
# List all sql vms in the resource group
return client.list_by_resource_group(resource_group_name=resource_group_name)
# List all sql vms in the subscription
return client.list() | 0.005168 |
def get_mkt_val(self, pxs=None):
""" return the market value series for the specified Series of pxs """
pxs = self._closing_pxs if pxs is None else pxs
return pxs * self.multiplier | 0.009756 |
def statplot(self, analytes=None, samples=None, figsize=None,
stat='mean', err='std', subset=None):
"""
Function for visualising per-ablation and per-sample means.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot
samples : str or iterable
Which sample(s) to plot
figsize : tuple
Figure (width, height) in inches
stat : str
Which statistic to plot. Must match
the name of the functions used in
'sample_stats'.
err : str
Which uncertainty to plot.
subset : str
Which subset of samples to plot.
"""
if not hasattr(self, 'stats'):
self.sample_stats()
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
analytes = [a for a in analytes if a !=
self.internal_standard]
if figsize is None:
figsize = (1.5 * len(self.stats), 3 * len(analytes))
fig, axs = plt.subplots(len(analytes), 1, figsize=figsize)
for ax, an in zip(axs, analytes):
i = 0
stab = self.getstats()
m, u = unitpicker(np.percentile(stab.loc[:, an].dropna(), 25), 0.1,
focus_stage='calibrated',
denominator=self.internal_standard)
for s in samples:
if self.srm_identifier not in s:
d = self.stats[s]
if d[stat].ndim == 2:
n = d[stat].shape[-1]
x = np.linspace(i - .1 * n / 2, i + .1 * n / 2, n)
else:
x = [i]
a_ind = d['analytes'] == an
# plot individual ablations with error bars
ax.errorbar(x, d[stat][a_ind][0] * m,
yerr=d[err][a_ind][0] * m,
marker='o', color=self.cmaps[an],
lw=0, elinewidth=1)
ax.set_ylabel('%s / %s (%s )' % (pretty_element(an),
pretty_element(self.internal_standard),
u))
# plot whole - sample mean
if len(x) > 1:
# mean calculation with error propagation?
# umean = un.uarray(d[stat][a_ind][0] * m, d[err][a_ind][0] * m).mean()
# std = un.std_devs(umean)
# mean = un.nominal_values(umean)
mean = np.nanmean(d[stat][a_ind][0] * m)
std = np.nanstd(d[stat][a_ind][0] * m)
ax.plot(x, [mean] * len(x), c=self.cmaps[an], lw=2)
ax.fill_between(x, [mean + std] * len(x),
[mean - std] * len(x),
lw=0, alpha=0.2, color=self.cmaps[an])
# highlight each sample
if i % 2 == 1:
ax.axvspan(i - .5, i + .5, color=(0, 0, 0, 0.05), lw=0)
i += 1
ax.set_xticks(np.arange(0, len(self.stats)))
ax.set_xlim(-0.5, len(self.stats) - .5)
ax.set_xticklabels(samples)
return fig, ax | 0.001914 |
def add_permission_role(self, role, perm_view):
"""
Add permission-ViewMenu object to Role
:param role:
The role object
:param perm_view:
The PermissionViewMenu object
"""
if perm_view not in role.permissions:
try:
role.permissions.append(perm_view)
self.get_session.merge(role)
self.get_session.commit()
log.info(
c.LOGMSG_INF_SEC_ADD_PERMROLE.format(str(perm_view), role.name)
)
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMROLE.format(str(e)))
self.get_session.rollback() | 0.004076 |
def _get_as_obj(obj_dict, name):
"""
Turn a dictionary into a named tuple so it can be
passed into the constructor of a complex model generator.
"""
if obj_dict.get('_sa_instance_state'):
del obj_dict['_sa_instance_state']
obj = namedtuple(name, tuple(obj_dict.keys()))
for k, v in obj_dict.items():
setattr(obj, k, v)
log.info("%s = %s",k,getattr(obj,k))
return obj | 0.009302 |
def delete_snapshot(self, snapshot_id):
"""
Removes a snapshot from your account.
:param snapshot_id: The unique ID of the snapshot.
:type snapshot_id: ``str``
"""
response = self._perform_request(
url='/snapshots/' + snapshot_id, method='DELETE')
return response | 0.005797 |
def _check_errors(self, errors, prefix):
"""Check for errors and possible raise and format an error message.
:param errors: List of error messages.
:param prefix: str, Prefix message for error messages
"""
args = []
for uid, messages in errors:
error_msg = []
error_msg.append(prefix % uid)
for msg in messages:
error_msg.append(" (-) %s" % msg)
args.append("\n".join(error_msg))
if args:
raise RuntimeError(*args) | 0.003643 |
def import_complex_gateway_to_graph(diagram_graph, process_id, process_attributes, element):
"""
Adds to graph the new element that represents BPMN complex gateway.
In addition to attributes inherited from Gateway type, complex gateway
has additional attribute default flow (default value - none).
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param process_id: string object, representing an ID of process element,
:param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of
imported flow node,
:param element: object representing a BPMN XML 'complexGateway' element.
"""
element_id = element.getAttribute(consts.Consts.id)
BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element)
diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \
if element.hasAttribute(consts.Consts.default) else None | 0.00831 |
def user_open(url_or_command):
"""Open the specified paramater in the web browser if a URL is detected,
othewrise pass the paramater to the shell as a subprocess. This function
is inteded to bu used in on_leftclick/on_rightclick callbacks.
:param url_or_command: String containing URL or command
"""
from urllib.parse import urlparse
scheme = urlparse(url_or_command).scheme
if scheme == 'http' or scheme == 'https':
import webbrowser
import os
# webbrowser.open() sometimes prints a message for some reason and confuses i3
# Redirect stdout briefly to prevent this from happening.
savout = os.dup(1)
os.close(1)
os.open(os.devnull, os.O_RDWR)
try:
webbrowser.open(url_or_command)
finally:
os.dup2(savout, 1)
else:
import subprocess
subprocess.Popen(url_or_command, shell=True) | 0.00216 |
def split_by_line(content):
"""Split the given content into a list of items by newline.
Both \r\n and \n are supported. This is done since it seems
that TTY devices on POSIX systems use \r\n for newlines in
some instances.
If the given content is an empty string or a string of only
whitespace, an empty list will be returned. If the given
content does not contain any newlines, it will be returned
as the only element in a single item list.
Leading and trailing whitespace is remove from all elements
returned.
:param str content: Content to split by newlines
:return: List of items that were separated by newlines.
:rtype: list
"""
# Make sure we don't end up splitting a string with
# just a single trailing \n or \r\n into multiple parts.
stripped = content.strip()
if not stripped:
return []
if '\r\n' in stripped:
return _strip_all(stripped.split('\r\n'))
if '\n' in stripped:
return _strip_all(stripped.split('\n'))
return _strip_all([stripped]) | 0.000939 |
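A few behaviour examples for the splitting rules described above (assuming the private _strip_all helper from the same module):
split_by_line('first\r\nsecond\r\n')   # -> ['first', 'second']
split_by_line('only one line\n')       # -> ['only one line']
split_by_line('   ')                   # -> []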
def open(cls, path='', encoding=None, error_handling=ERROR_PASS):
"""
open([path, [encoding]])
        If you do not provide any encoding, it can be detected if the file
        contains a byte order mark; otherwise utf-8 is used as the default.
"""
source_file, encoding = cls._open_unicode_file(path, claimed_encoding=encoding)
new_file = cls(path=path, encoding=encoding)
new_file.read(source_file, error_handling=error_handling)
source_file.close()
return new_file | 0.005671 |
def get_driver(self):
'''
Get an already running instance of Webdriver. If there is none, it will create one.
Returns:
Webdriver - Selenium Webdriver instance.
Usage::
driver = WTF_WEBDRIVER_MANAGER.new_driver()
driver.get("http://the-internet.herokuapp.com")
same_driver = WTF_WEBDRIVER_MANAGER.get_driver()
print(driver is same_driver) # True
'''
driver = self.__get_driver_for_channel(self.__get_channel())
if driver is None:
driver = self.new_driver()
return driver | 0.004926 |
def reload(self):
"""
Create a new partition scheme. A scheme defines which utterances are in which partition.
The scheme only changes after every call if ``self.shuffle == True``.
Returns:
list: List of PartitionInfo objects, defining the new partitions (same as ``self.partitions``)
"""
# Create the order in which utterances will be loaded
utt_ids = sorted(self.utt_ids)
if self.shuffle:
self.rand.shuffle(utt_ids)
partitions = []
current_partition = PartitionInfo()
for utt_id in utt_ids:
utt_size = self.utt_sizes[utt_id]
utt_lengths = self.utt_lengths[utt_id]
# We add utterance to the partition as long the partition-size is not exceeded
# Otherwise we start with new partition.
if current_partition.size + utt_size > self.partition_size:
partitions.append(current_partition)
current_partition = PartitionInfo()
current_partition.utt_ids.append(utt_id)
current_partition.utt_lengths.append(utt_lengths)
current_partition.size += utt_size
if current_partition.size > 0:
partitions.append(current_partition)
self.partitions = partitions
return self.partitions | 0.003704 |
def drain_K(self):
""" Return the minor loss coefficient of the drain pipe.
:returns: Minor Loss Coefficient
        :rtype: float
"""
drain_K = minorloss.PIPE_ENTRANCE_K_MINOR + minorloss.PIPE_ENTRANCE_K_MINOR + minorloss.PIPE_EXIT_K_MINOR
return drain_K | 0.010135 |
def dump(
self, stream, progress=None, lower=None, upper=None,
incremental=False, deltas=False
):
"""Dump the repository to a dumpfile stream.
:param stream: A file stream to which the dumpfile is written
:param progress: A file stream to which progress is written
:param lower: Must be a numeric version number
:param upper: Must be a numeric version number
See ``svnadmin help dump`` for details on the other arguments.
"""
cmd = [SVNADMIN, 'dump', '.']
if progress is None:
cmd.append('-q')
if lower is not None:
cmd.append('-r')
if upper is None:
cmd.append(str(int(lower)))
else:
cmd.append('%d:%d' % (int(lower), int(upper)))
if incremental:
cmd.append('--incremental')
if deltas:
cmd.append('--deltas')
p = subprocess.Popen(cmd, cwd=self.path, stdout=stream, stderr=progress)
p.wait()
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd) | 0.003549 |
def wait_until_invisibility_of(self, locator, timeout=None):
"""
Waits for an element to be invisible
@type locator: webdriverwrapper.support.locator.Locator
@param locator: the locator or css string to search for the element
@type timeout: int
@param timeout: the maximum number of seconds the driver will wait before timing out
@rtype: webdriverwrapper.WebElementWrapper
@return: Returns the element found
"""
timeout = timeout if timeout is not None else self.timeout
def wait():
'''
Wait function passed to executor
'''
element = WebDriverWait(self.driver, timeout).until(EC.invisibility_of_element_located(
(self.locator_handler.parse_locator(locator).By, self.locator_handler.parse_locator(locator).value)))
return WebElementWrapper.WebElementWrapper(self, locator, element)
return self.execute_and_handle_webdriver_exceptions(
wait, timeout, locator, 'Timeout waiting for element to be invisible') | 0.005329 |
def validate(self):
'''Validate all the entries in the environment cache.'''
for env in list(self):
if not env.exists:
self.remove(env) | 0.011111 |
def clear(self):
"""Clear all work items from the session.
This removes any associated results as well.
"""
with self._conn:
self._conn.execute('DELETE FROM results')
self._conn.execute('DELETE FROM work_items') | 0.007463 |
def exec_stmt_handle(self, tokens):
"""Process Python-3-style exec statements."""
internal_assert(1 <= len(tokens) <= 3, "invalid exec statement tokens", tokens)
if self.target.startswith("2"):
out = "exec " + tokens[0]
if len(tokens) > 1:
out += " in " + ", ".join(tokens[1:])
return out
else:
return "exec(" + ", ".join(tokens) + ")" | 0.006961 |
def print_build_info(zipped_pex=False):
"""Print build_info from release.yaml
:param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
"""
if zipped_pex:
release_file = get_zipped_heron_release_file()
else:
release_file = get_heron_release_file()
with open(release_file) as release_info:
release_map = yaml.load(release_info)
release_items = sorted(release_map.items(), key=lambda tup: tup[0])
for key, value in release_items:
print("%s : %s" % (key, value)) | 0.011583 |
def foreach(self, argv, func):
"""Apply the function to each index named in the argument vector."""
opts = cmdline(argv)
if len(opts.args) == 0:
error("Command requires an index name", 2)
for name in opts.args:
if name not in self.service.indexes:
error("Index '%s' does not exist" % name, 2)
index = self.service.indexes[name]
func(index) | 0.004598 |
def get_completed_tasks(self):
"""Return a list of all completed tasks in this project.
:return: A list of all completed tasks in this project.
:rtype: list of :class:`pytodoist.todoist.Task`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('PyTodoist')
>>> task = project.add_task('Install PyTodoist')
>>> task.complete()
>>> completed_tasks = project.get_completed_tasks()
>>> for task in completed_tasks:
... task.uncomplete()
"""
self.owner.sync()
tasks = []
offset = 0
while True:
response = API.get_all_completed_tasks(self.owner.api_token,
limit=_PAGE_LIMIT,
offset=offset,
project_id=self.id)
_fail_if_contains_errors(response)
response_json = response.json()
tasks_json = response_json['items']
if len(tasks_json) == 0:
break # There are no more completed tasks to retreive.
for task_json in tasks_json:
project = self.owner.projects[task_json['project_id']]
tasks.append(Task(task_json, project))
offset += _PAGE_LIMIT
return tasks | 0.001386 |
def _get_markobj(self, x, y, marktype, marksize, markcolor, markwidth):
"""Generate canvas object for given mark parameters."""
if marktype == 'circle':
obj = self.dc.Circle(
x=x, y=y, radius=marksize, color=markcolor, linewidth=markwidth)
elif marktype in ('cross', 'plus'):
obj = self.dc.Point(
x=x, y=y, radius=marksize, color=markcolor, linewidth=markwidth,
style=marktype)
elif marktype == 'box':
obj = self.dc.Box(
x=x, y=y, xradius=marksize, yradius=marksize, color=markcolor,
linewidth=markwidth)
else: # point, marksize
obj = self.dc.Box(
x=x, y=y, xradius=1, yradius=1, color=markcolor,
linewidth=markwidth, fill=True, fillcolor=markcolor)
return obj | 0.004598 |
def wrap_name_from_git(prefix, suffix, *args, **kwargs):
"""
    wraps the result of make_name_from_git in a prefix and suffix,
    adding a separator for each.
see docstring for make_name_from_git for a full list of parameters
"""
# 64 is maximum length allowed by OpenShift
# 2 is the number of dashes that will be added
prefix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(prefix)))
suffix = ''.join(filter(VALID_BUILD_CONFIG_NAME_CHARS.match, list(suffix)))
kwargs['limit'] = kwargs.get('limit', 64) - len(prefix) - len(suffix) - 2
name_from_git = make_name_from_git(*args, **kwargs)
return '-'.join([prefix, name_from_git, suffix]) | 0.001451 |
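A hedged, self-contained sketch of the length budgeting above; fake_name_from_git stands in for the real make_name_from_git (which derives a name from repository/branch information and truncates it to the given limit), and the character filter is an assumption:

import re

VALID_CHARS = re.compile(r'[a-z0-9\-]')   # assumed subset of the real pattern

def fake_name_from_git(repo, branch, limit=64):
    # Stand-in: the real make_name_from_git builds the name from git metadata.
    return ('%s-%s' % (repo, branch))[:limit]

def wrap(prefix, suffix, repo, branch, limit=64):
    prefix = ''.join(filter(VALID_CHARS.match, prefix))
    suffix = ''.join(filter(VALID_CHARS.match, suffix))
    budget = limit - len(prefix) - len(suffix) - 2    # 2 dashes join the parts
    return '-'.join([prefix, fake_name_from_git(repo, branch, budget), suffix])

print(wrap('scratch', '20190101', 'osbs-client', 'master'))
# scratch-osbs-client-master-20190101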
def writeElement(self, data):
"""
Encodes C{data} to AMF. If the data is not able to be matched to an AMF
type, then L{pyamf.EncodeError} will be raised.
"""
key = type(data)
func = None
try:
func = self._func_cache[key]
except KeyError:
func = self.getTypeFunc(data)
if func is None:
raise pyamf.EncodeError('Unable to encode %r (type %r)' % (
data, key))
self._func_cache[key] = func
func(data) | 0.003597 |
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed | 0.004228 |
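The standard-library configparser keeps the same contract, which makes the return value easy to demonstrate:

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("[server]\nhost = localhost\nport = 8080\n")

print(cfg.remove_option("server", "port"))   # True: the option existed and was removed
print(cfg.remove_option("server", "port"))   # False: already gone
print(cfg.has_option("server", "host"))      # True: other options are untouched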
def _get_request_mode_info(interface):
'''
return requestmode for given interface
'''
settings = _load_config(interface, ['linklocalenabled', 'dhcpenabled'], -1)
link_local_enabled = int(settings['linklocalenabled'])
dhcp_enabled = int(settings['dhcpenabled'])
if dhcp_enabled == 1:
return 'dhcp_linklocal' if link_local_enabled == 1 else 'dhcp_only'
else:
if link_local_enabled == 1:
return 'linklocal_only'
if link_local_enabled == 0:
return 'static'
# some versions of nirtcfg don't set the dhcpenabled/linklocalenabled variables
# when selecting "DHCP or Link Local" from MAX, so return it by default to avoid
# having the requestmode "None" because none of the conditions above matched.
return 'dhcp_linklocal' | 0.00492 |
def _verify_connection(self):
"""
Checks availability of the Alooma server
:return: If the server is reachable, returns True
:raises: If connection fails, raises exceptions.ConnectionFailed
"""
try:
res = self._session.get(self._connection_validation_url, json={})
logger.debug(consts.LOG_MSG_VERIFYING_CONNECTION,
self._connection_validation_url,
res if res else 'No result from backend')
if not res.ok:
raise requests.exceptions.RequestException(res.content)
remote_batch_size = res.json().get(consts.MAX_REQUEST_SIZE_FIELD,
consts.DEFAULT_BATCH_SIZE)
if remote_batch_size < self._batch_max_size:
self._batch_max_size = remote_batch_size
self._notify(logging.INFO,
consts.LOG_MSG_NEW_BATCH_SIZE % remote_batch_size)
self._is_connected.set()
return True
except requests.exceptions.RequestException as ex:
msg = consts.LOG_MSG_CONNECTION_FAILED % str(ex)
self._notify(logging.ERROR, msg)
raise exceptions.ConnectionFailed(msg) | 0.001566 |
def parse_exchange_file(path, default_compartment):
"""Parse a file as a list of exchange compounds with flux limits.
The file format is detected and the file is parsed accordingly. Path can
be given as a string or a context.
"""
context = FilePathContext(path)
format = resolve_format(None, context.filepath)
if format == 'tsv':
logger.debug('Parsing exchange file {} as TSV'.format(
context.filepath))
with context.open('r') as f:
for entry in parse_exchange_table_file(f):
yield entry
elif format == 'yaml':
logger.debug('Parsing exchange file {} as YAML'.format(
context.filepath))
with context.open('r') as f:
for entry in parse_exchange_yaml_file(
context, f, default_compartment):
yield entry
else:
raise ParseError('Unable to detect format of exchange file {}'.format(
context.filepath)) | 0.001014 |
def setup_logging(config, D=None):
""" set up the logging system with the configured (in pyemma.cfg) logging config (logging.yml)
@param config: instance of pyemma.config module (wrapper)
"""
if not D:
import yaml
args = config.logging_config
default = False
if args.upper() == 'DEFAULT':
default = True
src = config.default_logging_file
else:
src = args
# first try to read configured file
try:
with open(src) as f:
D = yaml.load(f)
except EnvironmentError as ee:
# fall back to default
if not default:
try:
with open(config.default_logging_file) as f2:
D = yaml.load(f2)
except EnvironmentError as ee2:
raise LoggingConfigurationError('Could not read either configured nor '
'default logging configuration!\n%s' % ee2)
else:
raise LoggingConfigurationError('could not handle default logging '
'configuration file\n%s' % ee)
if D is None:
raise LoggingConfigurationError('Empty logging config! Try using default config by'
' setting logging_conf=DEFAULT in pyemma.cfg')
assert D
# this has not been set in PyEMMA version prior 2.0.2+
D.setdefault('version', 1)
# if the user has not explicitly disabled other loggers, we (contrary to Pythons
# default value) do not want to override them.
D.setdefault('disable_existing_loggers', False)
# configure using the dict
try:
dictConfig(D)
except ValueError as ve:
# issue with file handler?
if 'files' in str(ve) and 'rotating_files' in D['handlers']:
print("cfg dir", config.cfg_dir)
new_file = os.path.join(config.cfg_dir, 'pyemma.log')
warnings.warn("set logfile to %s, because there was"
" an error writing to the desired one" % new_file)
D['handlers']['rotating_files']['filename'] = new_file
else:
raise
dictConfig(D)
# get log file name of pyemmas root logger
logger = logging.getLogger('pyemma')
log_files = [getattr(h, 'baseFilename', None) for h in logger.handlers]
import atexit
@atexit.register
def clean_empty_log_files():
# gracefully shutdown logging system
logging.shutdown()
for f in log_files:
if f is not None and os.path.exists(f):
try:
if os.stat(f).st_size == 0:
os.remove(f)
except OSError as o:
print("during removal of empty logfiles there was a problem: ", o) | 0.003434 |
def get_bounds(pts):
"""Return the minimum point and maximum point bounding a
set of points."""
pts_t = np.asarray(pts).T
return np.asarray(([np.min(_pts) for _pts in pts_t],
[np.max(_pts) for _pts in pts_t])) | 0.004032 |
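Assuming the function above is in scope, a small usage example:

import numpy as np

pts = [(1.0, 5.0), (3.0, -2.0), (-4.0, 0.5)]
lower, upper = get_bounds(pts)
print(lower)  # [-4. -2.]  (per-axis minima)
print(upper)  # [ 3.  5.]  (per-axis maxima)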
def is_accessable_by_others(filename):
"""Check if file is group or world accessable."""
mode = os.stat(filename)[stat.ST_MODE]
return mode & (stat.S_IRWXG | stat.S_IRWXO) | 0.005464 |
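A usage sketch, assuming the function is importable; the return value is the raw permission bits, so it is truthy exactly when any group or world bit is set:

import os
import stat
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)

os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)                  # 0o600: owner only
print(bool(is_accessable_by_others(path)))                   # False

os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IROTH)   # 0o604: world-readable
print(bool(is_accessable_by_others(path)))                   # True

os.remove(path)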
def marts(self):
"""List of available marts."""
if self._marts is None:
self._marts = self._fetch_marts()
return self._marts | 0.0125 |
def set_sequence_from_str(self, sequence):
"""
This is a convenience method to set the new QKeySequence of the
shortcut editor from a string.
"""
self._qsequences = [QKeySequence(s) for s in sequence.split(', ')]
self.update_warning() | 0.006944 |
def execute(self):
''' Begin capturing PCAPs and sending them to workbench '''
# Create a temporary directory
self.temp_dir = tempfile.mkdtemp()
os.chdir(self.temp_dir)
# Spin up the directory watcher
DirWatcher(self.temp_dir, self.file_created)
# Spin up tcpdump
self.subprocess_manager(self.tcpdump_cmd) | 0.005376 |
def __dict_to_deployment_spec(spec):
'''
Converts a dictionary into kubernetes AppsV1beta1DeploymentSpec instance.
'''
spec_obj = AppsV1beta1DeploymentSpec(template=spec.get('template', ''))
for key, value in iteritems(spec):
if hasattr(spec_obj, key):
setattr(spec_obj, key, value)
return spec_obj | 0.002915 |
def normalize_date_aggressively(date):
"""Normalize date, stripping date parts until a valid date is obtained."""
def _strip_last_part(date):
parts = date.split('-')
return '-'.join(parts[:-1])
fake_dates = {'0000', '9999'}
if date in fake_dates:
return None
try:
return normalize_date(date)
except ValueError:
if '-' not in date:
raise
else:
new_date = _strip_last_part(date)
return normalize_date_aggressively(new_date) | 0.001887 |
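A hedged illustration of the stripping behaviour. The stand-in normalize_date below only validates ISO-style dates and must live in the same namespace as normalize_date_aggressively for the name lookup to resolve; the real one is defined elsewhere in the codebase:

from datetime import datetime

def normalize_date(date):
    # Stand-in: accept YYYY, YYYY-MM or YYYY-MM-DD, reject anything else.
    for fmt in ('%Y-%m-%d', '%Y-%m', '%Y'):
        try:
            datetime.strptime(date, fmt)
            return date
        except ValueError:
            continue
    raise ValueError(date)

print(normalize_date_aggressively('2015-02-31'))  # '2015-02' (invalid day stripped)
print(normalize_date_aggressively('9999'))        # None (recognised fake date)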
def check_successful_tx(web3: Web3, txid: str, timeout=180) -> Tuple[dict, dict]:
"""See if transaction went through (Solidity code did not throw).
:return: Transaction receipt and transaction info
"""
receipt = wait_for_transaction_receipt(web3=web3, txid=txid, timeout=timeout)
txinfo = web3.eth.getTransaction(txid)
if 'status' not in receipt:
raise KeyError(
'A transaction receipt does not contain the "status" field. '
'Does your chain have Byzantium rules enabled?',
)
if receipt['status'] == 0:
raise ValueError(f'Status 0 indicates failure')
if txinfo['gas'] == receipt['gasUsed']:
raise ValueError(f'Gas is completely used ({txinfo["gas"]}). Failure?')
return (receipt, txinfo) | 0.003851 |
def _set_datapath(self, datapath):
""" Set a datapath.
"""
if datapath:
self._datapath = datapath.rstrip(os.sep)
self._fifo = int(stat.S_ISFIFO(os.stat(self.datapath).st_mode))
else:
self._datapath = None
self._fifo = False | 0.006601 |
def split_values(ustring, sep=u','):
"""
Splits unicode string with separator C{sep},
but skips escaped separator.
@param ustring: string to split
@type ustring: C{unicode}
    @param sep: separator (defaults to u',')
    @type sep: C{unicode}
    @return: tuple of split elements
    """
    assert isinstance(ustring, six.text_type), "ustring must be unicode, not %s" % type(ustring)
    # unicode has the special marker symbol 0xffff, which cannot appear in regular text,
    # so we use it to mark the places where an escaped separator was
ustring_marked = ustring.replace(u'\,', u'\uffff')
items = tuple([i.strip().replace(u'\uffff', u',') for i in ustring_marked.split(sep)])
return items | 0.010959 |
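A quick demonstration, assuming the function is in scope; the escaped comma survives because it is swapped for the U+FFFF sentinel before the split and restored afterwards (Python-3 reprs shown, u-prefixed on Python 2):

print(split_values(u'Smith J., Doe J.'))
# ('Smith J.', 'Doe J.')

print(split_values(u'Acme\, Inc., Globex'))
# ('Acme, Inc.', 'Globex')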
def prepare_topoplots(topo, values):
"""Prepare multiple topo maps for cached plotting.
.. note:: Parameter `topo` is modified by the function by calling :func:`~eegtopo.topoplot.Topoplot.set_values`.
Parameters
----------
topo : :class:`~eegtopo.topoplot.Topoplot`
Scalp maps are created with this class
values : array, shape = [n_topos, n_channels]
Channel values for each topo plot
Returns
-------
topomaps : list of array
The map for each topo plot
"""
values = np.atleast_2d(values)
topomaps = []
for i in range(values.shape[0]):
topo.set_values(values[i, :])
topo.create_map()
topomaps.append(topo.get_map())
return topomaps | 0.002703 |
def guest_get_power_state(self, userid):
"""Returns power state."""
action = "get power state of guest '%s'" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
return self._vmops.get_power_state(userid) | 0.008032 |
def next(self, *args):
"""yield from .next()"""
self.initialize()
self.future = asyncio.Future(loop=self.loop)
self.handle = self.loop.call_at(self.get_next(), self.call_func, *args)
return self.future | 0.008299 |
def select(self, timeout=None):
"""
Wait until one or more of the registered file objects becomes ready
or until the timeout expires.
:param timeout:
maximum wait time, in seconds (see below for special meaning)
:returns:
A list of pairs (key, events) for each ready file object.
Note that the list may be empty if non-blocking behavior is
selected or if the blocking wait is interrupted by a signal.
The timeout argument has two additional special cases:
1) If timeout is None then the call will block indefinitely
2) If timeout <= 0 the call will never block
"""
if timeout is None:
epoll_timeout = -1
elif timeout <= 0:
epoll_timeout = 0
else:
epoll_timeout = timeout
max_events = len(self._fd_map) or -1
result = []
for fd, epoll_events in self._epoll.poll(epoll_timeout, max_events):
            key = self._fd_map.get(fd)
            if key:
                events = _EpollSelectorEvents.from_epoll_events(epoll_events)
                events &= key.events
                result.append((key, _EpollSelectorEvents(events)))
return result | 0.001589 |
def get_item_abspath(self, identifier):
"""Return absolute path at which item content can be accessed.
:param identifier: item identifier
:returns: absolute path from which the item content can be accessed
"""
admin_metadata = self.get_admin_metadata()
uuid = admin_metadata["uuid"]
# Create directory for the specific dataset.
dataset_cache_abspath = os.path.join(self._s3_cache_abspath, uuid)
mkdir_parents(dataset_cache_abspath)
bucket_fpath = self.data_key_prefix + identifier
obj = self.s3resource.Object(self.bucket, bucket_fpath)
relpath = obj.get()['Metadata']['handle']
_, ext = os.path.splitext(relpath)
local_item_abspath = os.path.join(
dataset_cache_abspath,
identifier + ext
)
if not os.path.isfile(local_item_abspath):
tmp_local_item_abspath = local_item_abspath + ".tmp"
self.s3resource.Bucket(self.bucket).download_file(
bucket_fpath,
tmp_local_item_abspath
)
os.rename(tmp_local_item_abspath, local_item_abspath)
return local_item_abspath | 0.001665 |
def graphcut_subprocesses(graphcut_function, graphcut_arguments, processes = None):
"""
Executes multiple graph cuts in parallel.
This can result in a significant speed-up.
Parameters
----------
graphcut_function : function
The graph cut to use (e.g. `graphcut_stawiaski`).
graphcut_arguments : tuple
List of arguments to pass to the respective subprocesses resp. the ``graphcut_function``.
processes : integer or None
The number of processes to run simultaneously, if not supplied, will be the same
as the number of processors.
Returns
-------
segmentations : tuple of ndarray
        The graph-cut segmentation results as a list of boolean arrays.
"""
# initialize logger
logger = Logger.getInstance()
# check and eventually enhance input parameters
if not processes: processes = multiprocessing.cpu_count()
    if not int == type(processes) or processes <= 0: raise ArgumentError('The number of processes can not be zero or negative.')
    logger.debug('Executing graph cuts in {} subprocesses.'.format(processes))
# creates subprocess pool and execute
pool = multiprocessing.Pool(processes)
results = pool.map(graphcut_function, graphcut_arguments)
return results | 0.012066 |
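The same pool.map pattern in isolation, with a toy stand-in for graphcut_stawiaski (the real worker takes packed image/marker data and returns a boolean segmentation):

import multiprocessing

def toy_cut(arg):
    # Stand-in worker: threshold a small "image"; returns a boolean mask.
    image, threshold = arg
    return [value > threshold for value in image]

if __name__ == '__main__':
    jobs = [([1, 5, 3], 2), ([7, 0, 4], 3)]
    pool = multiprocessing.Pool(2)
    results = pool.map(toy_cut, jobs)
    print(results)  # [[False, True, True], [True, False, True]]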
def init_argparser_loaderplugin_registry(
self, argparser, default=None, help=(
'the name of the registry to use for the handling of loader '
'plugins that may be loaded from the given Python packages'
)):
"""
Default helper for setting up the loaderplugin registries flags.
Note that this is NOT part of the init_argparser due to
implementation specific requirements. Subclasses should
consider modifying the default value help message to cater to the
toolchain it encapsulates.
"""
argparser.add_argument(
'--loaderplugin-registry', default=default,
dest=CALMJS_LOADERPLUGIN_REGISTRY_NAME, action='store',
metavar=metavar('registry'),
help=help,
) | 0.002415 |
def method(self, symbol):
'''
Symbol decorator.
'''
assert issubclass(symbol, SymbolBase)
def wrapped(fn):
setattr(symbol, fn.__name__, fn)
return wrapped | 0.014019 |
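A hedged, self-contained sketch of how such a decorator is typically used; Registry and SymbolBase here are hypothetical stand-ins for the real owner class and symbol base class:

class SymbolBase(object):
    pass

class Plus(SymbolBase):
    pass

class Registry(object):
    def method(self, symbol):
        assert issubclass(symbol, SymbolBase)
        def wrapped(fn):
            setattr(symbol, fn.__name__, fn)
        return wrapped

registry = Registry()

@registry.method(Plus)
def led(self, left, right):
    return left + right

# The decorated function is attached to Plus; the module-level name becomes
# None because `wrapped` returns nothing, which is fine for pure registration.
print(Plus().led(2, 3))   # 5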