text (string, 78-104k chars) | score (float64, 0-0.18)
---|---|
def get_stack_refs(refs: list): # copy pasted from Senza
"""
Returns a list of stack references with name and version.
"""
refs = list(refs)
refs.reverse()
stack_refs = []
last_stack = None
while refs:
ref = refs.pop()
if last_stack is not None and re.compile(r'v[0-9][a-zA-Z0-9-]*$').match(ref):
stack_refs.append(StackReference(last_stack, ref))
else:
try:
with open(ref) as fd:
data = yaml.safe_load(fd)
ref = data['SenzaInfo']['StackName']
except (OSError, IOError):
# It's still possible that the ref is a regex
pass
if refs:
version = refs.pop()
else:
version = None
stack_refs.append(StackReference(ref, version))
last_stack = ref
return stack_refs | 0.002181 |
def user_in_all_groups(user, groups):
"""Returns True if the given user is in all given groups"""
return user_is_superuser(user) or all(user_in_group(user, group) for group in groups) | 0.010471 |
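A tiny self-contained illustration of the short-circuit check above; the two helper predicates are hypothetical stand-ins, not the project's real user_is_superuser / user_in_group.
def _demo_is_superuser(user):
    # Stand-in predicate (assumption): only "root" is a superuser.
    return user == "root"
def _demo_in_group(user, group):
    # Stand-in predicate (assumption): membership from a hard-coded map.
    memberships = {"alice": {"dev", "qa"}, "bob": {"dev"}}
    return group in memberships.get(user, set())
for u in ("root", "alice", "bob"):
    print(u, _demo_is_superuser(u) or all(_demo_in_group(u, g) for g in ["dev", "qa"]))
# root True, alice True, bob False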
def calculate_chunks(self):
"""
Work out the number of chunks the data is in, for cases
where the meta data doesn't change at all so there is no
lead in.
Also increments the number of values for objects in this
segment, based on the number of chunks.
"""
if self.toc['kTocDAQmxRawData']:
# chunks defined differently for DAQmxRawData format
try:
data_size = next(
o.number_values * o.raw_data_width
for o in self.ordered_objects
if o.has_data and o.number_values * o.raw_data_width > 0)
except StopIteration:
data_size = 0
else:
data_size = sum([
o.data_size
for o in self.ordered_objects if o.has_data])
total_data_size = self.next_segment_offset - self.raw_data_offset
if data_size < 0 or total_data_size < 0:
raise ValueError("Negative data size")
elif data_size == 0:
# Sometimes kTocRawData is set, but there isn't actually any data
if total_data_size != data_size:
raise ValueError(
"Zero channel data size but data length based on "
"segment offset is %d." % total_data_size)
self.num_chunks = 0
return
chunk_remainder = total_data_size % data_size
if chunk_remainder == 0:
self.num_chunks = int(total_data_size // data_size)
# Update data count for the overall tdms object
# using the data count for this segment.
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object.number_values += (
obj.number_values * self.num_chunks)
else:
log.warning(
"Data size %d is not a multiple of the "
"chunk size %d. Will attempt to read last chunk" %
(total_data_size, data_size))
self.num_chunks = 1 + int(total_data_size // data_size)
self.final_chunk_proportion = (
float(chunk_remainder) / float(data_size))
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object.number_values += (
obj.number_values * (self.num_chunks - 1) + int(
obj.number_values * self.final_chunk_proportion)) | 0.000793 |
def _clear_timeouts(self, cache_timeout):
"""
Clear the cache of timed out results.
"""
for key in list(self.timeouts):
if timer() - self.timeouts[key] > cache_timeout:
del self.timeouts[key]
del self.cache[key] | 0.006944 |
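The method above relies on a companion cache dict keyed like timeouts and on a timer() clock; a minimal standalone sketch of the same eviction pattern (assumed class shape, not the original module):
from time import monotonic as timer  # assumption: any monotonic clock works here

class TimedCache:
    """Toy cache whose entries expire after cache_timeout seconds."""
    def __init__(self):
        self.cache = {}
        self.timeouts = {}
    def set(self, key, value):
        self.cache[key] = value
        self.timeouts[key] = timer()
    def clear_timeouts(self, cache_timeout):
        # Same eviction loop as above: drop entries older than cache_timeout.
        for key in list(self.timeouts):
            if timer() - self.timeouts[key] > cache_timeout:
                del self.timeouts[key]
                del self.cache[key]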
def QA_fetch_future_day_adv(
code,
start, end=None,
if_drop_index=True,
        # 🛠 todo: the `collections` parameter is not used and the database is fixed; remove this variable later
        collections=DATABASE.index_day):
    '''
    :param code: str, e.g. 600085
    :param start: str, start date, e.g. 2011-01-01
    :param end: str, end date, e.g. 2011-05-01
    :param if_drop_index: True/False, whether to drop the dataframe index
    :param collections: mongodb database
    :return:
    '''
    'Fetch futures daily data'
    end = start if end is None else end
    start = str(start)[0:10]
    end = str(end)[0:10]
    # 🛠 todo: report an error if the start date is after the end date
    # 🛠 todo: handle the case where start and end are equal
res = QA_fetch_future_day(code, start, end, format='pd')
if res is None:
print("QA Error QA_fetch_future_day_adv parameter code=%s start=%s end=%s call QA_fetch_future_day return None" % (
code, start, end))
else:
res_set_index = res.set_index(['date', 'code'])
# if res_set_index is None:
# print("QA Error QA_fetch_index_day_adv set index 'date, code' return None")
# return None
return QA_DataStruct_Future_day(res_set_index) | 0.002669 |
def NAND(*args, **kwargs):
"""
    All ``args`` must raise an exception when called for the overall check to pass.
    On failure, raise the specified exception or the first exception.
    :param iterable[Certifier] args:
        The certifiers to call.
    :param callable kwargs['exc']:
        Callable that accepts the unexpectedly raised exception as argument and returns an
        exception to raise.
"""
errors = []
for arg in args:
try:
arg()
except CertifierError as e:
errors.append(e)
if (len(errors) != len(args)) and len(args) > 1:
exc = kwargs.get(
'exc',
CertifierValueError('Expecting no certified values'),
)
if exc is not None:
raise exc | 0.002663 |
def count_variants_function_builder(function_name, filterable_variant_function=None):
"""
Creates a function that counts variants that are filtered by the provided filterable_variant_function.
The filterable_variant_function is a function that takes a filterable_variant and returns True or False.
Users of this builder need not worry about applying e.g. the Cohort's default `filter_fn`. That will be applied as well.
"""
@count_function
def count(row, cohort, filter_fn, normalized_per_mb, **kwargs):
def count_filter_fn(filterable_variant, **kwargs):
assert filter_fn is not None, "filter_fn should never be None, but it is."
return ((filterable_variant_function(filterable_variant) if filterable_variant_function is not None else True) and
filter_fn(filterable_variant, **kwargs))
patient_id = row["patient_id"]
return cohort.load_variants(
patients=[cohort.patient_from_id(patient_id)],
filter_fn=count_filter_fn,
**kwargs)
count.__name__ = function_name
count.__doc__ = str("".join(inspect.getsourcelines(filterable_variant_function)[0])) if filterable_variant_function is not None else ""
return count | 0.006385 |
def get_dip(self):
"""
Return the fault dip as the average dip over the mesh.
The average dip is defined as the weighted mean inclination
of all the mesh cells. See
:meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_inclination_and_azimuth`
:returns:
The average dip, in decimal degrees.
"""
# uses the same approach as in simple fault surface
if self.dip is None:
mesh = self.mesh
self.dip, self.strike = mesh.get_mean_inclination_and_azimuth()
return self.dip | 0.003396 |
async def punsubscribe(self, *args):
"""
        Unsubscribe from the supplied patterns. If empty, unsubscribe from
all patterns.
"""
if args:
args = list_or_args(args[0], args[1:])
return await self.execute_command('PUNSUBSCRIBE', *args) | 0.00692 |
def loadData(cls):
""" Sigh, this was indeed a poorly conceived approach
since it hard blocks when the files are not in the source
so you can't easily bootstrap from another source and the
cognitive overhead is way, way too high :/
Adding dry_run/bootstrap to __new__ sort of helps? """
""" Have to run this out here because resSource is handicapped """
data = []
if cls.source_images.exists():
for folder in cls.source_images.glob('*'):
plate_num = int(folder.stem)
text_file = cls.source / f'{plate_num}.txt'
if not text_file.exists() or cls.run_ocr:
legends = []
raw_text = ''
for img in folder.glob('*.png'):
print('num', plate_num, img.stem)
p = subprocess.Popen(('tesseract',
img.as_posix(),
'stdout', '-l', 'eng', '--oem', '2', '--psm', '6'),
stdout=subprocess.PIPE)
bytes_text, err = p.communicate()
raw_text += bytes_text.decode() + '\n'
with open(text_file, 'wt') as f:
f.write(raw_text)
else:
with open(text_file, 'rt') as f:
raw_text = f.read()
legends = get_legends(raw_text)
data.append((plate_num, legends))
elif cls.source.exists():
for text_file in cls.source.glob('*.txt'):
plate_num = int(text_file.stem)
with open(text_file, 'rt') as f:
raw_text = f.read()
legends = get_legends(raw_text)
data.append((plate_num, legends))
return data | 0.003629 |
def msg_curse(self, args=None, max_width=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist, not empty (issue #871) and plugin not disabled
if not self.stats or (self.stats == {}) or self.is_disable():
return ret
# Build the string message
# Header
msg = '{:8}'.format('LOAD')
ret.append(self.curse_add_line(msg, "TITLE"))
# Core number
if 'cpucore' in self.stats and self.stats['cpucore'] > 0:
msg = '{}-core'.format(int(self.stats['cpucore']))
ret.append(self.curse_add_line(msg))
# New line
ret.append(self.curse_new_line())
# 1min load
msg = '{:8}'.format('1 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min1'])
ret.append(self.curse_add_line(msg))
# New line
ret.append(self.curse_new_line())
# 5min load
msg = '{:8}'.format('5 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min5'])
ret.append(self.curse_add_line(
msg, self.get_views(key='min5', option='decoration')))
# New line
ret.append(self.curse_new_line())
# 15min load
msg = '{:8}'.format('15 min:')
ret.append(self.curse_add_line(msg))
msg = '{:>6.2f}'.format(self.stats['min15'])
ret.append(self.curse_add_line(
msg, self.get_views(key='min15', option='decoration')))
return ret | 0.001855 |
def current_url_name(context):
"""
Returns the name of the current URL, namespaced, or False.
Example usage:
{% current_url_name as url_name %}
<a href="#"{% if url_name == 'myapp:home' %} class="active"{% endif %}">Home</a>
"""
url_name = False
if context.request.resolver_match:
url_name = "{}:{}".format(
context.request.resolver_match.namespace,
context.request.resolver_match.url_name
)
return url_name | 0.003604 |
def copy(self):
"""Copy a reaction
The referenced metabolites and genes are also copied.
"""
# no references to model when copying
model = self._model
self._model = None
for i in self._metabolites:
i._model = None
for i in self._genes:
i._model = None
# now we can copy
new_reaction = deepcopy(self)
# restore the references
self._model = model
for i in self._metabolites:
i._model = model
for i in self._genes:
i._model = model
return new_reaction | 0.003236 |
def AddEntry(self, thing, label=None, style=None):
"""
Add an entry to the legend.
If `label` is None, `thing.GetTitle()` will be used as the label.
If `style` is None, `thing.legendstyle` is used if present,
otherwise `P`.
"""
if isinstance(thing, HistStack):
things = thing
else:
things = [thing]
for thing in things:
if getattr(thing, 'inlegend', True):
thing_label = thing.GetTitle() if label is None else label
thing_style = getattr(thing, 'legendstyle', 'P') if style is None else style
super(Legend, self).AddEntry(thing, thing_label, thing_style)
keepalive(self, thing) | 0.003995 |
def saveAsJSON(self, fp):
"""
Write the records out as JSON. The first JSON object saved contains
the BLAST parameters.
@param fp: A C{str} file pointer to write to.
"""
first = True
for record in self.records():
if first:
print(dumps(self.params, separators=(',', ':')), file=fp)
first = False
print(dumps(self._convertBlastRecordToDict(record),
separators=(',', ':')), file=fp) | 0.003876 |
def checkJobGraphAcylic(self):
"""
:raises toil.job.JobGraphDeadlockException: if the connected component \
of jobs containing this job contains any cycles of child/followOn dependencies \
in the *augmented job graph* (see below). Such cycles are not allowed \
in valid job graphs.
A follow-on edge (A, B) between two jobs A and B is equivalent \
to adding a child edge to B from (1) A, (2) from each child of A, \
        and (3) from the successors of each child of A. We call each such \
        edge an "implied" edge. The augmented job graph is a job graph including \
all the implied edges.
For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \
a graph with no follow-ons. The former follow-on case could be improved!
"""
#Get the root jobs
roots = self.getRootJobs()
if len(roots) == 0:
raise JobGraphDeadlockException("Graph contains no root jobs due to cycles")
#Get implied edges
extraEdges = self._getImpliedEdges(roots)
#Check for directed cycles in the augmented graph
visited = set()
for root in roots:
root._checkJobGraphAcylicDFS([], visited, extraEdges) | 0.009295 |
def write_collection_from_tmpfile(self, collection_id, tmpfi, parent_sha, auth_info, commit_msg=''):
"""Given a collection_id, temporary filename of content, branch and auth_info
"""
return self.write_doc_from_tmpfile(collection_id,
tmpfi,
parent_sha,
auth_info,
commit_msg,
doctype_display_name="collection") | 0.007299 |
def score(self, eval_data, eval_metric, num_batch=None, batch_end_callback=None,
score_end_callback=None,
reset=True, epoch=0, sparse_row_id_fn=None):
"""Runs prediction on ``eval_data`` and evaluates the performance according to
the given ``eval_metric``.
        Check out the `Module Tutorial <http://mxnet.io/tutorials/basic/module.html>`_ to see
        an end-to-end use-case.
Parameters
----------
eval_data : DataIter
Evaluation data to run prediction on.
eval_metric : EvalMetric or list of EvalMetrics
Evaluation metric to use.
num_batch : int
Number of batches to run. Defaults to ``None``, indicating run until the `DataIter`
finishes.
batch_end_callback : function
Could also be a list of functions.
reset : bool
Defaults to ``True``. Indicates whether we should reset `eval_data` before starting
evaluating.
epoch : int
Defaults to 0. For compatibility, this will be passed to callbacks (if any).
During training, this will correspond to the training epoch number.
sparse_row_id_fn : A callback function
The function takes `data_batch` as an input and returns a dict of
str -> NDArray. The resulting dict is used for pulling row_sparse
parameters from the kvstore, where the str key is the name of the param,
and the value is the row id of the param to pull.
Examples
--------
>>> # An example of using score for prediction.
>>> # Evaluate accuracy on val_dataiter
>>> metric = mx.metric.Accuracy()
>>> mod.score(val_dataiter, metric)
>>> mod.score(val_dataiter, ['mse', 'acc'])
"""
assert self.binded and self.params_initialized
if reset:
eval_data.reset()
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
eval_metric.reset()
actual_num_batch = 0
for nbatch, eval_batch in enumerate(eval_data):
if num_batch is not None and nbatch == num_batch:
break
self.prepare(eval_batch, sparse_row_id_fn=sparse_row_id_fn)
self.forward(eval_batch, is_train=False)
if isinstance(eval_batch, list):
self.update_metric(eval_metric, [eb.label for eb in eval_batch], pre_sliced=True)
else:
self.update_metric(eval_metric, eval_batch.label)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(batch_end_callback):
callback(batch_end_params)
actual_num_batch += 1
if score_end_callback:
params = BatchEndParam(epoch=epoch,
nbatch=actual_num_batch,
eval_metric=eval_metric,
locals=locals())
for callback in _as_list(score_end_callback):
callback(params)
return eval_metric.get_name_value() | 0.003485 |
def _initialise_classifier(self, comparison_vectors):
"""Set the centers of the clusters."""
# Set the start point of the classifier.
self.kernel.init = numpy.array(
[[0.05] * len(list(comparison_vectors)),
[0.95] * len(list(comparison_vectors))]) | 0.006734 |
def cmd_ls(self, *args):
"""ls [options]
Execute list files command
"""
cmd_str = ' '.join(['ls'] + list(args))
self.plugin.exec_shell(cmd_str) | 0.01087 |
def make_event(event: Callable) -> Callable:
"""Create an event from a method signature."""
@property # type: ignore
@wraps(event)
def actualevent(self): # pylint: disable=missing-docstring
name = event.__name__[3:]
try:
# the getter post processing function
# is preserved with an underscore
getter = event(self).__name__
except AttributeError:
getter = None
return Event(name, self._uuid, getter) # pylint: disable=protected-access
return actualevent | 0.003597 |
def OpenFile(self, filepath):
"""open()-replacement that automatically handles zip files.
This assumes there is at most one .zip in the file path.
Args:
filepath: the path to the file to open.
Returns:
An open file-like object.
"""
archive = False
if '.zip/' in filepath:
archive = True
archive_type = '.zip'
if '.par/' in filepath:
archive = True
archive_type = '.par'
if archive:
path, archived_file = filepath.split(archive_type)
path += archive_type
zip_file = zipfile.ZipFile(path)
return zip_file.open(archived_file.strip('/'))
return open(filepath) | 0.01372 |
def buffer_leave(self, filename):
"""User is changing of buffer."""
self.log.debug('buffer_leave: %s', filename)
# TODO: This is questionable, and we should use location list for
# single-file errors.
self.editor.clean_errors() | 0.007491 |
def artist(self):
"""
:class:`Artist` object of album's artist
"""
if not self._artist:
self._artist = Artist(self._artist_id, self._artist_name, self._connection)
return self._artist | 0.012766 |
def infer_and_cast(value: Any):
"""
In some cases we'll be feeding params dicts to functions we don't own;
for example, PyTorch optimizers. In that case we can't use ``pop_int``
or similar to force casts (which means you can't specify ``int`` parameters
using environment variables). This function takes something that looks JSON-like
and recursively casts things that look like (bool, int, float) to (bool, int, float).
"""
# pylint: disable=too-many-return-statements
if isinstance(value, (int, float, bool)):
# Already one of our desired types, so leave as is.
return value
elif isinstance(value, list):
# Recursively call on each list element.
return [infer_and_cast(item) for item in value]
elif isinstance(value, dict):
# Recursively call on each dict value.
return {key: infer_and_cast(item) for key, item in value.items()}
elif isinstance(value, str):
# If it looks like a bool, make it a bool.
if value.lower() == "true":
return True
elif value.lower() == "false":
return False
else:
# See if it could be an int.
try:
return int(value)
except ValueError:
pass
# See if it could be a float.
try:
return float(value)
except ValueError:
# Just return it as a string.
return value
else:
raise ValueError(f"cannot infer type of {value}") | 0.001927 |
def CreateDevice(self, device_address):
'''Create a new device '''
device_name = 'dev_' + device_address.replace(':', '_').upper()
adapter_path = self.path
path = adapter_path + '/' + device_name
if path not in mockobject.objects:
raise dbus.exceptions.DBusException(
'Could not create device for %s.' % device_address,
name='org.bluez.Error.Failed')
adapter = mockobject.objects[self.path]
adapter.EmitSignal(ADAPTER_IFACE, 'DeviceCreated',
'o', [dbus.ObjectPath(path, variant_level=1)])
return dbus.ObjectPath(path, variant_level=1) | 0.001603 |
def send(self, event_name, *args, **kwargs):
"""
        Calls all callbacks registered for `event_name`. The arguments given
are passed to each callback.
:param event_name: The event name to call the callbacks for.
:param args: The positional arguments passed to the callbacks.
:param kwargs: The keyword arguments passed to the callbacks.
Example:
>>> callbacks = Callbacks()
>>> @callbacks.register("my_event")
... def hello(your_name):
... print("Hello %s, how are you today." % your_name)
...
        >>> callbacks.send("my_event", "Wessie")
Hello Wessie, how are you today.
"""
for callback in self.callbacks[event_name]:
# Handle errors (and maybe return values)
callback(*args, **kwargs) | 0.002265 |
def validate(self, value):
"""
Accepts: str, unicode
Returns: list of tuples in the format (ip, port)
"""
val = super(SlavesValue, self).validate(value)
slaves = val.replace(" ", "")
slaves = filter(None, slaves.split(','))
slaves = [x.split(":") for x in slaves]
res = list()
for x in slaves:
self._validate_ip(x[0])
if len(x) == 1:
res.append((x[0], 53))
else:
res.append((x[0], int(x[1])))
return res | 0.003571 |
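The same "host[:port]" parsing can be sketched standalone (default port 53 as above; the per-address IP validation is omitted here):
def parse_slaves(value, default_port=53):
    # Split a comma-separated "ip[:port]" string into (ip, port) tuples.
    parts = [p for p in value.replace(" ", "").split(",") if p]
    result = []
    for part in parts:
        host, _, port = part.partition(":")
        result.append((host, int(port) if port else default_port))
    return result

print(parse_slaves("10.0.0.1, 10.0.0.2:5353"))
# -> [('10.0.0.1', 53), ('10.0.0.2', 5353)]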
def process_literal_param(self, value: Optional[List[int]],
dialect: Dialect) -> str:
"""Convert things on the way from Python to the database."""
retval = self._intlist_to_dbstr(value)
return retval | 0.011858 |
def outdict(self, ndigits=3):
"""Return dictionary structure rounded to a given precision."""
output = self.__dict__.copy()
for item in output:
output[item] = round(output[item], ndigits)
return output | 0.00813 |
def sinterstore(self, destkey, key, *keys):
"""Intersect multiple sets and store the resulting set in a key."""
return self.execute(b'SINTERSTORE', destkey, key, *keys) | 0.01087 |
def _async_recv(self):
"""No raw bytes should escape from this, all byte encoding and
decoding should be handling inside this function"""
logging.info("Receive loop started")
recbuffer = b""
while not self._stop_event.is_set():
time.sleep(0.01)
try:
recbuffer = recbuffer + self._socket.recv(1024)
data = recbuffer.split(b'\r\n')
recbuffer = data.pop()
if data:
for line in data:
self._process_data(line.decode(encoding='UTF-8', errors='ignore'))
except BlockingIOError as e:
pass
logging.info("Receive loop stopped") | 0.004127 |
def reject(self, timeout=1):
"""
Check that the process survives for timeout. Useful for checking whether program is waiting on input.
:param timeout: number of seconds to wait
:type timeout: int / float
:raises check50.Failure: if process ends before ``timeout``
"""
log(_("checking that input was rejected..."))
try:
self._wait(timeout)
except Failure as e:
if not isinstance(e.__cause__, TIMEOUT):
raise
else:
raise Failure(_("expected program to reject input, but it did not"))
return self | 0.006299 |
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Load Logical Partition (requires classic mode)."""
assert wait_for_completion is True # async not supported yet
lpar_oid = uri_parms[0]
lpar_uri = '/api/logical-partitions/' + lpar_oid
try:
lpar = hmc.lookup_by_uri(lpar_uri)
except KeyError:
raise InvalidResourceError(method, uri)
cpc = lpar.manager.parent
assert not cpc.dpm_enabled
status = lpar.properties.get('status', None)
force = body.get('force', False) if body else False
clear_indicator = body.get('clear-indicator', True) if body else True
store_status_indicator = body.get('store-status-indicator',
False) if body else False
if status == 'not-activated':
raise ConflictError(method, uri, reason=0,
message="LPAR {!r} could not be loaded "
"because the LPAR is in status {}.".
format(lpar.name, status))
elif status == 'operating' and not force:
raise ServerError(method, uri, reason=263,
message="LPAR {!r} could not be loaded "
"because the LPAR is already loaded "
"(and force was not specified).".
format(lpar.name))
load_address = body.get('load-address', None) if body else None
if not load_address:
# Starting with z14, this parameter is optional and a last-used
# property is available.
load_address = lpar.properties.get('last-used-load-address', None)
if load_address is None:
# TODO: Verify actual error for this case on a z14.
raise BadRequestError(method, uri, reason=5,
message="LPAR {!r} could not be loaded "
"because a load address is not specified "
"in the request or in the Lpar last-used "
"property".
format(lpar.name))
load_parameter = body.get('load-parameter', None) if body else None
if not load_parameter:
# Starting with z14, a last-used property is available.
load_parameter = lpar.properties.get(
'last-used-load-parameter', None)
if load_parameter is None:
load_parameter = ''
# Reflect the load in the resource
if clear_indicator:
lpar.properties['memory'] = ''
if store_status_indicator:
lpar.properties['stored-status'] = status
else:
lpar.properties['stored-status'] = None
lpar.properties['status'] = LparLoadHandler.get_status()
lpar.properties['last-used-load-address'] = load_address
lpar.properties['last-used-load-parameter'] = load_parameter | 0.000968 |
def extract_log(log_path, dict_type=dict):
"""
Parses the log file generated by a launcher and returns
dictionary with tid keys and specification values.
Ordering can be maintained by setting dict_type to the
appropriate constructor (i.e. OrderedDict). Keys are converted
from unicode to strings for kwarg use.
"""
log_path = (log_path if os.path.isfile(log_path)
else os.path.join(os.getcwd(), log_path))
with open(log_path,'r') as log:
splits = (line.split() for line in log)
uzipped = ((int(split[0]), json.loads(" ".join(split[1:]))) for split in splits)
szipped = [(i, dict((str(k),v) for (k,v) in d.items())) for (i,d) in uzipped]
return dict_type(szipped) | 0.010013 |
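A small round trip of the "tid json-spec" line format parsed above, assuming extract_log is in scope:
import json
import os
import tempfile
from collections import OrderedDict

with tempfile.NamedTemporaryFile('w', suffix='.log', delete=False) as f:
    f.write('0 %s\n' % json.dumps({"lr": 0.1, "seed": 1}))
    f.write('1 %s\n' % json.dumps({"lr": 0.01, "seed": 2}))
    log_file = f.name
print(extract_log(log_file, dict_type=OrderedDict))
# -> OrderedDict([(0, {'lr': 0.1, 'seed': 1}), (1, {'lr': 0.01, 'seed': 2})])
os.remove(log_file)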
def buffered_generator(source_gen, buffer_size=2, use_multiprocessing=False):
r"""
Generator that runs a slow source generator in a separate process.
My generate function still seems faster on test cases.
    However, this function is more flexible in its compatibility.
Args:
source_gen (iterable): slow generator
buffer_size (int): the maximal number of items to pre-generate
(length of the buffer) (default = 2)
use_multiprocessing (bool): if False uses GIL-hindered threading
            instead of multiprocessing (default = False).
Note:
use_multiprocessing = True seems to freeze if passed in a generator
built by six.moves.map.
References:
Taken from Sander Dieleman's data augmentation pipeline
https://github.com/benanne/kaggle-ndsb/blob/11a66cdbddee16c69514b9530a727df0ac6e136f/buffering.py
CommandLine:
python -m utool.util_parallel --test-buffered_generator:0
python -m utool.util_parallel --test-buffered_generator:1
Ignore:
>>> #functime = timeit.timeit(
>>> # 'ut.is_prime(' + str(prime) + ')', setup='import utool as ut',
>>> # number=500) / 1000.0
Example:
>>> # DISABLE_DOCTEST
>>> # UNSTABLE_DOCTEST
>>> from utool.util_parallel import * # NOQA
>>> import utool as ut
>>> num = 2 ** 14
>>> func = ut.is_prime
>>> data = [38873] * num
>>> data = list(range(num))
>>> with ut.Timer('serial') as t1:
... result1 = list(map(func, data))
>>> with ut.Timer('ut.generate2') as t3:
... result3 = list(ut.generate2(func, zip(data), chunksize=2, quiet=1, verbose=0))
>>> with ut.Timer('ut.buffered_generator') as t2:
... result2 = list(ut.buffered_generator(map(func, data)))
>>> assert len(result1) == num and len(result2) == num and len(result3) == num
>>> assert result3 == result2, 'inconsistent results'
>>> assert result1 == result2, 'inconsistent results'
Example1:
>>> # DISABLE_DOCTEST
>>> # VERYSLLOOWWW_DOCTEST
>>> from utool.util_parallel import _test_buffered_generator
>>> _test_buffered_generator2()
"""
if buffer_size < 2:
raise RuntimeError("Minimal buffer_ size is 2!")
if use_multiprocessing:
print('WARNING seems to freeze if passed in a generator')
#assert False, 'dont use this buffered multiprocessing'
if False:
pool = multiprocessing.Pool(processes=get_default_numprocs(),
initializer=init_worker,
maxtasksperchild=None)
Process = pool.Process
else:
Process = multiprocessing.Process
_Queue = multiprocessing.Queue
target = _buffered_generation_process
else:
_Queue = queue.Queue
Process = KillableThread
target = _buffered_generation_thread
# the effective buffer_ size is one less, because the generation process
# will generate one extra element and block until there is room in the
# buffer_.
buffer_ = _Queue(maxsize=buffer_size - 1)
    # Previously None was used as a sentinel, which fails when source_gen
    # generates None. We need an object that will never be produced by the
    # source; a reasonable hack is to use the StopIteration exception instead.
sentinal = StopIteration
process = Process(
target=target,
args=(iter(source_gen), buffer_, sentinal)
)
#if not use_multiprocessing:
process.daemon = True
process.start()
while True:
#output = buffer_.get(timeout=1.0)
output = buffer_.get()
if output is sentinal:
raise StopIteration
yield output | 0.001037 |
def import_from_dict(session, data, sync=[]):
"""Imports databases and druid clusters from dictionary"""
if isinstance(data, dict):
logging.info('Importing %d %s',
len(data.get(DATABASES_KEY, [])),
DATABASES_KEY)
for database in data.get(DATABASES_KEY, []):
Database.import_from_dict(session, database, sync=sync)
logging.info('Importing %d %s',
len(data.get(DRUID_CLUSTERS_KEY, [])),
DRUID_CLUSTERS_KEY)
for datasource in data.get(DRUID_CLUSTERS_KEY, []):
DruidCluster.import_from_dict(session, datasource, sync=sync)
session.commit()
else:
logging.info('Supplied object is not a dictionary.') | 0.001311 |
def _pprint(self, element, dim_label, vals):
"""Helper function to convert values to corresponding dimension type.
"""
if vals.dtype.kind not in 'SU':
dim = element.gridded.get_dimension(dim_label)
return [dim.pprint_value(v) for v in vals]
return vals | 0.006431 |
def do_pack(self):
"""! @brief Handle 'pack' subcommand."""
verbosity = self._args.verbose - self._args.quiet
cache = cmsis_pack_manager.Cache(verbosity < 0, False)
if self._args.clean:
LOG.info("Removing all pack data...")
cache.cache_clean()
if self._args.update:
LOG.info("Updating pack index...")
cache.cache_descriptors()
if self._args.show:
packs = pack_target.ManagedPacks.get_installed_packs()
pt = self._get_pretty_table(["Vendor", "Pack", "Version"])
for ref in packs:
pt.add_row([
ref.vendor,
ref.pack,
ref.version,
])
print(pt)
if self._args.find_devices or self._args.install_devices:
if not cache.index:
LOG.info("No pack index present, downloading now...")
cache.cache_descriptors()
patterns = self._args.find_devices or self._args.install_devices
# Find matching part numbers.
matches = set()
for pattern in patterns:
# Using fnmatch.fnmatch() was failing to match correctly.
pat = re.compile(fnmatch.translate(pattern + "*"), re.IGNORECASE)
results = {name for name in cache.index.keys() if pat.match(name)}
matches.update(results)
if not matches:
LOG.warning("No matching devices. Please make sure the pack index is up to date.")
return
if self._args.find_devices:
pt = self._get_pretty_table(["Part", "Vendor", "Pack", "Version"])
for name in sorted(matches):
info = cache.index[name]
ref, = cache.packs_for_devices([info])
pt.add_row([
info['name'],
ref.vendor,
ref.pack,
ref.version,
])
print(pt)
elif self._args.install_devices:
devices = [cache.index[dev] for dev in matches]
packs = cache.packs_for_devices(devices)
if not self._args.no_download:
print("Downloading packs (press Control-C to cancel):")
else:
print("Would download packs:")
for pack in packs:
print(" " + str(pack))
if not self._args.no_download:
cache.download_pack_list(packs) | 0.004645 |
def post(self, path, data=None, json=None, headers=None, **kwargs):
"""
Sends a POST request to host/path.
:param path: String, resource path on server
:param data: Dictionary, bytes or file-like object to send in the body of the request
:param json: JSON formatted data to send in the body of the request
:param headers: Dictionary of HTTP headers to be sent with the request,
overwrites default headers if there is overlap
:param kwargs: Other arguments used in the requests.request call
valid parameters in kwargs are the optional parameters of Requests.Request
http://docs.python-requests.org/en/master/api/
:return: requests.Response
:raises: RequestException
"""
if headers is not None:
merger = jsonmerge.Merger(SCHEMA)
kwargs["headers"] = merger.merge(self.defaultHeaders, headers)
else:
kwargs["headers"] = self.defaultHeaders
url = combine_urls(self.host, path)
if self.cert is not None:
kwargs["cert"] = self.cert
self.logger.debug("Trying to send HTTP POST to {}".format(url))
try:
resp = requests.post(url, data, json, **kwargs)
self._log_response(resp)
except requests.RequestException as es:
self._log_exception(es)
raise
return resp | 0.002823 |
def _add_group_remover(self):
"""
Add a ``_remove_eg_x()`` method to the element class for this choice
group.
"""
def _remove_choice_group(obj):
for tagname in self._member_nsptagnames:
obj.remove_all(tagname)
_remove_choice_group.__doc__ = (
'Remove the current choice group child element if present.'
)
self._add_to_class(
self._remove_choice_group_method_name, _remove_choice_group
) | 0.003914 |
def _abilities(self, fn=None):
"""Return the list of abilities filtered by `fn`."""
out = {}
for cmd in self._obs.observation.abilities:
ability = _Ability(cmd, self._static_data.abilities)
if not fn or fn(ability):
out[ability.ability_id] = ability
return list(out.values()) | 0.009646 |
def draw_edges(self):
"""
Renders edges to the figure.
"""
for i, (start, end) in enumerate(self.graph.edges()):
start_theta = node_theta(self.nodes, start)
end_theta = node_theta(self.nodes, end)
verts = [
get_cartesian(self.plot_radius, start_theta),
(0, 0),
get_cartesian(self.plot_radius, end_theta),
]
color = self.edge_colors[i]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
lw = self.edge_widths[i]
path = Path(verts, codes)
patch = patches.PathPatch(
path, lw=lw, edgecolor=color, zorder=1, **self.edgeprops
)
self.ax.add_patch(patch) | 0.002591 |
def register_device(self, word_device):
"""
.. _register_device:
        Register the WordDevice_ ``word_device`` in the bus and
        return the start address of the device.
raises: BUSSetupError_, if the device cannot be registered.
"""
if(self._lock):
raise BUSSetupError("BUS already locked.")
size = word_device.size
if(self.current_max_offset + size >= self.max_addr):
raise BUSSetupError("Addresspace({}) would exceed width of BUS({})".format(self.current_max_offset+ size,
self.width))
self.start_addresses[word_device] = self.current_max_offset
res = self.current_max_offset
self.current_max_offset += size
self.index[range(res, self.current_max_offset)] = word_device
self.devices.append(word_device)
return res | 0.030708 |
def _handleLookupType2Format2(subtable, lookupIndex, subtableIndex):
"""
Extract kerning, left class and right class dictionaries from a Lookup Type 2 Format 2.
"""
# extract the classes
leftClasses = _extractFeatureClasses(lookupIndex=lookupIndex, subtableIndex=subtableIndex, classDefs=subtable.ClassDef1.classDefs, coverage=subtable.Coverage.glyphs)
rightClasses = _extractFeatureClasses(lookupIndex=lookupIndex, subtableIndex=subtableIndex, classDefs=subtable.ClassDef2.classDefs)
# extract the pairs
kerning = {}
for class1RecordIndex, class1Record in enumerate(subtable.Class1Record):
for class2RecordIndex, class2Record in enumerate(class1Record.Class2Record):
leftClass = (lookupIndex, subtableIndex, class1RecordIndex)
rightClass = (lookupIndex, subtableIndex, class2RecordIndex)
valueFormat1 = subtable.ValueFormat1
if valueFormat1:
value = class2Record.Value1
else:
value = class2Record.Value2
if hasattr(value, "XAdvance") and value.XAdvance != 0:
value = value.XAdvance
kerning[leftClass, rightClass] = value
return kerning, leftClasses, rightClasses | 0.00401 |
def _build_crawlid_info(self, master, dict):
'''
Builds the crawlid info object
@param master: the master dict
@param dict: the dict object received
@return: the crawlid info object
'''
master['total_pending'] = 0
master['total_domains'] = 0
master['appid'] = dict['appid']
master['crawlid'] = dict['crawlid']
master['spiderid'] = dict['spiderid']
master['domains'] = {}
timeout_key = 'timeout:{sid}:{aid}:{cid}'.format(sid=dict['spiderid'],
aid=dict['appid'],
cid=dict['crawlid'])
if self.redis_conn.exists(timeout_key):
master['expires'] = self.redis_conn.get(timeout_key)
# get all domain queues
match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
for key in self.redis_conn.scan_iter(match=match_string):
domain = key.split(":")[1]
sortedDict = self._get_bin(key)
# now iterate through binned dict
for score in sortedDict:
for item in sortedDict[score]:
if 'meta' in item:
item = item['meta']
if item['appid'] == dict['appid'] and item['crawlid'] == dict['crawlid']:
if domain not in master['domains']:
master['domains'][domain] = {}
master['domains'][domain]['total'] = 0
master['domains'][domain]['high_priority'] = -9999
master['domains'][domain]['low_priority'] = 9999
master['total_domains'] = master['total_domains'] + 1
master['domains'][domain]['total'] = master['domains'][domain]['total'] + 1
if item['priority'] > master['domains'][domain]['high_priority']:
master['domains'][domain]['high_priority'] = item['priority']
if item['priority'] < master['domains'][domain]['low_priority']:
master['domains'][domain]['low_priority'] = item['priority']
master['total_pending'] = master['total_pending'] + 1
return master | 0.003815 |
def index(self, values=None, only_index=None):
"""
Index all values stored in the field, or only given ones if any.
"""
assert self.indexable, "Field not indexable"
assert not only_index or self.has_index(only_index), "Invalid index"
if only_index:
only_index = only_index if isclass(only_index) else only_index.__class__
if values is None:
values = self.proxy_get()
for value in values:
if value is not None:
needs_to_check_uniqueness = bool(self.unique)
for index in self._indexes:
if only_index and not isinstance(index, only_index):
continue
index.add(value, check_uniqueness=needs_to_check_uniqueness and index.handle_uniqueness)
if needs_to_check_uniqueness and index.handle_uniqueness:
# uniqueness check is done for this value
needs_to_check_uniqueness = False | 0.003857 |
def measure_topology(fbasename=None, log=None, ml_version=ml_version):
"""Measures mesh topology
Args:
fbasename (str): input filename.
log (str): filename to log output
Returns:
dict: dictionary with the following keys:
vert_num (int): number of vertices
edge_num (int): number of edges
face_num (int): number of faces
            unref_vert_num (int): number of unreferenced vertices
            boundry_edge_num (int): number of boundary edges
            part_num (int): number of parts (components) in the mesh.
            manifold (bool): True if mesh is two-manifold, otherwise False.
            non_manifold_edge (int): number of non-manifold edges.
            non_manifold_vert (int): number of non-manifold vertices
genus (int or str): genus of the mesh, either a number or
'undefined' if the mesh is non-manifold.
holes (int or str): number of holes in the mesh, either a number
or 'undefined' if the mesh is non-manifold.
"""
ml_script1_file = 'TEMP3D_measure_topology.mlx'
ml_script1 = mlx.FilterScript(file_in=fbasename, ml_version=ml_version)
compute.measure_topology(ml_script1)
ml_script1.save_to_file(ml_script1_file)
ml_script1.run_script(log=log, script_file=ml_script1_file)
topology = ml_script1.topology
return topology | 0.000711 |
def password_valid(self, wallet):
"""
Checks whether the password entered for **wallet** is valid
:param wallet: Wallet to check password for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.password_valid(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('password_valid', payload)
return resp['valid'] == '1' | 0.005093 |
def upload(self):
        '''Normal upload mode.
        Uploads made this way cannot be interrupted, but since it is only used
        for small files, the overall impact is minor.'''
info = pcs.upload(self.cookie, self.row[SOURCEPATH_COL],
self.row[PATH_COL], self.upload_mode)
if info:
self.emit('uploaded', self.row[FID_COL])
else:
self.emit('network-error', self.row[FID_COL]) | 0.005305 |
def _run_pyroma(setup_file, show_lint_files):
"""Run pyroma."""
from pyroma import projectdata, ratings
from prospector.message import Message, Location
_debug_linter_status("pyroma", setup_file, show_lint_files)
return_dict = dict()
data = projectdata.get_data(os.getcwd())
all_tests = ratings.ALL_TESTS
for test in [mod() for mod in [t.__class__ for t in all_tests]]:
if test.test(data) is False:
class_name = test.__class__.__name__
key = _Key(setup_file, 0, class_name)
loc = Location(setup_file, None, None, 0, 0)
msg = test.message()
return_dict[key] = Message("pyroma",
class_name,
loc,
msg)
return return_dict | 0.001188 |
def __load_section(self, section_key):
"""
Reads the set of article links for a section if they are not cached.
"""
if self._sections[section_key] is not None: return
articles = []
for page in count(1):
if page > 50:
raise Exception('Last page detection is probably broken')
url = '{domain}{section}&iMenuID=1&iSubMenuID={page}'.format(
domain = DOMAIN,
section = SECTIONS[section_key],
page = page
)
body = self._session.get(url).content
# This is a very hacky way of detecting the last page
# that will probably break again in the future
if "알수 없는 주소" in body: # "Unknown Address"
break
# Parse out all the article links
root = html.fromstring(body)
title_lines = root.find_class('ListNewsLineTitle')
for title_line in title_lines:
title_link = title_line.find('a')
# The links do a JS open in a new window, so we need to parse
# it out using this ugly, brittle junk
href = title_link.get('href')
match = re.match("javascript:article_open\('(.+)'\)", href)
if not match:
raise Exception("The site's link format has changed and is not compatible")
path = match.group(1).decode('string_escape')
articles.append(Article(
self._session,
title_link.text_content().strip(),
DOMAIN + '/en/' + path
))
self._sections[section_key] = articles | 0.007497 |
def interaction_columns_used(self):
"""
Columns from the interaction dataset used for filtering and in
the model. These may come originally from either the choosers or
alternatives tables.
"""
return list(tz.unique(tz.concatv(
util.columns_in_filters(self.interaction_predict_filters),
util.columns_in_formula(self.model_expression)))) | 0.004902 |
def scheme_chunker(text, getreffs):
""" This is the scheme chunker which will resolve the reference giving a callback (getreffs) and a text object with its metadata
:param text: Text Object representing either an edition or a translation
:type text: MyCapytains.resources.inventory.Text
:param getreffs: callback function which retrieves a list of references
:type getreffs: function
:return: List of urn references with their human readable version
:rtype: [(str, str)]
"""
level = len(text.citation)
types = [citation.name for citation in text.citation]
if types == ["book", "poem", "line"]:
level = 2
elif types == ["book", "line"]:
return line_chunker(text, getreffs)
return [tuple([reff.split(":")[-1]]*2) for reff in getreffs(level=level)] | 0.002454 |
def get_container_instance_group(access_token, subscription_id, resource_group,
container_group_name):
'''Get the JSON definition of a container group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
Returns:
HTTP response. JSON body of container group.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerInstance/ContainerGroups/',
container_group_name,
'?api-version=', CONTAINER_API])
return do_get(endpoint, access_token) | 0.002198 |
def del_controller(self):
"""
Deletes the configured OpenFlow controller address.
This method is corresponding to the following ovs-vsctl command::
$ ovs-vsctl del-controller <bridge>
"""
command = ovs_vsctl.VSCtlCommand('del-controller', [self.br_name])
self.run_command([command]) | 0.005814 |
def get_level_lengths(levels, sentinel=''):
"""For each index in each level the function returns lengths of indexes.
Parameters
----------
    levels : list of lists
        List of values for each level.
    sentinel : string, optional
        Value which states that no new index starts there.
Returns
----------
Returns list of maps. For each level returns map of indexes (key is index
in row and value is length of index).
"""
if len(levels) == 0:
return []
control = [True] * len(levels[0])
result = []
for level in levels:
last_index = 0
lengths = {}
for i, key in enumerate(level):
if control[i] and key == sentinel:
pass
else:
control[i] = False
lengths[last_index] = i - last_index
last_index = i
lengths[last_index] = len(level) - last_index
result.append(lengths)
return result | 0.001015 |
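For example, with '' as the sentinel and get_level_lengths above in scope:
levels = [['a', '', '', 'b'],   # '' means "no new index starts here"
          ['x', '', 'y', 'z']]
print(get_level_lengths(levels))
# -> [{0: 3, 3: 1}, {0: 2, 2: 1, 3: 1}]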
def _call_function(name, returner=None, **kwargs):
'''
Calls a function from the specified module.
:param name:
:param kwargs:
:return:
'''
argspec = salt.utils.args.get_function_argspec(__salt__[name])
# func_kw is initialized to a dictionary of keyword arguments the function to be run accepts
func_kw = dict(zip(argspec.args[-len(argspec.defaults or []):], # pylint: disable=incompatible-py3-code
argspec.defaults or []))
# func_args is initialized to a list of positional arguments that the function to be run accepts
func_args = argspec.args[:len(argspec.args or []) - len(argspec.defaults or [])]
arg_type, kw_to_arg_type, na_type, kw_type = [], {}, {}, False
for funcset in reversed(kwargs.get('func_args') or []):
if not isinstance(funcset, dict):
# We are just receiving a list of args to the function to be run, so just append
# those to the arg list that we will pass to the func.
arg_type.append(funcset)
else:
for kwarg_key in six.iterkeys(funcset):
# We are going to pass in a keyword argument. The trick here is to make certain
# that if we find that in the *args* list that we pass it there and not as a kwarg
if kwarg_key in func_args:
kw_to_arg_type[kwarg_key] = funcset[kwarg_key]
continue
else:
# Otherwise, we're good and just go ahead and pass the keyword/value pair into
# the kwargs list to be run.
func_kw.update(funcset)
arg_type.reverse()
for arg in func_args:
if arg in kw_to_arg_type:
arg_type.append(kw_to_arg_type[arg])
_exp_prm = len(argspec.args or []) - len(argspec.defaults or [])
_passed_prm = len(arg_type)
missing = []
if na_type and _exp_prm > _passed_prm:
for arg in argspec.args:
if arg not in func_kw:
missing.append(arg)
if missing:
raise SaltInvocationError('Missing arguments: {0}'.format(', '.join(missing)))
elif _exp_prm > _passed_prm:
raise SaltInvocationError('Function expects {0} parameters, got only {1}'.format(
_exp_prm, _passed_prm))
mret = __salt__[name](*arg_type, **func_kw)
if returner is not None:
returners = salt.loader.returners(__opts__, __salt__)
if returner in returners:
returners[returner]({'id': __opts__['id'], 'ret': mret,
'fun': name, 'jid': salt.utils.jid.gen_jid(__opts__)})
return mret | 0.00452 |
def parse_feeds(self, message_channel=True):
"""
Iterates through each of the feed URLs, parses their items, and
sends any items to the channel that have not been previously
been parsed.
"""
if parse:
for feed_url in self.feeds:
feed = parse(feed_url)
for item in feed.entries:
if item["id"] not in self.feed_items:
self.feed_items.add(item["id"])
if message_channel:
message = self.format_item_message(feed, item)
self.message_channel(message)
return | 0.002878 |
def apply(self, func, applyto='measurement', noneval=nan, setdata=False):
"""
Apply func either to self or to associated data.
If data is not already parsed, try and read it.
Parameters
----------
func : callable
The function either accepts a measurement object or an FCS object.
Does some calculation and returns the result.
applyto : ['data' | 'measurement']
* 'data' : apply to associated data
* 'measurement' : apply to measurement object itself.
noneval : obj
Value to return if `applyto` is 'data', but no data is available.
setdata : bool
Used only if data is not already set.
If true parsed data will be assigned to self.data
Otherwise data will be discarded at end of apply.
"""
applyto = applyto.lower()
if applyto == 'data':
if self.data is not None:
data = self.data
elif self.datafile is None:
return noneval
else:
data = self.read_data()
if setdata:
self.data = data
return func(data)
elif applyto == 'measurement':
return func(self)
else:
raise ValueError('Encountered unsupported value "%s" for applyto parameter.' % applyto) | 0.002134 |
def run(self,shots=1,targetT=0.02,verbose=False):
"""
Run SA with provided QUBO.
Set qubo attribute in advance of calling this method.
"""
if self.qubo != []:
self.qi()
J = self.reJ()
N = len(J)
itetemp = 100
Rtemp = 0.75
self.E = []
qq = []
for i in range(shots):
T = self.Ts
q = np.random.choice([-1,1],N)
EE = []
EE.append(Ei(q,self.J)+self.ep)
while T>targetT:
x_list = np.random.randint(0, N, itetemp)
for x in x_list:
q2 = np.ones(N)*q[x]
q2[x] = 1
dE = -2*sum(q*q2*J[:,x])
if dE < 0 or np.exp(-dE/T) > np.random.random_sample():
q[x] *= -1
EE.append(Ei(q,self.J)+self.ep)
T *= Rtemp
self.E.append(EE)
qtemp = (np.asarray(q,int)+1)/2
qq.append([int(s) for s in qtemp])
if verbose == True:
print(i,':',[int(s) for s in qtemp])
if shots == 1:
qq = qq[0]
if shots == 1:
self.E = self.E[0]
return qq | 0.057671 |
def text_till(self, strings, keep_index=False):
"""Returns all text till it encounters the given string (or one of the given strings)"""
if isinstance(strings, str):
strings = [strings]
original_index = self.index
text = ""
matched_string = ""
while self.more:
test_against = self.characters(len(max(strings, key=len)))
for string in strings:
if string.startswith("^"):
if test_against[0] in (" ", "\t", "\n", ")", "(") and test_against[1:].startswith(string[1:]):
matched_string = string
break
if test_against.startswith(string):
matched_string = string
break
if matched_string:
break
text += self.pop()
self += 1
if keep_index:
self.index = original_index
return (text, matched_string) | 0.00402 |
def add_or_update_records(cls, tables: I2B2Tables, records: List["ObservationFact"]) -> Tuple[int, int]:
"""
Add or update the observation_fact table as needed to reflect the contents of records
:param tables: i2b2 sql connection
:param records: records to apply
:return: number of records added / modified
"""
return cls._add_or_update_records(tables.crc_connection, tables.observation_fact, records) | 0.010941 |
def addToPrePrepares(self, pp: PrePrepare) -> None:
"""
Add the specified PRE-PREPARE to this replica's list of received
PRE-PREPAREs and try sending PREPARE
:param pp: the PRE-PREPARE to add to the list
"""
key = (pp.viewNo, pp.ppSeqNo)
self.prePrepares[key] = pp
self.lastPrePrepareSeqNo = pp.ppSeqNo
self.last_accepted_pre_prepare_time = pp.ppTime
self.dequeue_prepares(*key)
self.dequeue_commits(*key)
self.stats.inc(TPCStat.PrePrepareRcvd)
self.tryPrepare(pp) | 0.003515 |
def execute_from_command_line(argv=None):
"""A simple method that runs a Command."""
if sys.stdout.encoding is None:
print('please set python env PYTHONIOENCODING=UTF-8, example: '
'export PYTHONIOENCODING=UTF-8, when writing to stdout',
file=sys.stderr)
exit(1)
command = Command(argv)
command.execute() | 0.00274 |
def sendline(self, text):
"""Sends an input line to the running program, including os.linesep.
Args:
text (str): The input text to be send.
Raises:
TerminationException: The program terminated before / while / after sending the input.
NestedException: An internal problem occured while waiting for the output.
"""
logger.debug("Sending input '{0}' to '{1}'".format(text, self.name))
try:
return self._spawn.sendline(text)
except pexpect.exceptions.EOF as e:
logger.debug("Raising termination exception.")
raise TerminationException(instance=self, real_exception=e, output=self.get_output())
except pexpect.exceptions.TIMEOUT as e:
logger.debug("Raising timeout exception.")
raise TimeoutException(instance=self, real_exception=e, output=self.get_output())
except Exception as e:
logger.debug("Sending input failed: " + str(e))
raise NestedException(instance=self, real_exception=e, output=self.get_output()) | 0.00726 |
def scrape(cls, selector, root, xpath=False):
"""Return EntityList for the given selector."""
log.debug('Called scrape classmethod with root: %s' % root)
roots = selector.xpath(root) if xpath else selector.css(root)
results = [cls(r) for r in roots]
return EntityList(*results) | 0.006309 |
def listVolumes(self):
""" Return list of all volumes in this Store's selected directory. """
for (vol, paths) in self.paths.items():
for path in paths:
if path.startswith('/'):
continue
if path == '.':
continue
if self.userVolume is not None and os.path.basename(path) != self.userVolume:
continue
yield vol
break | 0.006224 |
def connect(self):
"""
Connects to Scratch.
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.host, self.port))
except socket.error as (err, msg):
self.connected = False
raise ScratchError("[Errno %d] %s" % (err, msg))
self.connected = True | 0.005249 |
def create_metrics(
self, metric_configs: Iterable[MetricConfig]) -> Dict[str, Metric]:
"""Create and register metrics from a list of MetricConfigs."""
return self.registry.create_metrics(metric_configs) | 0.008658 |
def read_element_using_argtuple(self, argtuple):
"""
takes a tuple of keys
returns node found in cfg_dict
found by traversing cfg_dict by successive
application of keys from element_path
"""
# doesn't support DELIMITED, only dict-based formats
if self.format == FMT_DELIMITED:
return None
node = self.cfg_dict
for key in argtuple:
node = node[key]
return node | 0.041026 |
def T_sigma(self, sigma):
"""
Given a policy `sigma`, return the T_sigma operator.
Parameters
----------
sigma : array_like(int, ndim=1)
Policy vector, of length n.
Returns
-------
callable
The T_sigma operator.
"""
R_sigma, Q_sigma = self.RQ_sigma(sigma)
return lambda v: R_sigma + self.beta * Q_sigma.dot(v) | 0.004717 |
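The returned operator is simply v -> R_sigma + beta * Q_sigma v; a hand-rolled two-state illustration with toy numbers (not tied to the class above):
import numpy as np

beta = 0.95
R_sigma = np.array([1.0, 0.5])                 # rewards under policy sigma
Q_sigma = np.array([[0.9, 0.1], [0.2, 0.8]])   # transition matrix under sigma
T_sigma = lambda v: R_sigma + beta * Q_sigma.dot(v)

v = np.zeros(2)
for _ in range(500):        # iterate to the fixed point v = T_sigma(v)
    v = T_sigma(v)
print(v)                    # approximate value of policy sigma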
def _get_client_impl(self):
"""
Get the versioned client implementation corresponding to the current profile.
:return: The versioned client implementation.
"""
api_version = self._get_api_version(None)
if api_version not in self._client_impls:
self._create_client_impl(api_version)
return self._client_impls[api_version] | 0.007712 |
def _resolve_fn_sub(uri_data):
"""
Tries to resolve an Integration URI which contains Fn::Sub intrinsic function. This method tries to resolve
and produce a string output.
Example:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Fn::Sub Processing:
~~~~~~~~~~~~~~~~~~
If this is a Fn::Sub, resolve as following:
1. Get the ARN String:
- If Sub is using the array syntax, then use element which is a string.
- If Sub is using string syntax, then just use the string.
2. If there is a ${XXX.Arn} then replace it with a dummy ARN
3. Otherwise skip it
.. code:
Input:
{
"Fn::Sub":
"arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${LambdaFunction.Arn}/invocations"
}
Output: "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:LambdaFunction/invocations" # NOQA
Note
~~~~
This method supports only a very restricted subset of intrinsic functions with Swagger document. This is the
best we can do without implementing a full blown intrinsic function resolution module.
Parameters
----------
uri_data : string or dict
Value of Integration URI. It can either be a string or an intrinsic function that resolves to a string
Returns
-------
string
Integration URI as a string, if we were able to resolve the Sub intrinsic
dict
Input data is returned unmodified if we are unable to resolve the intrinsic
"""
# Try the short form of Fn::Sub syntax where the value is the ARN
arn = uri_data[LambdaUri._FN_SUB]
if isinstance(arn, list):
# This is the long form of Fn::Sub syntax
#
# {
# "Fn::Sub":[ "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${MyARn}/invocations",
# {"MyARn": {"Ref": MyFunction"}
# ]
# }
#
# Get the ARN out of the list
arn = arn[0]
if not isinstance(arn, string_types):
# Even after all the processing, ARN is still not a string. Probably customer provided wrong syntax
# for Fn::Sub. Let's skip this altogether
LOG.debug("Unable to resolve Fn::Sub value for integration URI: %s", uri_data)
return uri_data
# Now finally we got the ARN string. Let us try to resolve it.
# We only support value of type ${XXX.Arn} or ${YYY.Alias}. The `.Alias` syntax is a SAM specific intrinsic
# to get ARN of Lambda Alias when using DeploymentPreference
lambda_function_arn_template = r'arn:aws:lambda:${AWS::Region}:123456789012:function:\1'
return re.sub(LambdaUri._REGEX_SUB_FUNCTION_ARN, # Find all ${blah} patterns
# Replace with Lambda Function ARN, where function name is from pattern
lambda_function_arn_template,
arn) | 0.002727 |
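The final re.sub step can be sketched standalone; the pattern below is an assumption for illustration only (the real one lives in LambdaUri._REGEX_SUB_FUNCTION_ARN):
import re

_SUB_ARN = re.compile(r"\$\{([A-Za-z0-9]+)\.(Arn|Alias)\}")   # assumed pattern
template = r"arn:aws:lambda:${AWS::Region}:123456789012:function:\1"
uri = ("arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/"
       "${LambdaFunction.Arn}/invocations")
print(_SUB_ARN.sub(template, uri))
# ${LambdaFunction.Arn} becomes a dummy function ARN; ${AWS::Region} has no
# ".Arn"/".Alias" suffix, so it is left untouched.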
def _get_key(self):
"""Get key using token from gateway"""
init_vector = bytes(bytearray.fromhex('17996d093d28ddb3ba695a2e6f58562e'))
encryptor = Cipher(algorithms.AES(self.key.encode()), modes.CBC(init_vector),
backend=default_backend()).encryptor()
ciphertext = encryptor.update(self.token.encode()) + encryptor.finalize()
if isinstance(ciphertext, str): # For Python 2 compatibility
return ''.join('{:02x}'.format(ord(x)) for x in ciphertext)
return ''.join('{:02x}'.format(x) for x in ciphertext) | 0.008503 |
def transcripts(context, build, hgnc_id, json):
"""Show all transcripts in the database"""
LOG.info("Running scout view transcripts")
adapter = context.obj['adapter']
if not json:
click.echo("Chromosome\tstart\tend\ttranscript_id\thgnc_id\trefseq\tis_primary")
for tx_obj in adapter.transcripts(build=build, hgnc_id=hgnc_id):
if json:
pp(tx_obj)
continue
click.echo("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}".format(
tx_obj['chrom'],
tx_obj['start'],
tx_obj['end'],
tx_obj['ensembl_transcript_id'],
tx_obj['hgnc_id'],
tx_obj.get('refseq_id', ''),
tx_obj.get('is_primary') or '',
)) | 0.002721 |
def set_file_priority(self, infohash, file_id, priority):
"""
Set file of a torrent to a supplied priority level.
:param infohash: INFO HASH of torrent.
:param file_id: ID of the file to set priority.
:param priority: Priority level of the file.
"""
if priority not in [0, 1, 2, 7]:
raise ValueError("Invalid priority, refer WEB-UI docs for info.")
elif not isinstance(file_id, int):
raise TypeError("File ID must be an int")
data = {'hash': infohash.lower(),
'id': file_id,
'priority': priority}
return self._post('command/setFilePrio', data=data) | 0.002911 |
def _create_code_edit(self, mimetype, *args, **kwargs):
"""
Create a code edit instance based on the mimetype of the file to
open/create.
:type mimetype: mime type
:param args: Positional arguments that must be forwarded to the editor
widget constructor.
        :param kwargs: Keyword arguments that must be forwarded to the editor
widget constructor.
:return: Code editor widget instance.
"""
if mimetype in self.editors.keys():
return self.editors[mimetype](
*args, parent=self.main_tab_widget, **kwargs)
editor = self.fallback_editor(*args, parent=self.main_tab_widget,
**kwargs)
return editor | 0.002597 |
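A minimal standalone sketch of the dispatch pattern above: look up a widget factory by mimetype and fall back to a default editor when the mimetype is unknown. The names here are illustrative stand-ins, not the pyQode API.
def create_editor(editors, fallback_editor, mimetype, *args, **kwargs):
    # Pick the registered factory for this mimetype, or the fallback.
    factory = editors.get(mimetype, fallback_editor)
    return factory(*args, **kwargs)

editors = {'text/x-python': lambda text: ('PythonCodeEdit', text)}
fallback = lambda text: ('TextCodeEdit', text)

print(create_editor(editors, fallback, 'text/x-python', 'print(42)'))
print(create_editor(editors, fallback, 'text/css', 'body { color: red }'))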
def set_float(val, default=None):
    """ utility to set a floating point value,
    useful for converting from strings """
    out = None
    if val not in (None, ''):
        try:
            out = float(val)
        except ValueError:
            return None
        if numpy.isnan(out):
            out = default
    return out | 0.006431
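A quick usage sketch of the helper above (numpy is assumed to be imported in the defining module):
print(set_float('3.14'))          # 3.14
print(set_float('not a number'))  # None
print(set_float(''))              # None
print(set_float('nan'))           # None (NaN falls back to the default)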
def AgregarFlete(self, descripcion, importe):
"Agrega la información referente al flete de la liquidación (opcional)"
flete = dict(descripcion=descripcion, importe=importe)
self.solicitud['flete'] = flete
return True | 0.008065 |
async def _read_packet(self, packet_type=MysqlPacket):
"""Read an entire "mysql packet" in its entirety from the network
and return a MysqlPacket type that represents the results.
"""
buff = b''
while True:
try:
packet_header = await self._read_bytes(4)
except asyncio.CancelledError:
self._close_on_cancel()
raise
btrl, btrh, packet_number = struct.unpack(
'<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
            # Outbound and inbound packets are numbered sequentially, so
# we increment in both write_packet and read_packet. The count
# is reset at new COMMAND PHASE.
if packet_number != self._next_seq_id:
raise InternalError(
"Packet sequence number wrong - got %d expected %d" %
(packet_number, self._next_seq_id))
self._next_seq_id = (self._next_seq_id + 1) % 256
try:
recv_data = await self._read_bytes(bytes_to_read)
except asyncio.CancelledError:
self._close_on_cancel()
raise
buff += recv_data
# https://dev.mysql.com/doc/internals/en/sending-more-than-16mbyte.html
if bytes_to_read == 0xffffff:
continue
if bytes_to_read < MAX_PACKET_LEN:
break
packet = packet_type(buff, self._encoding)
packet.check_error()
return packet | 0.001262 |
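A standalone sketch of the 4-byte packet header decoding used above: a 3-byte little-endian payload length followed by a 1-byte sequence number. The header bytes are made up for illustration.
import struct

packet_header = bytes([0x2C, 0x00, 0x00, 0x05])  # hypothetical header read off the wire
btrl, btrh, packet_number = struct.unpack('<HBB', packet_header)
bytes_to_read = btrl + (btrh << 16)
print(bytes_to_read, packet_number)  # 44 5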
def render_to_string(self, template_file, context):
"""Render given template to string and add object to context"""
context = context if context else {}
if self.object:
context['object'] = self.object
context[self.object.__class__.__name__.lower()] = self.object
return render_to_string(template_file, context, self.request) | 0.005263 |
def connectionLost(self, reason):
"""
Called by the DBus Connection object when the connection is lost.
@type reason: L{twistd.python.failure.Failure}
@param reason: The value passed to the associated connection's
connectionLost method.
"""
for wref in self._weakProxies.valuerefs():
p = wref()
if p is not None:
p.connectionLost(reason) | 0.004454 |
def syncItems(self):
""" Returns an instance of :class:`plexapi.sync.SyncList` for current device.
Raises:
                :class:`plexapi.exceptions.BadRequest`: when the device doesn't provide `sync-target`.
"""
if 'sync-target' not in self.provides:
            raise BadRequest('Requested syncList for a device which does not provide sync-target')
return self._server.syncItems(client=self) | 0.011416 |
def composition_hooks(self):
"""
:rtype: twilio.rest.video.v1.composition_hook.CompositionHookList
"""
if self._composition_hooks is None:
self._composition_hooks = CompositionHookList(self)
return self._composition_hooks | 0.007326 |
def select_pattern(node, pattern, state=None):
'''
Yield descendant nodes matching the given pattern specification
pattern - tuple of steps, each of which matches an element by name, with "*" acting like a wildcard, descending the tree in tuple order
sort of like a subset of XPath in Python tuple form
state - for internal use only
pattern examples:
("a", "b", "c") - all c elements whose parent is a b element whose parent is an a element whose parent is node
("*", "*") - any "grandchild" of node
("*", "*", "*") - any "great grandchild" of node
("**", "a") - any a descendant of node
>>> from amara3.uxml import tree
>>> from amara3.uxml.treeutil import *
>>>
>>> tb = tree.treebuilder()
>>> DOC = '<a xmlns="urn:namespaces:suck"><b><x>1</x></b><c><x>2</x><d><x>3</x></d></c><x>4</x><y>5</y></a>'
>>> root = tb.parse(DOC)
>>> results = [ e.xml_value for e in select_pattern(root, ('**', 'x')) ]
>>> results
['1', '2', '3', '4']
'''
if state is None:
state = _prep_pattern(pattern)
#for child in select_elements(elem):
if isinstance(node, element):
for child in node.xml_children:
new_state = state(child)
if new_state == MATCHED_STATE:
yield child
elif new_state is not None:
yield from select_pattern(child, None, state=new_state)
return | 0.003479 |
def get_logger_config(log_dir='/var/tmp',
logging_env='no_env',
edx_filename='edx.log',
dev_env=False,
debug=False,
local_loglevel='INFO',
service_variant='ecomworker'):
"""
Returns a dictionary containing logging configuration.
If dev_env is True, logging will not be done via local rsyslogd.
Instead, application logs will be dropped into log_dir. 'edx_filename'
is ignored unless dev_env is True.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
local_loglevel = 'INFO'
hostname = platform.node().split('.')[0]
syslog_format = (
'[service_variant={service_variant}]'
'[%(name)s][env:{logging_env}] %(levelname)s '
'[{hostname} %(process)d] [%(filename)s:%(lineno)d] '
'- %(message)s'
).format(
service_variant=service_variant,
logging_env=logging_env, hostname=hostname
)
if debug:
handlers = ['console']
else:
handlers = ['local']
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'handlers': {
'console': {
'level': 'DEBUG' if debug else 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stdout,
},
},
'loggers': {
'requests': {
'handlers': handlers,
'level': 'WARNING',
'propagate': True
},
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
}
}
if dev_env:
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
# Use a different address for Mac OS X
'address': '/var/run/syslog' if sys.platform == 'darwin' else '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
})
return logger_config | 0.000649 |
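A usage sketch for the function above: with dev_env=True and debug=True the configuration keeps logging on stdout and a rotating file under log_dir instead of the rsyslog socket. The paths and service name below are illustrative.
import logging
import logging.config

config = get_logger_config(log_dir='/tmp', dev_env=True, debug=True,
                           local_loglevel='DEBUG', service_variant='ecomworker')
logging.config.dictConfig(config)
logging.getLogger('requests').warning('handled by the console handler')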
def max_sliding_window(nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
if not nums:
return nums
queue = collections.deque()
res = []
for num in nums:
if len(queue) < k:
queue.append(num)
else:
res.append(max(queue))
queue.popleft()
queue.append(num)
res.append(max(queue))
return res | 0.00237 |
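Usage sketch for the row above (collections is assumed to be imported in the defining module). Note that calling max(queue) on every step makes this O(n*k) rather than the O(n) monotonic-deque variant.
print(max_sliding_window([1, 3, -1, -3, 5, 3, 6, 7], 3))
# [3, 3, 5, 5, 6, 7]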
def _get_result_files_base(self, temp_dir):
"""Given the temp directory that is created for each run, return the path to the directory
where files created by the tool are stored."""
if not self._use_namespaces:
return super(ContainerExecutor, self)._get_result_files_base(temp_dir)
else:
return os.path.join(temp_dir, "temp") | 0.010499 |
def generateSetupFile(self, outpath='.', egg=False):
"""
Generates the setup file for this builder.
"""
outpath = os.path.abspath(outpath)
outfile = os.path.join(outpath, 'setup.py')
opts = {
'name': self.name(),
'distname': self.distributionName(),
'version': self.version(),
'author': self.author(),
'author_email': self.authorEmail(),
'keywords': self.keywords(),
'license': self.license(),
'brief': self.brief(),
'description': self.description(),
'url': self.companyUrl()
}
wrap_dict = lambda x: map(lambda k: "r'{0}': [{1}]".format(k[0],
',\n'.join(wrap_str(k[1]))),
x.items())
opts['dependencies'] = ',\n'.join(wrap_str(self.dependencies()))
opts['classifiers'] = ',\n'.join(wrap_str(self.classifiers()))
if os.path.isfile(self.sourcePath()):
basepath = os.path.normpath(os.path.dirname(self.sourcePath()))
else:
basepath = os.path.normpath(self.sourcePath())
self.generatePlugins(basepath)
exts = set()
for root, folders, files in os.walk(basepath):
for file_ in files:
_, ext = os.path.splitext(file_)
if ext not in ('.py', '.pyc', '.pyo'):
exts.add('*' + ext)
exts = list(exts)
text = templ.SETUPFILE.format(**opts)
# generate the file
if not os.path.exists(outfile):
f = open(outfile, 'w')
f.write(text)
f.close()
# generate the manifest file
manfile = os.path.join(outpath, 'MANIFEST.in')
if not os.path.exists(manfile):
f = open(manfile, 'w')
f.write('include *.md *.txt *.ini *.cfg *.rst\n')
f.write('recursive-include {0} {1}\n'.format(self.name(), ' '.join(exts)))
f.close()
# generate the egg
if egg:
cmd = 'cd {0} && $PYTHON setup.py bdist_egg'.format(outpath)
cmd = os.path.expandvars(cmd)
cmdexec(cmd) | 0.00222 |
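A standalone sketch of the data-file extension scan used above to build the MANIFEST.in recursive-include line; the guard against empty extensions is an addition here, and the function name is illustrative.
import os

def collect_data_extensions(basepath):
    # Gather '*.ext' globs for every non-Python file found under basepath.
    exts = set()
    for root, folders, files in os.walk(basepath):
        for file_ in files:
            _, ext = os.path.splitext(file_)
            if ext and ext not in ('.py', '.pyc', '.pyo'):
                exts.add('*' + ext)
    return sorted(exts)

print(collect_data_extensions('.'))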
def update_payload_in_draft_for_edit_extension(self, upload_stream, publisher_name, extension_name, draft_id, file_name=None, **kwargs):
"""UpdatePayloadInDraftForEditExtension.
[Preview API]
:param object upload_stream: Stream to upload
:param str publisher_name:
:param str extension_name:
:param str draft_id:
:param String file_name: Header to pass the filename of the uploaded data
:rtype: :class:`<ExtensionDraft> <azure.devops.v5_0.gallery.models.ExtensionDraft>`
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
if extension_name is not None:
route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
if draft_id is not None:
route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='02b33873-4e61-496e-83a2-59d1df46b7d8',
version='5.0-preview.1',
route_values=route_values,
content=content,
media_type='application/octet-stream')
return self._deserialize('ExtensionDraft', response) | 0.005639 |
def removeSubQueue(self, queue):
'''
remove a sub queue from current queue.
        This unblocks the sub-queue, retrieves all events from the queue and puts them back into the parent.
Call clear on the sub-queue first if the events are not needed any more.
:param queue: the name or queue object to remove
:returns: ((queueevents,...), (queueEmptyEvents,...)) Possible queue events from removing sub-queues
'''
q = self.queueindex[queue]
q[1].unblockall()
q[1]._removeFromTree()
ret = ([],[])
while q[1].canPop():
r = q[1].pop()
self.append(r[0], True)
ret[0].extend(r[1])
ret[1].extend(r[2])
self.queues[q[0]].removeSubQueue(q[1])
# Remove from index
if q[2] is not None:
del self.queueindex[q[2]]
del self.queueindex[q[1]]
newblocked = not self.canPop()
if newblocked != self.blocked:
self.blocked = newblocked
if self.parent is not None:
self.parent.notifyBlock(self, newblocked)
return ret | 0.010256 |
def ensure_no_conflicts_in_commanddicts(originaldict, comparedict):
"""
<Purpose>
Recursively compares two commanddicts to see if they have conflicting commands.
<Arguments>
originaldict: A commanddict to compare.
comparedict: A commanddict to compare.
<Side Effects>
None
<Exceptions>
ModuleConflictError - A command was conflicting.
The error detail is the problematic command.
<Returns>
None
"""
"""
  Child nodes are identical if all of the following are identical:
helptext/callback/summary.
There are 3 cases we have to worry about.
> Shared child node.
> Child nodes are identical. Check grandchildren.
> Only one is defined. Check grandchildren.
> Both child nodes are defined and are not identical. Reject.
> Node is not shared. Accept.
"""
for child in comparedict.keys():
# Node not shared.
if child not in originaldict:
continue
# Shared node
comparechild_defined = is_commanddictnode_defined(comparedict[child])
originalchild_defined = is_commanddictnode_defined(originaldict[child])
# Only one is defined, or;
# both are defined and they are identical
if ((comparechild_defined ^ originalchild_defined) or
(comparechild_defined and originalchild_defined and
_are_cmd_nodes_same(originaldict[child], comparedict[child]))):
try:
ensure_no_conflicts_in_commanddicts(comparedict[child]['children'], originaldict[child]['children'])
except seash_exceptions.ModuleConflictError, e:
# Reconstruct the full command recursively
raise seash_exceptions.ModuleConflictError(child + " " + str(e) + " ("+module_name+")")
continue
# Not identical. Conflict found.
# Also include which module the conflicting module was found from.
if 'module' in originaldict[child]:
module_name = originaldict['module'][child]
else:
module_name = "default"
raise seash_exceptions.ModuleConflictError(child + ' ('+module_name+')') | 0.01295 |
def pingback_extensions_get_pingbacks(target):
"""
pingback.extensions.getPingbacks(url) => '[url, url, ...]'
Returns an array of URLs that link to the specified url.
See: http://www.aquarionics.com/misc/archives/blogite/0198.html
"""
site = Site.objects.get_current()
target_splitted = urlsplit(target)
if target_splitted.netloc != site.domain:
return TARGET_DOES_NOT_EXIST
try:
view, args, kwargs = resolve(target_splitted.path)
except Resolver404:
return TARGET_DOES_NOT_EXIST
try:
entry = Entry.published.get(
slug=kwargs['slug'],
publication_date__year=kwargs['year'],
publication_date__month=kwargs['month'],
publication_date__day=kwargs['day'])
except (KeyError, Entry.DoesNotExist):
return TARGET_IS_NOT_PINGABLE
return [pingback.user_url for pingback in entry.pingbacks] | 0.001081 |
def read_sql(sql, con, filePath, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrameModel.
Provide a filePath argument in addition to the *args/**kwargs from
pandas.read_sql and get a DataFrameModel.
NOTE: The chunksize option is overridden to None always (for now).
Reference:
http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html
pandas.read_sql(sql, con, index_col=None, coerce_float=True,
params=None, parse_dates=None, columns=None, chunksize=None)
:return: DataFrameModel
"""
# TODO: Decide if chunksize is worth keeping and how to handle?
df = pandas.read_sql(sql, con, index_col, coerce_float,
params, parse_dates, columns, chunksize=None)
return DataFrameModel(df, filePath=filePath) | 0.003222 |
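A usage sketch for the wrapper above with an in-memory SQLite connection; DataFrameModel is whatever class the surrounding module defines, and the filePath value is just illustrative metadata.
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE points (x INTEGER, y INTEGER)')
con.executemany('INSERT INTO points VALUES (?, ?)', [(1, 2), (3, 4)])

model = read_sql('SELECT * FROM points', con, filePath='/tmp/points.sqlite')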
def set_attributes(self, el, pyobj):
        '''Instance data attributes contain a dictionary
of keys (namespaceURI,localName) and attribute values.
These values can be self-describing (typecode), or use
attribute_typecode_dict to determine serialization.
        Parameters:
el -- MessageInterface representing the element
pyobj --
'''
if not hasattr(pyobj, self.attrs_aname):
return
if not isinstance(getattr(pyobj, self.attrs_aname), dict):
raise TypeError,\
'pyobj.%s must be a dictionary of names and values'\
% self.attrs_aname
for attr, value in getattr(pyobj, self.attrs_aname).items():
namespaceURI,localName = None, attr
if type(attr) in _seqtypes:
namespaceURI, localName = attr
what = None
if getattr(self, 'attribute_typecode_dict', None) is not None:
what = self.attribute_typecode_dict.get(attr)
if what is None and namespaceURI is None:
what = self.attribute_typecode_dict.get(localName)
# allow derived type
if hasattr(value, 'typecode') and not isinstance(what, AnyType):
if what is not None and not isinstance(value.typecode, what):
raise EvaluateException, \
'self-describing attribute must subclass %s'\
%what.__class__
what = value.typecode
self.logger.debug("attribute create -- %s", value)
if isinstance(what, QName):
what.set_prefix(el, value)
#format the data
if what is None:
value = str(value)
else:
value = what.get_formatted_content(value)
el.setAttributeNS(namespaceURI, localName, value) | 0.005632 |
def asRGB(self):
"""Return image as RGB pixels. RGB colour images are passed
through unchanged; greyscales are expanded into RGB
triplets (there is a small speed overhead for doing this).
An alpha channel in the source image will raise an
exception.
The return values are as for the :meth:`read` method
except that the *metadata* reflect the returned pixels, not the
source image. In particular, for this method
``metadata['greyscale']`` will be ``False``.
"""
width,height,pixels,meta = self.asDirect()
if meta['alpha']:
raise Error("will not convert image with alpha channel to RGB")
if not meta['greyscale']:
return width,height,pixels,meta
meta['greyscale'] = False
typecode = 'BH'[meta['bitdepth'] > 8]
def iterrgb():
for row in pixels:
a = array(typecode, [0]) * 3 * width
for i in range(3):
a[i::3] = row
yield a
return width,height,iterrgb(),meta | 0.010919 |
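A standalone sketch of the interleaving trick used by iterrgb above: one grey row is copied into every third slot of a row three times as wide, yielding R=G=B triplets.
from array import array

width = 4
grey_row = array('B', [10, 20, 30, 40])

rgb_row = array('B', [0]) * 3 * width
for i in range(3):
    rgb_row[i::3] = grey_row  # extended-slice assignment requires another array('B')

print(list(rgb_row))
# [10, 10, 10, 20, 20, 20, 30, 30, 30, 40, 40, 40]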
def _parse_sequences(ilines, expect_qlen):
"""Parse the sequences in the current block.
Sequence looks like:
$3=227(209):
>gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75
{()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
"""
while True:
first = next(ilines)
if first.startswith('_') and first.endswith('].'):
# End of sequences & end of block
break
# ENH: handle wrapped lines?
try:
index, this_len, query_len = _parse_seq_preheader(first)
except ValueError:
logging.warn('Unparseable line (SKIPPING):\n%s', first)
continue
(rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
) = _parse_seq_header(next(ilines))
try:
headseq, molseq, tailseq = _parse_seq_body(next(ilines))
except ValueError:
logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
continue
# Validation
if expect_qlen != query_len:
logging.warn("Query length in %s given as %d; expected %d",
rec_id, query_len, expect_qlen)
if not headseq and not headlen:
headlen = 0
if not tailseq and not taillen:
taillen = 0
if headseq:
if headlen is None:
headlen = len(headseq)
elif headlen != len(headseq):
logging.warn("Conflicting head flank lengths in %s: %d, %d",
rec_id, headlen, len(headseq))
if tailseq:
if taillen is None:
taillen = len(tailseq)
elif taillen != len(tailseq):
logging.warn("Conflicting tail flank lengths in %s: %d, %d",
rec_id, taillen, len(tailseq))
yield {'index': index,
'id': rec_id,
'description': description,
'dbxrefs': dbxrefs,
'phylum': phylum,
'taxchar': taxchar,
'head_len': headlen,
'tail_len': taillen,
'head_seq': headseq,
'tail_seq': tailseq,
'length': this_len,
'seq': molseq,
} | 0.001149 |
def net_send(out_data, conn):
''' Write pending data from websocket to network. '''
print('Sending {} bytes'.format(len(out_data)))
conn.send(out_data) | 0.006135 |
def upgrade(reboot=False, at_time=None):
'''
Upgrade the kernel and optionally reboot the system.
reboot : False
Request a reboot if a new kernel is available.
at_time : immediate
Schedule the reboot at some point in the future. This argument
is ignored if ``reboot=False``. See
:py:func:`~salt.modules.system.reboot` for more details
on this argument.
CLI Example:
.. code-block:: bash
salt '*' kernelpkg.upgrade
salt '*' kernelpkg.upgrade reboot=True at_time=1
.. note::
An immediate reboot often shuts down the system before the minion has a
chance to return, resulting in errors. A minimal delay (1 minute) is
useful to ensure the result is delivered to the master.
'''
result = __salt__['pkg.upgrade'](name=_package_name())
_needs_reboot = needs_reboot()
ret = {
'upgrades': result,
'active': active(),
'latest_installed': latest_installed(),
'reboot_requested': reboot,
'reboot_required': _needs_reboot
}
if reboot and _needs_reboot:
log.warning('Rebooting system due to kernel upgrade')
__salt__['system.reboot'](at_time=at_time)
return ret | 0.000801 |